/*********************************************************************
 *
 * Filename:      w83977af_ir.c
 * Version:       1.0
 * Description:   FIR driver for the Winbond W83977AF Super I/O chip
 * Status:        Experimental.
 * Author:        Paul VanderSpek
 * Created at:    Wed Nov  4 11:46:16 1998
 * Modified at:   Fri Jan 28 12:10:59 2000
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 *
 *     Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
 *     Copyright (c) 1998-1999 Rebel.com
 *
 *     This program is free software; you can redistribute it and/or
 *     modify it under the terms of the GNU General Public License as
 *     published by the Free Software Foundation; either version 2 of
 *     the License, or (at your option) any later version.
 *
 *     Neither Paul VanderSpek nor Rebel.com admit liability nor provide
 *     warranty for any of this software. This material is provided "AS-IS"
 *     and at no charge.
 *
 *     If you find bugs in this file, it's very likely that the same bug
 *     will also be in pc87108.c, since the implementations are quite
 *     similar.
 *
 *     Notice that all functions that need to access the chip in _any_
 *     way must save the BSR register on entry and restore it on exit.
 *     It is _very_ important to follow this policy!
 *
 *         __u8 bank;
 *
 *         bank = inb(iobase+BSR);
 *
 *         do_your_stuff_here();
 *
 *         outb(bank, iobase+BSR);
 *
 ********************************************************************/

#include <linux/module.h>
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>

#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>
#include "w83977af.h"
#include "w83977af_ir.h"

#ifdef  CONFIG_ARCH_NETWINDER            /* Adjust to NetWinder differences */
#undef  CONFIG_NETWINDER_TX_DMA_PROBLEMS /* Not needed */
#define CONFIG_NETWINDER_RX_DMA_PROBLEMS /* Must have this one! */
#endif
#undef  CONFIG_USE_INTERNAL_TIMER  /* Just cannot make that timer work */
#define CONFIG_USE_W977_PNP        /* Currently needed */
#define PIO_MAX_SPEED       115200

static char *driver_name = "w83977af_ir";
static int  qos_mtt_bits = 0x07;   /* 1 ms or more */
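/*
 * qos_mtt_bits is a bitmask over the IrLAP minimum-turn-time table, which
 * appears to be 10000, 5000, 1000, 500, 100, 50, 10 and 0 microseconds
 * (slowest entry first); 0x07 should therefore select the three slowest
 * entries, i.e. a turnaround time of 1 ms or more.
 */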

#define CHIP_IO_EXTENT 8

static unsigned int io[] = { 0x180, ~0, ~0, ~0 };
#ifdef CONFIG_ARCH_NETWINDER             /* Adjust to NetWinder differences */
static unsigned int irq[] = { 6, 0, 0, 0 };
#else
static unsigned int irq[] = { 11, 0, 0, 0 };
#endif
static unsigned int dma[] = { 1, 0, 0, 0 };
static unsigned int efbase[] = { W977_EFIO_BASE, W977_EFIO2_BASE };
static unsigned int efio = W977_EFIO_BASE;

static struct w83977af_ir *dev_self[] = { NULL, NULL, NULL, NULL };

/* Some prototypes */
static int  w83977af_open(int i, unsigned int iobase, unsigned int irq,
			  unsigned int dma);
static int  w83977af_close(struct w83977af_ir *self);
static int  w83977af_probe(int iobase, int irq, int dma);
static int  w83977af_dma_receive(struct w83977af_ir *self);
static int  w83977af_dma_receive_complete(struct w83977af_ir *self);
static int  w83977af_hard_xmit(struct sk_buff *skb, struct net_device *dev);
static int  w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
static void w83977af_dma_write(struct w83977af_ir *self, int iobase);
static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed);
static int  w83977af_is_receiving(struct w83977af_ir *self);

static int  w83977af_net_open(struct net_device *dev);
static int  w83977af_net_close(struct net_device *dev);
static int  w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static struct net_device_stats *w83977af_net_get_stats(struct net_device *dev);

/*
 * Function w83977af_init ()
 *
 *    Initialize chip. Just try to find out how many chips we are dealing with
 *    and where they are
 */
static int __init w83977af_init(void)
{
	int i;

	IRDA_DEBUG(0, "%s()\n", __FUNCTION__);

	for (i = 0; (i < 4) && (io[i] < 2000); i++) {
		if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
			return 0;
	}
	return -ENODEV;
}

/*
 * Function w83977af_cleanup ()
 *
 *    Close all configured chips
 *
 */
static void __exit w83977af_cleanup(void)
{
	int i;

	IRDA_DEBUG(4, "%s()\n", __FUNCTION__);

	for (i = 0; i < 4; i++) {
		if (dev_self[i])
			w83977af_close(dev_self[i]);
	}
}

/*
 * Function w83977af_open (i, iobase, irq, dma)
 *
 *    Open driver instance
 *
 */
int w83977af_open(int i, unsigned int iobase, unsigned int irq,
		  unsigned int dma)
{
	struct net_device *dev;
	struct w83977af_ir *self;
	int err;

	IRDA_DEBUG(0, "%s()\n", __FUNCTION__);

	/* Lock the port that we need */
	if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
		IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
			   __FUNCTION__, iobase);
		return -ENODEV;
	}

	if (w83977af_probe(iobase, irq, dma) == -1) {
		err = -1;
		goto err_out;
	}
	/*
	 *  Allocate new instance of the driver
	 */
	dev = alloc_irdadev(sizeof(struct w83977af_ir));
	if (dev == NULL) {
		printk(KERN_ERR "IrDA: Can't allocate memory for "
		       "IrDA control block!\n");
		err = -ENOMEM;
		goto err_out;
	}

	self = dev->priv;
	spin_lock_init(&self->lock);

	/* Initialize IO */
	self->io.fir_base  = iobase;
	self->io.irq       = irq;
	self->io.fir_ext   = CHIP_IO_EXTENT;
	self->io.dma       = dma;
	self->io.fifo_size = 32;

	/* Initialize QoS for this device */
	irda_init_max_qos_capabilies(&self->qos);

	/* The only value we must override is the baud rate */

	/* FIXME: The HP HSDL-1100 does not support 1152000! */
	self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
		IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);

	/* The HP HSDL-1100 needs 1 ms according to the specs */
	self->qos.min_turn_time.bits = qos_mtt_bits;
	irda_qos_bits_to_value(&self->qos);

	/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
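	/*
	 * Presumably data_size is the 2048-byte receiver frame length set up
	 * in w83977af_probe() and window_size the maximum IrLAP window of 7
	 * frames: (2048 + 6) * 7 + 6 = 14384 bytes for the receive buffer.
	 * The transmit buffer only ever has to hold one outgoing frame.
	 */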
	self->rx_buff.truesize = 14384;
	self->tx_buff.truesize = 4000;

	/* Allocate memory if needed */
	self->rx_buff.head =
		dma_alloc_coherent(NULL, self->rx_buff.truesize,
				   &self->rx_buff_dma, GFP_KERNEL);
	if (self->rx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out1;
	}

	memset(self->rx_buff.head, 0, self->rx_buff.truesize);

	self->tx_buff.head =
		dma_alloc_coherent(NULL, self->tx_buff.truesize,
				   &self->tx_buff_dma, GFP_KERNEL);
	if (self->tx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out2;
	}
	memset(self->tx_buff.head, 0, self->tx_buff.truesize);

	self->rx_buff.in_frame = FALSE;
	self->rx_buff.state = OUTSIDE_FRAME;
	self->tx_buff.data = self->tx_buff.head;
	self->rx_buff.data = self->rx_buff.head;
	self->netdev = dev;

	/* Keep track of module usage */
	SET_MODULE_OWNER(dev);

	/* Override the network functions we need to use */
	dev->hard_start_xmit = w83977af_hard_xmit;
	dev->open            = w83977af_net_open;
	dev->stop            = w83977af_net_close;
	dev->do_ioctl        = w83977af_net_ioctl;
	dev->get_stats       = w83977af_net_get_stats;

	err = register_netdev(dev);
	if (err) {
		IRDA_ERROR("%s(), register_netdev() failed!\n", __FUNCTION__);
		goto err_out3;
	}
	IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);

	/* Need to store self somewhere */
	dev_self[i] = self;

	return 0;
err_out3:
	dma_free_coherent(NULL, self->tx_buff.truesize,
			  self->tx_buff.head, self->tx_buff_dma);
err_out2:
	dma_free_coherent(NULL, self->rx_buff.truesize,
			  self->rx_buff.head, self->rx_buff_dma);
err_out1:
	free_netdev(dev);
err_out:
	release_region(iobase, CHIP_IO_EXTENT);
	return err;
}

/*
 * Function w83977af_close (self)
 *
 *    Close driver instance
 *
 */
static int w83977af_close(struct w83977af_ir *self)
{
	int iobase;

	IRDA_DEBUG(0, "%s()\n", __FUNCTION__);

	iobase = self->io.fir_base;

#ifdef CONFIG_USE_W977_PNP
	/* Enter PnP configuration mode */
	w977_efm_enter(efio);

	w977_select_device(W977_DEVICE_IR, efio);

	/* Deactivate device */
	w977_write_reg(0x30, 0x00, efio);

	w977_efm_exit(efio);
#endif /* CONFIG_USE_W977_PNP */

	/* Remove netdevice */
	unregister_netdev(self->netdev);

	/* Release the PORT that this driver is using */
	IRDA_DEBUG(0, "%s(), Releasing Region %03x\n",
		   __FUNCTION__, self->io.fir_base);
	release_region(self->io.fir_base, self->io.fir_ext);

	if (self->tx_buff.head)
		dma_free_coherent(NULL, self->tx_buff.truesize,
				  self->tx_buff.head, self->tx_buff_dma);

	if (self->rx_buff.head)
		dma_free_coherent(NULL, self->rx_buff.truesize,
				  self->rx_buff.head, self->rx_buff_dma);

	free_netdev(self->netdev);

	return 0;
}

int w83977af_probe(int iobase, int irq, int dma)
{
	int version;
	int i;

	for (i = 0; i < 2; i++) {
		IRDA_DEBUG(0, "%s()\n", __FUNCTION__);
#ifdef CONFIG_USE_W977_PNP
		/* Enter PnP configuration mode */
		w977_efm_enter(efbase[i]);

		w977_select_device(W977_DEVICE_IR, efbase[i]);

		/* Configure PnP port, IRQ, and DMA channel */
		w977_write_reg(0x60, (iobase >> 8) & 0xff, efbase[i]);
		w977_write_reg(0x61, (iobase) & 0xff, efbase[i]);

		w977_write_reg(0x70, irq, efbase[i]);
#ifdef CONFIG_ARCH_NETWINDER
		/* NetWinder uses DMA channel numbers one higher than Linux */
		w977_write_reg(0x74, dma+1, efbase[i]);
#else
		w977_write_reg(0x74, dma, efbase[i]);
#endif /* CONFIG_ARCH_NETWINDER */
		w977_write_reg(0x75, 0x04, efbase[i]);  /* Disable Tx DMA */

		/* Set append hardware CRC, enable IR bank selection */
		w977_write_reg(0xf0, APEDCRC|ENBNKSEL, efbase[i]);

		/* Activate device */
		w977_write_reg(0x30, 0x01, efbase[i]);

		w977_efm_exit(efbase[i]);
#endif /* CONFIG_USE_W977_PNP */
		/* Disable Advanced mode */
		switch_bank(iobase, SET2);
		outb(0x00, iobase+2);

		/* Turn on UART (global) interrupts */
		switch_bank(iobase, SET0);
		outb(HCR_EN_IRQ, iobase+HCR);

		/* Switch to advanced mode */
		switch_bank(iobase, SET2);
		outb(inb(iobase+ADCR1) | ADCR1_ADV_SL, iobase+ADCR1);

		/* Set default IR-mode */
		switch_bank(iobase, SET0);
		outb(HCR_SIR, iobase+HCR);

		/* Read the Advanced IR ID */
		switch_bank(iobase, SET3);
		version = inb(iobase+AUID);

		/* The high nibble of the Advanced IR ID should be 0x1 */
		if (0x10 == (version & 0xf0)) {
			efio = efbase[i];

			/* Set FIFO size to 32 */
			switch_bank(iobase, SET2);
			outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);

			/* Set FIFO threshold to TX17, RX16 */
			switch_bank(iobase, SET0);
			outb(UFR_RXTL|UFR_TXTL|UFR_TXF_RST|UFR_RXF_RST|
			     UFR_EN_FIFO, iobase+UFR);

			/* Receiver frame length */
			switch_bank(iobase, SET4);
			outb(2048 & 0xff, iobase+6);
			outb((2048 >> 8) & 0x1f, iobase+7);

			/*
			 * Init HP HSDL-1100 transceiver.
			 *
			 * Set IRX_MSL since we have 2 receive paths, IRRX
			 * and IRRXH. Clear IRSL0D since we want IRSL0 to
			 * be an input pin used for IRRXH
			 *
			 *   IRRX  pin 37 connected to receiver
			 *   IRTX  pin 38 connected to transmitter
			 *   FIRRX pin 39 connected to receiver      (IRSL0)
			 *   CIRRX pin 40 connected to pin 37
			 */
			switch_bank(iobase, SET7);
			outb(0x40, iobase+7);

			IRDA_MESSAGE("W83977AF (IR) driver loaded. "
				     "Version: 0x%02x\n", version);

			return 0;
		} else {
			/* Try next extended function register address */
			IRDA_DEBUG(0, "%s(), Wrong chip version\n", __FUNCTION__);
		}
	}
	return -1;
}

void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
{
	int ir_mode = HCR_SIR;
	int iobase;
	__u8 set;

	iobase = self->io.fir_base;

	/* Update accounting for new speed */
	self->io.speed = speed;

	/* Save current bank */
	set = inb(iobase+SSR);

	/* Disable interrupts */
	switch_bank(iobase, SET0);
	outb(0, iobase+ICR);

	/* Select Set 2 */
	switch_bank(iobase, SET2);
	outb(0x00, iobase+ABHL);

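	/*
	 * The ABLL values below look like baud-rate divisors for a 115200
	 * baud reference: 115200/9600 = 12 (0x0c), /19200 = 6, /38400 = 3,
	 * /57600 = 2 and /115200 = 1.  The MIR/FIR rates are selected via
	 * the HCR mode bits instead and leave the divisor alone.
	 */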
	switch (speed) {
	case 9600:   outb(0x0c, iobase+ABLL); break;
	case 19200:  outb(0x06, iobase+ABLL); break;
	case 38400:  outb(0x03, iobase+ABLL); break;
	case 57600:  outb(0x02, iobase+ABLL); break;
	case 115200: outb(0x01, iobase+ABLL); break;
	case 576000:
		ir_mode = HCR_MIR_576;
		IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __FUNCTION__);
		break;
	case 1152000:
		ir_mode = HCR_MIR_1152;
		IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __FUNCTION__);
		break;
	case 4000000:
		ir_mode = HCR_FIR;
		IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __FUNCTION__);
		break;
	default:
		ir_mode = HCR_FIR;
		IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", __FUNCTION__, speed);
		break;
	}

	/* Set speed mode */
	switch_bank(iobase, SET0);
	outb(ir_mode, iobase+HCR);

	/* Set FIFO size to 32 */
	switch_bank(iobase, SET2);
	outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);

	/* Set FIFO threshold to TX17, RX16 */
	switch_bank(iobase, SET0);
	outb(0x00, iobase+UFR);        /* Reset */
	outb(UFR_EN_FIFO, iobase+UFR); /* First we must enable FIFO */
	outb(0xa7, iobase+UFR);

	netif_wake_queue(self->netdev);

	/* Enable some interrupts so we can receive frames */
	switch_bank(iobase, SET0);
	if (speed > PIO_MAX_SPEED) {
		outb(ICR_EFSFI, iobase+ICR);
		w83977af_dma_receive(self);
	} else
		outb(ICR_ERBRI, iobase+ICR);

	/* Restore SSR */
	outb(set, iobase+SSR);
}

/*
 * Function w83977af_hard_xmit (skb, dev)
 *
 *    Sets up a DMA transfer to send the current frame.
 *
 */
int w83977af_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct w83977af_ir *self;
	__s32 speed;
	int iobase;
	__u8 set;
	int mtt;

	self = (struct w83977af_ir *) dev->priv;

	iobase = self->io.fir_base;

	IRDA_DEBUG(4, "%s(%ld), skb->len=%d\n", __FUNCTION__, jiffies,
		   (int) skb->len);

	/* Lock transmit buffer */
	netif_stop_queue(dev);

	/* Check if we need to change the speed */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		/* Check for empty frame */
		if (!skb->len) {
			w83977af_change_speed(self, speed);
			dev->trans_start = jiffies;
			dev_kfree_skb(skb);
			return 0;
		} else
			self->new_speed = speed;
	}

	/* Save current set */
	set = inb(iobase+SSR);

	/* Decide if we should use PIO or DMA transfer */
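	/*
	 * Above PIO_MAX_SPEED (115200 baud, i.e. the MIR/FIR rates) the frame
	 * is copied into the coherent DMA buffer and handed to the chip's DMA
	 * engine; at SIR rates it is async-wrapped and then pushed into the
	 * transmit FIFO byte by byte from the interrupt handler.
	 */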
	if (self->io.speed > PIO_MAX_SPEED) {
		self->tx_buff.data = self->tx_buff.head;
		memcpy(self->tx_buff.data, skb->data, skb->len);
		self->tx_buff.len = skb->len;

		mtt = irda_get_mtt(skb);
#ifdef CONFIG_USE_INTERNAL_TIMER
		if (mtt > 50) {
			/* Adjust for timer resolution */
			mtt /= 1000+1;

			/* Setup timer */
			switch_bank(iobase, SET4);
			outb(mtt & 0xff, iobase+TMRL);
			outb((mtt >> 8) & 0x0f, iobase+TMRH);

			/* Start timer */
			outb(IR_MSL_EN_TMR, iobase+IR_MSL);
			self->io.direction = IO_XMIT;

			/* Enable timer interrupt */
			switch_bank(iobase, SET0);
			outb(ICR_ETMRI, iobase+ICR);
		} else {
#endif
			IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __FUNCTION__, jiffies, mtt);
			if (mtt)
				udelay(mtt);

			/* Enable DMA interrupt */
			switch_bank(iobase, SET0);
			outb(ICR_EDMAI, iobase+ICR);
			w83977af_dma_write(self, iobase);
#ifdef CONFIG_USE_INTERNAL_TIMER
		}
#endif
	} else {
		self->tx_buff.data = self->tx_buff.head;
		self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
						   self->tx_buff.truesize);

		/* Add interrupt on tx low level (will fire immediately) */
		switch_bank(iobase, SET0);
		outb(ICR_ETXTHI, iobase+ICR);
	}
	dev->trans_start = jiffies;
	dev_kfree_skb(skb);

	/* Restore set register */
	outb(set, iobase+SSR);

	return 0;
}

/*
 * Function w83977af_dma_write (self, iobase)
 *
 *    Send frame using DMA
 *
 */
static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
{
	__u8 set;
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
	unsigned long flags;
	__u8 hcr;
#endif
	IRDA_DEBUG(4, "%s(), len=%d\n", __FUNCTION__, self->tx_buff.len);

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable DMA */
	switch_bank(iobase, SET0);
	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);

	/* Choose transmit DMA channel */
	switch_bank(iobase, SET2);
	outb(ADCR1_D_CHSW|/*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase+ADCR1);
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
	spin_lock_irqsave(&self->lock, flags);

	disable_dma(self->io.dma);
	clear_dma_ff(self->io.dma);
	set_dma_mode(self->io.dma, DMA_MODE_READ);
	set_dma_addr(self->io.dma, self->tx_buff_dma);
	set_dma_count(self->io.dma, self->tx_buff.len);
#else
	irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
		       DMA_MODE_WRITE);
#endif
	self->io.direction = IO_XMIT;

	/* Enable DMA */
	switch_bank(iobase, SET0);
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
	hcr = inb(iobase+HCR);
	outb(hcr | HCR_EN_DMA, iobase+HCR);
	enable_dma(self->io.dma);
	spin_unlock_irqrestore(&self->lock, flags);
#else
	outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR);
#endif

	/* Restore set register */
	outb(set, iobase+SSR);
}

/*
 * Function w83977af_pio_write (iobase, buf, len, fifo_size)
 *
 *    Fill the chip's transmit FIFO with up to fifo_size bytes from buf
 *    and return the number of bytes actually written
 *
 */
static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
{
	int actual = 0;
	__u8 set;

	IRDA_DEBUG(4, "%s()\n", __FUNCTION__);

	/* Save current bank */
	set = inb(iobase+SSR);

	switch_bank(iobase, SET0);
	if (!(inb_p(iobase+USR) & USR_TSRE)) {
		IRDA_DEBUG(4,
			   "%s(), warning, FIFO not empty yet!\n", __FUNCTION__);

		fifo_size -= 17;
		IRDA_DEBUG(4, "%s(), %d bytes left in tx fifo\n",
			   __FUNCTION__, fifo_size);
	}

	/* Fill FIFO with current frame */
	while ((fifo_size-- > 0) && (actual < len)) {
		/* Transmit next byte */
		outb(buf[actual++], iobase+TBR);
	}

	IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n",
		   __FUNCTION__, fifo_size, actual, len);

	/* Restore bank */
	outb(set, iobase+SSR);

	return actual;
}

/*
 * Function w83977af_dma_xmit_complete (self)
 *
 *    The transfer of a frame is finished, so do the necessary things
 *
 *
 */
static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
{
	int iobase;
	__u8 set;

	IRDA_DEBUG(4, "%s(%ld)\n", __FUNCTION__, jiffies);

	IRDA_ASSERT(self != NULL, return;);

	iobase = self->io.fir_base;

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable DMA */
	switch_bank(iobase, SET0);
	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);

	/* Check for underrun! */
	if (inb(iobase+AUDR) & AUDR_UNDR) {
		IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __FUNCTION__);

		self->stats.tx_errors++;
		self->stats.tx_fifo_errors++;

		/* Clear bit, by writing 1 to it */
		outb(AUDR_UNDR, iobase+AUDR);
	} else
		self->stats.tx_packets++;


	if (self->new_speed) {
		w83977af_change_speed(self, self->new_speed);
		self->new_speed = 0;
	}

	/* Unlock tx_buff and request another frame */
	/* Tell the network layer, that we want more frames */
	netif_wake_queue(self->netdev);

	/* Restore set */
	outb(set, iobase+SSR);
}

/*
 * Function w83977af_dma_receive (self)
 *
 *    Get ready for receiving a frame. The device will initiate a DMA
 *    if it starts to receive a frame.
 *
 */
int w83977af_dma_receive(struct w83977af_ir *self)
{
	int iobase;
	__u8 set;
#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
	unsigned long flags;
	__u8 hcr;
#endif
	IRDA_ASSERT(self != NULL, return -1;);

	IRDA_DEBUG(4, "%s\n", __FUNCTION__);

	iobase = self->io.fir_base;

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable DMA */
	switch_bank(iobase, SET0);
	outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);

	/* Choose DMA Rx, DMA Fairness, and Advanced mode */
	switch_bank(iobase, SET2);
	outb((inb(iobase+ADCR1) & ~ADCR1_D_CHSW)/*|ADCR1_DMA_F*/|ADCR1_ADV_SL,
	     iobase+ADCR1);

	self->io.direction = IO_RECV;
	self->rx_buff.data = self->rx_buff.head;

#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
	spin_lock_irqsave(&self->lock, flags);

	disable_dma(self->io.dma);
	clear_dma_ff(self->io.dma);
	set_dma_mode(self->io.dma, DMA_MODE_READ);
	set_dma_addr(self->io.dma, self->rx_buff_dma);
	set_dma_count(self->io.dma, self->rx_buff.truesize);
#else
	irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
		       DMA_MODE_READ);
#endif
	/*
	 * Reset Rx FIFO. This will also flush the ST_FIFO, it's very
	 * important that we don't reset the Tx FIFO since it might not
	 * be finished transmitting yet
	 */
	switch_bank(iobase, SET0);
	outb(UFR_RXTL|UFR_TXTL|UFR_RXF_RST|UFR_EN_FIFO, iobase+UFR);
	self->st_fifo.len = self->st_fifo.tail = self->st_fifo.head = 0;

	/* Enable DMA */
	switch_bank(iobase, SET0);
#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
	hcr = inb(iobase+HCR);
	outb(hcr | HCR_EN_DMA, iobase+HCR);
	enable_dma(self->io.dma);
	spin_unlock_irqrestore(&self->lock, flags);
#else
	outb(inb(iobase+HCR) | HCR_EN_DMA, iobase+HCR);
#endif
	/* Restore set */
	outb(set, iobase+SSR);

	return 0;
}

/*
 * Function w83977af_dma_receive_complete (self)
 *
 *    Finished with receiving a frame
 *
 */
int w83977af_dma_receive_complete(struct w83977af_ir *self)
{
	struct sk_buff *skb;
	struct st_fifo *st_fifo;
	int len;
	int iobase;
	__u8 set;
	__u8 status;

	IRDA_DEBUG(4, "%s\n", __FUNCTION__);

	st_fifo = &self->st_fifo;

	iobase = self->io.fir_base;

	/* Save current set */
	set = inb(iobase+SSR);

	/* Read status FIFO */
	switch_bank(iobase, SET5);
	while ((status = inb(iobase+FS_FO)) & FS_FO_FSFDR) {
		st_fifo->entries[st_fifo->tail].status = status;

		st_fifo->entries[st_fifo->tail].len  = inb(iobase+RFLFL);
		st_fifo->entries[st_fifo->tail].len |= inb(iobase+RFLFH) << 8;

		st_fifo->tail++;
		st_fifo->len++;
	}

	while (st_fifo->len) {
		/* Get first entry */
		status = st_fifo->entries[st_fifo->head].status;
		len    = st_fifo->entries[st_fifo->head].len;
		st_fifo->head++;
		st_fifo->len--;

		/* Check for errors */
		if (status & FS_FO_ERR_MSK) {
			if (status & FS_FO_LST_FR) {
				/* Add number of lost frames to stats */
				self->stats.rx_errors += len;
			} else {
				/* Skip frame */
				self->stats.rx_errors++;

				self->rx_buff.data += len;

				if (status & FS_FO_MX_LEX)
					self->stats.rx_length_errors++;

				if (status & FS_FO_PHY_ERR)
					self->stats.rx_frame_errors++;

				if (status & FS_FO_CRC_ERR)
					self->stats.rx_crc_errors++;
			}
			/* The errors below can be reported in both cases */
			if (status & FS_FO_RX_OV)
				self->stats.rx_fifo_errors++;

			if (status & FS_FO_FSF_OV)
				self->stats.rx_fifo_errors++;

		} else {
			/* Check if we have transferred all data to memory */
			switch_bank(iobase, SET0);
			if (inb(iobase+USR) & USR_RDR) {
#ifdef CONFIG_USE_INTERNAL_TIMER
				/* Put this entry back in fifo */
				st_fifo->head--;
				st_fifo->len++;
				st_fifo->entries[st_fifo->head].status = status;
				st_fifo->entries[st_fifo->head].len = len;

				/* Restore set register */
				outb(set, iobase+SSR);

				return FALSE;	/* I'll be back! */
#else
				udelay(80); /* Should be enough!? */
#endif
			}

			skb = dev_alloc_skb(len+1);
			if (skb == NULL)  {
				printk(KERN_INFO
				       "%s(), memory squeeze, dropping frame.\n",
				       __FUNCTION__);
				/* Restore set register */
				outb(set, iobase+SSR);

				return FALSE;
			}

			/* Align to 20 bytes */
			skb_reserve(skb, 1);

			/* Copy frame without CRC */
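			/*
			 * Frames below 4 Mb/s carry a 16-bit CRC, 4 Mb/s
			 * (FIR) frames a 32-bit CRC, which is why two or
			 * four trailing bytes are dropped here.
			 */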
			if (self->io.speed < 4000000) {
				skb_put(skb, len-2);
				memcpy(skb->data, self->rx_buff.data, len-2);
			} else {
				skb_put(skb, len-4);
				memcpy(skb->data, self->rx_buff.data, len-4);
			}

			/* Move to next frame */
			self->rx_buff.data += len;
			self->stats.rx_packets++;

			skb->dev = self->netdev;
			skb->mac.raw  = skb->data;
			skb->protocol = htons(ETH_P_IRDA);
			netif_rx(skb);
			self->netdev->last_rx = jiffies;
		}
	}
	/* Restore set register */
	outb(set, iobase+SSR);

	return TRUE;
}

/*
 * Function w83977af_pio_receive (self)
 *
 *    Receive all data in receiver FIFO
 *
 */
static void w83977af_pio_receive(struct w83977af_ir *self)
{
	__u8 byte = 0x00;
	int iobase;

	IRDA_DEBUG(4, "%s()\n", __FUNCTION__);

	IRDA_ASSERT(self != NULL, return;);

	iobase = self->io.fir_base;

	/* Receive all characters in Rx FIFO */
	do {
		byte = inb(iobase+RBR);
		async_unwrap_char(self->netdev, &self->stats, &self->rx_buff,
				  byte);
	} while (inb(iobase+USR) & USR_RDR); /* Data available */
}

/*
 * Function w83977af_sir_interrupt (self, isr)
 *
 *    Handle SIR interrupt
 *
 */
static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
{
	int actual;
	__u8 new_icr = 0;
	__u8 set;
	int iobase;

	IRDA_DEBUG(4, "%s(), isr=%#x\n", __FUNCTION__, isr);

	iobase = self->io.fir_base;
	/* Transmit FIFO low on data */
	if (isr & ISR_TXTH_I) {
		/* Write data left in transmit buffer */
		actual = w83977af_pio_write(self->io.fir_base,
					    self->tx_buff.data,
					    self->tx_buff.len,
					    self->io.fifo_size);

		self->tx_buff.data += actual;
		self->tx_buff.len  -= actual;

		self->io.direction = IO_XMIT;

		/* Check if finished */
		if (self->tx_buff.len > 0) {
			new_icr |= ICR_ETXTHI;
		} else {
			set = inb(iobase+SSR);
			switch_bank(iobase, SET0);
			outb(AUDR_SFEND, iobase+AUDR);
			outb(set, iobase+SSR);

			self->stats.tx_packets++;

			/* Feed me more packets */
			netif_wake_queue(self->netdev);
			new_icr |= ICR_ETBREI;
		}
	}
	/* Check if transmission has completed */
	if (isr & ISR_TXEMP_I) {
		/* Check if we need to change the speed? */
		if (self->new_speed) {
			IRDA_DEBUG(2,
				   "%s(), Changing speed!\n", __FUNCTION__);
			w83977af_change_speed(self, self->new_speed);
			self->new_speed = 0;
		}

		/* Turn around and get ready to receive some data */
		self->io.direction = IO_RECV;
		new_icr |= ICR_ERBRI;
	}

	/* Rx FIFO threshold or timeout */
	if (isr & ISR_RXTH_I) {
		w83977af_pio_receive(self);

		/* Keep receiving */
		new_icr |= ICR_ERBRI;
	}
	return new_icr;
}

/*
 * Function w83977af_fir_interrupt (self, isr)
 *
 *    Handle MIR/FIR interrupt
 *
 */
static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
{
	__u8 new_icr = 0;
	__u8 set;
	int iobase;

	iobase = self->io.fir_base;
	set = inb(iobase+SSR);

	/* End of frame detected in FIFO */
	if (isr & (ISR_FEND_I|ISR_FSF_I)) {
		if (w83977af_dma_receive_complete(self)) {

			/* Wait for next status FIFO interrupt */
			new_icr |= ICR_EFSFI;
		} else {
			/* DMA not finished yet */

			/* Set timer value, resolution 1 ms */
			switch_bank(iobase, SET4);
			outb(0x01, iobase+TMRL); /* 1 ms */
			outb(0x00, iobase+TMRH);

			/* Start timer */
			outb(IR_MSL_EN_TMR, iobase+IR_MSL);

			new_icr |= ICR_ETMRI;
		}
	}
	/* Timer finished */
	if (isr & ISR_TMR_I) {
		/* Disable timer */
		switch_bank(iobase, SET4);
		outb(0, iobase+IR_MSL);

		/* Clear timer event */
		/* switch_bank(iobase, SET0); */
		/* outb(ASCR_CTE, iobase+ASCR); */

		/* Check if this is a TX timer interrupt */
		if (self->io.direction == IO_XMIT) {
			w83977af_dma_write(self, iobase);

			new_icr |= ICR_EDMAI;
		} else {
			/* Check if DMA has now finished */
			w83977af_dma_receive_complete(self);

			new_icr |= ICR_EFSFI;
		}
	}
	/* Finished with DMA */
	if (isr & ISR_DMA_I) {
		w83977af_dma_xmit_complete(self);

		/* Check if there are more frames to be transmitted */
		/* if (irda_device_txqueue_empty(self)) { */

		/*
		 * Prepare for receive
		 *
		 * ** Netwinder Tx DMA likes that we do this anyway **
		 */
		w83977af_dma_receive(self);
		new_icr = ICR_EFSFI;
		/* } */
	}

	/* Restore set */
	outb(set, iobase+SSR);

	return new_icr;
}

/*
 * Function w83977af_interrupt (irq, dev_id, regs)
 *
 *    An interrupt from the chip has arrived. Time to do some work
 *
 */
static irqreturn_t w83977af_interrupt(int irq, void *dev_id,
				      struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct w83977af_ir *self;
	__u8 set, icr, isr;
	int iobase;

	if (!dev) {
		printk(KERN_WARNING "%s: irq %d for unknown device.\n",
		       driver_name, irq);
		return IRQ_NONE;
	}
	self = (struct w83977af_ir *) dev->priv;

	iobase = self->io.fir_base;

	/* Save current bank */
	set = inb(iobase+SSR);
	switch_bank(iobase, SET0);

	icr = inb(iobase+ICR);
	isr = inb(iobase+ISR) & icr; /* Mask out the interesting ones */

	outb(0, iobase+ICR); /* Disable interrupts */

	if (isr) {
		/* Dispatch interrupt handler for the current speed */
		if (self->io.speed > PIO_MAX_SPEED)
			icr = w83977af_fir_interrupt(self, isr);
		else
			icr = w83977af_sir_interrupt(self, isr);
	}

	outb(icr, iobase+ICR);    /* Restore (new) interrupts */
	outb(set, iobase+SSR);    /* Restore bank register */
	return IRQ_RETVAL(isr);
}

/*
 * Function w83977af_is_receiving (self)
 *
 *    Return TRUE if we are currently receiving a frame
 *
 */
static int w83977af_is_receiving(struct w83977af_ir *self)
{
	int status = FALSE;
	int iobase;
	__u8 set;

	IRDA_ASSERT(self != NULL, return FALSE;);

	if (self->io.speed > 115200) {
		iobase = self->io.fir_base;

		/* Check if rx FIFO is not empty */
		set = inb(iobase+SSR);
		switch_bank(iobase, SET2);
		if ((inb(iobase+RXFDTH) & 0x3f) != 0) {
			/* We are receiving something */
			status = TRUE;
		}
		outb(set, iobase+SSR);
	} else
		status = (self->rx_buff.state != OUTSIDE_FRAME);

	return status;
}

/*
 * Function w83977af_net_open (dev)
 *
 *    Start the device
 *
 */
static int w83977af_net_open(struct net_device *dev)
{
	struct w83977af_ir *self;
	int iobase;
	char hwname[32];
	__u8 set;

	IRDA_DEBUG(0, "%s()\n", __FUNCTION__);

	IRDA_ASSERT(dev != NULL, return -1;);
	self = (struct w83977af_ir *) dev->priv;

	IRDA_ASSERT(self != NULL, return 0;);

	iobase = self->io.fir_base;

	if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name,
			(void *) dev)) {
		return -EAGAIN;
	}
	/*
	 * Always allocate the DMA channel after the IRQ,
	 * and clean up on failure.
	 */
	if (request_dma(self->io.dma, dev->name)) {
		free_irq(self->io.irq, dev);
		return -EAGAIN;
	}

	/* Save current set */
	set = inb(iobase+SSR);

	/* Enable some interrupts so we can receive frames again */
	switch_bank(iobase, SET0);
	if (self->io.speed > 115200) {
		outb(ICR_EFSFI, iobase+ICR);
		w83977af_dma_receive(self);
	} else
		outb(ICR_ERBRI, iobase+ICR);

	/* Restore bank register */
	outb(set, iobase+SSR);

	/* Ready to play! */
	netif_start_queue(dev);

	/* Give self a hardware name */
	sprintf(hwname, "w83977af @ 0x%03x", self->io.fir_base);

	/*
	 * Open new IrLAP layer instance, now that everything should be
	 * initialized properly
	 */
	self->irlap = irlap_open(dev, &self->qos, hwname);

	return 0;
}

/*
 * Function w83977af_net_close (dev)
 *
 *    Stop the device
 *
 */
static int w83977af_net_close(struct net_device *dev)
{
	struct w83977af_ir *self;
	int iobase;
	__u8 set;

	IRDA_DEBUG(0, "%s()\n", __FUNCTION__);

	IRDA_ASSERT(dev != NULL, return -1;);

	self = (struct w83977af_ir *) dev->priv;

	IRDA_ASSERT(self != NULL, return 0;);

	iobase = self->io.fir_base;

	/* Stop device */
	netif_stop_queue(dev);

	/* Stop and remove instance of IrLAP */
	if (self->irlap)
		irlap_close(self->irlap);
	self->irlap = NULL;

	disable_dma(self->io.dma);

	/* Save current set */
	set = inb(iobase+SSR);

	/* Disable interrupts */
	switch_bank(iobase, SET0);
	outb(0, iobase+ICR);

	free_irq(self->io.irq, dev);
	free_dma(self->io.dma);

	/* Restore bank register */
	outb(set, iobase+SSR);

	return 0;
}

/*
 * Function w83977af_net_ioctl (dev, rq, cmd)
 *
 *    Process IOCTL commands for this device
 *
 */
static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct if_irda_req *irq = (struct if_irda_req *) rq;
	struct w83977af_ir *self;
	unsigned long flags;
	int ret = 0;

	IRDA_ASSERT(dev != NULL, return -1;);

	self = dev->priv;

	IRDA_ASSERT(self != NULL, return -1;);

	IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd);

	spin_lock_irqsave(&self->lock, flags);

	switch (cmd) {
	case SIOCSBANDWIDTH: /* Set bandwidth */
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		w83977af_change_speed(self, irq->ifr_baudrate);
		break;
	case SIOCSMEDIABUSY: /* Set media busy */
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		irda_device_set_media_busy(self->netdev, TRUE);
		break;
	case SIOCGRECEIVING: /* Check if we are receiving right now */
		irq->ifr_receiving = w83977af_is_receiving(self);
		break;
	default:
		ret = -EOPNOTSUPP;
	}
out:
	spin_unlock_irqrestore(&self->lock, flags);
	return ret;
}

static struct net_device_stats *w83977af_net_get_stats(struct net_device *dev)
{
	struct w83977af_ir *self = (struct w83977af_ir *) dev->priv;

	return &self->stats;
}

MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
MODULE_DESCRIPTION("Winbond W83977AF IrDA Device Driver");
MODULE_LICENSE("GPL");


module_param(qos_mtt_bits, int, 0);
MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
module_param_array(io, int, NULL, 0);
MODULE_PARM_DESC(io, "Base I/O addresses");
module_param_array(irq, int, NULL, 0);
MODULE_PARM_DESC(irq, "IRQ lines");
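
/*
 * Typical usage, assuming the module is built as w83977af_ir.ko and the
 * defaults above don't match the hardware, e.g. on a NetWinder:
 *
 *     modprobe w83977af_ir io=0x180 irq=6 qos_mtt_bits=0x07
 */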

/*
 * Function init_module (void)
 *
 *
 *
 */
module_init(w83977af_init);

/*
 * Function cleanup_module (void)
 *
 *
 *
 */
module_exit(w83977af_cleanup);