/*
  Madge Ambassador ATM Adapter driver.
  Copyright (C) 1995-1999  Madge Networks Ltd.

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

  The GNU GPL is contained in /usr/doc/copyright/GPL on a Debian
  system and in the file COPYING in the Linux kernel source.
*/

/* * dedicated to the memory of Graham Gordon 1971-1998 * */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/atmdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/poison.h>

#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include "ambassador.h"

#define maintainer_string "Giuliano Procida at Madge Networks <gprocida@madge.com>"
#define description_string "Madge ATM Ambassador driver"
#define version_string "1.2.4"

static inline void __init show_version (void) {
  printk ("%s version %s\n", description_string, version_string);
}

/*

  Theory of Operation

  I Hardware, detection, initialisation and shutdown.

  1. Supported Hardware

  This driver is for the PCI ATMizer-based Ambassador card (except
  very early versions). It is not suitable for the similar EISA "TR7"
  card. Commercially, both cards are known as Collage Server ATM
  adapters.

  The loader supports image transfer to the card, image start and a
  few other miscellaneous commands.

  Only AAL5 is supported with vpi = 0 and vci in the range 0 to 1023.

  The cards are big-endian.

  2. Detection

  Standard PCI stuff; the early cards are detected and rejected.

  3. Initialisation

  The cards are reset and the self-test results are checked. The
  microcode image is then transferred and started. This waits for a
  pointer to a descriptor containing details of the host-based queues
  and buffers and various parameters etc. Once they are processed,
  normal operations may begin. The BIA is read using a microcode
  command.

  4. Shutdown

  This may be accomplished either by a card reset or via the microcode
  shutdown command. Further investigation required.

  5. Persistent state

  The card reset does not affect PCI configuration (good) or the
  contents of several other "shared run-time registers" (bad) which
  include doorbell and interrupt control as well as EEPROM and PCI
  control. The driver must be careful when modifying these registers
  not to touch bits it does not use and to undo any changes at exit.

  II Driver software

  0. Generalities

  The adapter is quite intelligent (fast) and has a simple interface
  (few features). VPI is always zero; 1024 VCIs are supported. There
  is limited cell rate support. UBR channels can be capped and ABR
  (explicit rate, but not EFCI) is supported. There is no CBR or VBR
  support.

  1. Driver <-> Adapter Communication

  Apart from the basic loader commands, the driver communicates
  through three entities: the command queue (CQ), the transmit queue
  pair (TXQ) and the receive queue pairs (RXQ). These three entities
  are set up by the host and passed to the microcode just after it has
  been started.

  All queues are host-based circular queues. They are contiguous and
  (due to hardware limitations) have some restrictions as to their
  locations in (bus) memory. They are of the "full means the same as
  empty so don't do that" variety since the adapter uses pointers
  internally.

  The queue pairs work as follows: one queue is for supply to the
  adapter, items in it are pending and are owned by the adapter; the
  other is the queue for return from the adapter, items in it have
  been dealt with by the adapter. The host adds items to the supply
  (TX descriptors and free RX buffer descriptors) and removes items
  from the return (TX and RX completions). The adapter deals with
  out-of-order completions.

  Interrupts (card to host) and the doorbell (host to card) are used
  for signalling.

  2. CQ

  This is to communicate "open VC", "close VC", "get stats" etc. to
  the adapter. At most one command is retired every millisecond by the
  card. There is no out-of-order completion or notification. The
  driver needs to check the return code of the command, waiting as
  appropriate.

  3. TXQ

  TX supply items are of variable length (scatter gather support) and
  so the queue items are (more or less) pointers to the real thing.
  Each TX supply item contains a unique, host-supplied handle (the skb
  bus address seems most sensible as this works for Alphas as well;
  there is no need to do any endian conversions on the handles).

  TX return items consist of just the handles above.

  4. RXQ (up to 4 of these with different lengths and buffer sizes)

  RX supply items consist of a unique, host-supplied handle (the skb
  bus address again) and a pointer to the buffer data area.

  RX return items consist of the handle above, the VC, length and a
  status word. This just screams "oh so easy" doesn't it?

  Note on RX pool sizes:

  Each pool should have enough buffers to handle a back-to-back stream
  of minimum-sized frames on a single VC. For example:

    frame spacing = 3us (about right)

    delay = IRQ lat + RX handling + RX buffer replenish = 20 (us)  (a guess)

    min number of buffers for one VC = 1 + delay/spacing (buffers)

    delay/spacing = latency = (20+2)/3 = 7 (buffers)  (rounding up)

  The 20us delay assumes that there is no need to sleep; if we need to
  sleep to get buffers we are going to drop frames anyway.

  In fact, each pool should have enough buffers to support the
  simultaneous reassembly of a separate frame on each VC and cope with
  the case in which frames complete in round-robin cell fashion on
  each VC.

  Only one frame can complete at each cell arrival, so if "n" VCs are
  open, the worst case is to have them all complete frames together
  followed by all starting new frames together.

    desired number of buffers = n + delay/spacing

  These are the extreme requirements; however, they are "n+k" for some
  "k", so we have only the constant to choose. This is the argument
  rx_lats, which currently defaults to 7.

  Actually, "n ? n+k : 0" is better and this is what is implemented,
  subject to the limit given by the pool size (restated as a small
  illustrative sketch just after this comment).

  5. Driver locking

  Simple spinlocks are used around the TX and RX queue mechanisms.
  Anyone with a faster, working method is welcome to implement it.

  The adapter command queue is protected with a spinlock. We always
  wait for commands to complete.

  A more complex form of locking is used around parts of the VC open
  and close functions. There are three reasons for a lock: 1. we need
  to do atomic rate reservation and release (not used yet); 2. opening
  sometimes involves two adapter commands which must not be separated
  by another command on the same VC; 3. changes to the RX pool size
  must be atomic. The lock needs to work over context switches, so we
  use a semaphore.

  III Hardware Features and Microcode Bugs

  1. Byte Ordering

  *%^"$&%^$*&^"$(%^$#&^%$(&#%$*(&^#%!"!"!*!

  2. Memory access

  All structures that are not accessed using DMA must be 4-byte
  aligned (not a problem) and must not cross 4MB boundaries.

  There is a DMA memory hole at E0000000-E00000FF (groan).

  TX fragments (DMA read) must not cross 4MB boundaries (would be 16MB
  but for a hardware bug).

  RX buffers (DMA write) must not cross 16MB boundaries and must
  include spare trailing bytes up to the next 4-byte boundary; they
  will be written with rubbish.

  The PLX likes to prefetch; if reading up to 4 u32 past the end of
  each TX fragment is not a problem, then TX can be made to go a
  little faster by passing a flag at init that disables a prefetch
  workaround. We do not pass this flag. (new microcode only)

  Now we:
  . Note that alloc_skb rounds up size to a 16-byte boundary.
  . Ensure all areas do not traverse 4MB boundaries.
  . Ensure all areas do not start at an E00000xx bus address.
  (I cannot be certain, but this may always hold with Linux)
  . Make all failures cause a loud message.
  . Discard non-conforming SKBs (causes TX failure or RX fill delay).
  . Discard non-conforming TX fragment descriptors (the TX fails).
  In the future we could:
  . Allow RX areas that traverse 4MB (but not 16MB) boundaries.
  . Segment TX areas into some/more fragments, when necessary.
  . Relax checks for non-DMA items (ignore hole).
  . Give scatter-gather (iovec) requirements using ???. (?)

  3. VC close is broken (only for new microcode)

  The VC close adapter microcode command fails to do anything if any
  frames have been received on the VC but none have been transmitted.
  Frames continue to be reassembled and passed (with IRQ) to the
  driver.

  IV To Do List

  . Fix bugs!

  . Timer code may be broken.

  . Deal with buggy VC close (somehow) in microcode 12.

  . Handle interrupted and/or non-blocking writes - is this a job for
    the protocol layer?

  . Add code to break up TX fragments when they span 4MB boundaries.

  . Add SUNI phy layer (need to know where SUNI lives on card).

  . Implement a tx_alloc fn to (a) satisfy TX alignment etc. and (b)
    leave extra headroom space for Ambassador TX descriptors.

  . Understand these elements of struct atm_vcc: recvq (proto?),
    sleep, callback, listenq, backlog_quota, reply and user_back.

  . Adjust TX/RX skb allocation to favour IP with LANE/CLIP (configurable).

  . Impose a TX-pending limit (2?) on each VC, help avoid TX q overflow.

  . Decide whether RX buffer recycling is or can be made completely safe;
    turn it back on. It looks like Werner is going to axe this.

  . Implement QoS changes on open VCs (involves extracting parts of VC open
    and close into separate functions and using them to make changes).

  . Hack on command queue so that someone can issue multiple commands and wait
    on the last one (OR only "no-op" or "wait" commands are waited for).

  . Eliminate need for while-schedule around do_command.

*/
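
/* The RX pool sizing rule described above ("n ? n+k : 0", with k = rx_lats
   and a cap at the pool size) restated as a stand-alone helper.  This is an
   illustrative sketch only, kept out of the build with #if 0: the driver
   applies the same rule incrementally in amb_open/amb_close via
   buffers_wanted, and the function and parameter names here are invented
   for clarity. */
#if 0
static unsigned int rx_buffers_wanted (unsigned int open_vcs,
				       unsigned int pool_size) {
  // no VCs using this pool: keep no buffers at all
  if (!open_vcs)
    return 0;
  // one buffer per reassembling VC plus rx_lats spares, limited by the pool
  if (open_vcs + rx_lats > pool_size)
    return pool_size;
  return open_vcs + rx_lats;
}
#endif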

/********** microcode **********/

#ifdef AMB_NEW_MICROCODE
#define UCODE(x) UCODE2(atmsar12.x)
#else
#define UCODE(x) UCODE2(atmsar11.x)
#endif
#define UCODE2(x) #x

static u32 __devinitdata ucode_start =
#include UCODE(start)
;

static region __devinitdata ucode_regions[] = {
#include UCODE(regions)
  { 0, 0 }
};

static u32 __devinitdata ucode_data[] = {
#include UCODE(data)
  0xdeadbeef
};

static void do_housekeeping (unsigned long arg);

/********** globals **********/

static unsigned short debug = 0;
static unsigned int cmds = 8;
static unsigned int txs = 32;
static unsigned int rxs[NUM_RX_POOLS] = { 64, 64, 64, 64 };
static unsigned int rxs_bs[NUM_RX_POOLS] = { 4080, 12240, 36720, 65535 };
static unsigned int rx_lats = 7;
static unsigned char pci_lat = 0;

static const unsigned long onegigmask = -1 << 30;

/********** access to adapter **********/

static inline void wr_plain (const amb_dev * dev, size_t addr, u32 data) {
  PRINTD (DBG_FLOW|DBG_REGS, "wr: %08zx <- %08x", addr, data);
#ifdef AMB_MMIO
  dev->membase[addr / sizeof(u32)] = data;
#else
  outl (data, dev->iobase + addr);
#endif
}

static inline u32 rd_plain (const amb_dev * dev, size_t addr) {
#ifdef AMB_MMIO
  u32 data = dev->membase[addr / sizeof(u32)];
#else
  u32 data = inl (dev->iobase + addr);
#endif
  PRINTD (DBG_FLOW|DBG_REGS, "rd: %08zx -> %08x", addr, data);
  return data;
}

static inline void wr_mem (const amb_dev * dev, size_t addr, u32 data) {
  __be32 be = cpu_to_be32 (data);
  PRINTD (DBG_FLOW|DBG_REGS, "wr: %08zx <- %08x b[%08x]", addr, data, be);
#ifdef AMB_MMIO
  dev->membase[addr / sizeof(u32)] = be;
#else
  outl (be, dev->iobase + addr);
#endif
}

static inline u32 rd_mem (const amb_dev * dev, size_t addr) {
#ifdef AMB_MMIO
  __be32 be = dev->membase[addr / sizeof(u32)];
#else
  __be32 be = inl (dev->iobase + addr);
#endif
  u32 data = be32_to_cpu (be);
  PRINTD (DBG_FLOW|DBG_REGS, "rd: %08zx -> %08x b[%08x]", addr, data, be);
  return data;
}

/********** dump routines **********/

static inline void dump_registers (const amb_dev * dev) {
#ifdef DEBUG_AMBASSADOR
  if (debug & DBG_REGS) {
    size_t i;
    PRINTD (DBG_REGS, "reading PLX control: ");
    for (i = 0x00; i < 0x30; i += sizeof(u32))
      rd_mem (dev, i);
    PRINTD (DBG_REGS, "reading mailboxes: ");
    for (i = 0x40; i < 0x60; i += sizeof(u32))
      rd_mem (dev, i);
    PRINTD (DBG_REGS, "reading doorb irqev irqen reset:");
    for (i = 0x60; i < 0x70; i += sizeof(u32))
      rd_mem (dev, i);
  }
#else
  (void) dev;
#endif
  return;
}

static inline void dump_loader_block (volatile loader_block * lb) {
#ifdef DEBUG_AMBASSADOR
  unsigned int i;
  PRINTDB (DBG_LOAD, "lb @ %p; res: %d, cmd: %d, pay:",
	   lb, be32_to_cpu (lb->result), be32_to_cpu (lb->command));
  for (i = 0; i < MAX_COMMAND_DATA; ++i)
    PRINTDM (DBG_LOAD, " %08x", be32_to_cpu (lb->payload.data[i]));
  PRINTDE (DBG_LOAD, ", vld: %08x", be32_to_cpu (lb->valid));
#else
  (void) lb;
#endif
  return;
}

static inline void dump_command (command * cmd) {
#ifdef DEBUG_AMBASSADOR
  unsigned int i;
  PRINTDB (DBG_CMD, "cmd @ %p, req: %08x, pars:",
	   cmd, /*be32_to_cpu*/ (cmd->request));
  for (i = 0; i < 3; ++i)
    PRINTDM (DBG_CMD, " %08x", /*be32_to_cpu*/ (cmd->args.par[i]));
  PRINTDE (DBG_CMD, "");
#else
  (void) cmd;
#endif
  return;
}

static inline void dump_skb (char * prefix, unsigned int vc, struct sk_buff * skb) {
#ifdef DEBUG_AMBASSADOR
  unsigned int i;
  unsigned char * data = skb->data;
  PRINTDB (DBG_DATA, "%s(%u) ", prefix, vc);
  for (i=0; i<skb->len && i < 256;i++)
    PRINTDM (DBG_DATA, "%02x ", data[i]);
  PRINTDE (DBG_DATA,"");
#else
  (void) prefix;
  (void) vc;
  (void) skb;
#endif
  return;
}

/********** check memory areas for use by Ambassador **********/

/* see limitations under Hardware Features */

static inline int check_area (void * start, size_t length) {
  // assumes length > 0
  const u32 fourmegmask = -1 << 22;
  const u32 twofivesixmask = -1 << 8;
  const u32 starthole = 0xE0000000;
  u32 startaddress = virt_to_bus (start);
  u32 lastaddress = startaddress+length-1;
  if ((startaddress ^ lastaddress) & fourmegmask ||
      (startaddress & twofivesixmask) == starthole) {
    PRINTK (KERN_ERR, "check_area failure: [%x,%x] - mail maintainer!",
	    startaddress, lastaddress);
    return -1;
  } else {
    return 0;
  }
}
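
/* Worked example (illustrative): a 16-byte area whose bus addresses run from
   0x003ffff8 to 0x00400007 straddles a 4MB boundary and is rejected by
   check_area, as is any area starting at a bus address in the E00000xx
   DMA hole. */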

/********** free an skb (as per ATM device driver documentation) **********/

static inline void amb_kfree_skb (struct sk_buff * skb) {
  if (ATM_SKB(skb)->vcc->pop) {
    ATM_SKB(skb)->vcc->pop (ATM_SKB(skb)->vcc, skb);
  } else {
    dev_kfree_skb_any (skb);
  }
}

/********** TX completion **********/

static inline void tx_complete (amb_dev * dev, tx_out * tx) {
  tx_simple * tx_descr = bus_to_virt (tx->handle);
  struct sk_buff * skb = tx_descr->skb;

  PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);

  // VC layer stats
  atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);

  // free the descriptor
  kfree (tx_descr);

  // free the skb
  amb_kfree_skb (skb);

  dev->stats.tx_ok++;
  return;
}

/********** RX completion **********/

static void rx_complete (amb_dev * dev, rx_out * rx) {
  struct sk_buff * skb = bus_to_virt (rx->handle);
  u16 vc = be16_to_cpu (rx->vc);
  // unused: u16 lec_id = be16_to_cpu (rx->lec_id);
  u16 status = be16_to_cpu (rx->status);
  u16 rx_len = be16_to_cpu (rx->length);

  PRINTD (DBG_FLOW|DBG_RX, "rx_complete %p %p (len=%hu)", dev, rx, rx_len);

  // XXX move this in and add to VC stats ???
  if (!status) {
    struct atm_vcc * atm_vcc = dev->rxer[vc];
    dev->stats.rx.ok++;

    if (atm_vcc) {

      if (rx_len <= atm_vcc->qos.rxtp.max_sdu) {

	if (atm_charge (atm_vcc, skb->truesize)) {

	  // prepare socket buffer
	  ATM_SKB(skb)->vcc = atm_vcc;
	  skb_put (skb, rx_len);

	  dump_skb ("<<<", vc, skb);

	  // VC layer stats
	  atomic_inc(&atm_vcc->stats->rx);
	  __net_timestamp(skb);
	  // end of our responsibility
	  atm_vcc->push (atm_vcc, skb);
	  return;

	} else {
	  // someone fix this (message), please!
	  PRINTD (DBG_INFO|DBG_RX, "dropped thanks to atm_charge (vc %hu, truesize %u)", vc, skb->truesize);
	  // drop stats incremented in atm_charge
	}

      } else {
	PRINTK (KERN_INFO, "dropped over-size frame");
	// should we count this?
	atomic_inc(&atm_vcc->stats->rx_drop);
      }

    } else {
      PRINTD (DBG_WARN|DBG_RX, "got frame but RX closed for channel %hu", vc);
      // this is an adapter bug, only in new version of microcode
    }

  } else {
    dev->stats.rx.error++;
    if (status & CRC_ERR)
      dev->stats.rx.badcrc++;
    if (status & LEN_ERR)
      dev->stats.rx.toolong++;
    if (status & ABORT_ERR)
      dev->stats.rx.aborted++;
    if (status & UNUSED_ERR)
      dev->stats.rx.unused++;
  }

  dev_kfree_skb_any (skb);
  return;
}

/*

  Note on queue handling.

  Here "give" and "take" refer to queue entries and a queue (pair)
  rather than frames to or from the host or adapter. Empty frame
  buffers are given to the RX queue pair and returned unused or
  containing RX frames. TX frames (well, pointers to TX fragment
  lists) are given to the TX queue pair, completions are returned.

*/

/********** command queue **********/

// I really don't like this, but it's the best I can do at the moment

// also, the callers are responsible for byte order as the microcode
// sometimes does 16-bit accesses (yuk yuk yuk)

static int command_do (amb_dev * dev, command * cmd) {
  amb_cq * cq = &dev->cq;
  volatile amb_cq_ptrs * ptrs = &cq->ptrs;
  command * my_slot;

  PRINTD (DBG_FLOW|DBG_CMD, "command_do %p", dev);

  if (test_bit (dead, &dev->flags))
    return 0;

  spin_lock (&cq->lock);

  // if not full...
  if (cq->pending < cq->maximum) {
    // remember my slot for later
    my_slot = ptrs->in;
    PRINTD (DBG_CMD, "command in slot %p", my_slot);

    dump_command (cmd);

    // copy command in
    *ptrs->in = *cmd;
    cq->pending++;
    ptrs->in = NEXTQ (ptrs->in, ptrs->start, ptrs->limit);

    // mail the command
    wr_mem (dev, offsetof(amb_mem, mb.adapter.cmd_address), virt_to_bus (ptrs->in));

    if (cq->pending > cq->high)
      cq->high = cq->pending;
    spin_unlock (&cq->lock);

    // these comments were in a while-loop before, msleep removes the loop
    // go to sleep
    // PRINTD (DBG_CMD, "wait: sleeping %lu for command", timeout);
    msleep(cq->pending);

    // wait for my slot to be reached (all waiters are here or above, until...)
    while (ptrs->out != my_slot) {
      PRINTD (DBG_CMD, "wait: command slot (now at %p)", ptrs->out);
      set_current_state(TASK_UNINTERRUPTIBLE);
      schedule();
    }

    // wait on my slot (... one gets to its slot, and... )
    while (ptrs->out->request != cpu_to_be32 (SRB_COMPLETE)) {
      PRINTD (DBG_CMD, "wait: command slot completion");
      set_current_state(TASK_UNINTERRUPTIBLE);
      schedule();
    }

    PRINTD (DBG_CMD, "command complete");
    // update queue (... moves the queue along to the next slot)
    spin_lock (&cq->lock);
    cq->pending--;
    // copy command out
    *cmd = *ptrs->out;
    ptrs->out = NEXTQ (ptrs->out, ptrs->start, ptrs->limit);
    spin_unlock (&cq->lock);

    return 0;
  } else {
    cq->filled++;
    spin_unlock (&cq->lock);
    return -EAGAIN;
  }

}

/********** TX queue pair **********/

static inline int tx_give (amb_dev * dev, tx_in * tx) {
  amb_txq * txq = &dev->txq;
  unsigned long flags;

  PRINTD (DBG_FLOW|DBG_TX, "tx_give %p", dev);

  if (test_bit (dead, &dev->flags))
    return 0;

  spin_lock_irqsave (&txq->lock, flags);

  if (txq->pending < txq->maximum) {
    PRINTD (DBG_TX, "TX in slot %p", txq->in.ptr);

    *txq->in.ptr = *tx;
    txq->pending++;
    txq->in.ptr = NEXTQ (txq->in.ptr, txq->in.start, txq->in.limit);
    // hand over the TX and ring the bell
    wr_mem (dev, offsetof(amb_mem, mb.adapter.tx_address), virt_to_bus (txq->in.ptr));
    wr_mem (dev, offsetof(amb_mem, doorbell), TX_FRAME);

    if (txq->pending > txq->high)
      txq->high = txq->pending;
    spin_unlock_irqrestore (&txq->lock, flags);
    return 0;
  } else {
    txq->filled++;
    spin_unlock_irqrestore (&txq->lock, flags);
    return -EAGAIN;
  }
}

static inline int tx_take (amb_dev * dev) {
  amb_txq * txq = &dev->txq;
  unsigned long flags;

  PRINTD (DBG_FLOW|DBG_TX, "tx_take %p", dev);

  spin_lock_irqsave (&txq->lock, flags);

  if (txq->pending && txq->out.ptr->handle) {
    // deal with TX completion
    tx_complete (dev, txq->out.ptr);
    // mark unused again
    txq->out.ptr->handle = 0;
    // remove item
    txq->pending--;
    txq->out.ptr = NEXTQ (txq->out.ptr, txq->out.start, txq->out.limit);

    spin_unlock_irqrestore (&txq->lock, flags);
    return 0;
  } else {

    spin_unlock_irqrestore (&txq->lock, flags);
    return -1;
  }
}

/********** RX queue pairs **********/

static inline int rx_give (amb_dev * dev, rx_in * rx, unsigned char pool) {
  amb_rxq * rxq = &dev->rxq[pool];
  unsigned long flags;

  PRINTD (DBG_FLOW|DBG_RX, "rx_give %p[%hu]", dev, pool);

  spin_lock_irqsave (&rxq->lock, flags);

  if (rxq->pending < rxq->maximum) {
    PRINTD (DBG_RX, "RX in slot %p", rxq->in.ptr);

    *rxq->in.ptr = *rx;
    rxq->pending++;
    rxq->in.ptr = NEXTQ (rxq->in.ptr, rxq->in.start, rxq->in.limit);
    // hand over the RX buffer
    wr_mem (dev, offsetof(amb_mem, mb.adapter.rx_address[pool]), virt_to_bus (rxq->in.ptr));

    spin_unlock_irqrestore (&rxq->lock, flags);
    return 0;
  } else {
    spin_unlock_irqrestore (&rxq->lock, flags);
    return -1;
  }
}

static inline int rx_take (amb_dev * dev, unsigned char pool) {
  amb_rxq * rxq = &dev->rxq[pool];
  unsigned long flags;

  PRINTD (DBG_FLOW|DBG_RX, "rx_take %p[%hu]", dev, pool);

  spin_lock_irqsave (&rxq->lock, flags);

  if (rxq->pending && (rxq->out.ptr->status || rxq->out.ptr->length)) {
    // deal with RX completion
    rx_complete (dev, rxq->out.ptr);
    // mark unused again
    rxq->out.ptr->status = 0;
    rxq->out.ptr->length = 0;
    // remove item
    rxq->pending--;
    rxq->out.ptr = NEXTQ (rxq->out.ptr, rxq->out.start, rxq->out.limit);

    if (rxq->pending < rxq->low)
      rxq->low = rxq->pending;
    spin_unlock_irqrestore (&rxq->lock, flags);
    return 0;
  } else {
    if (!rxq->pending && rxq->buffers_wanted)
      rxq->emptied++;
    spin_unlock_irqrestore (&rxq->lock, flags);
    return -1;
  }
}

/********** RX Pool handling **********/

/* pre: buffers_wanted = 0, post: pending = 0 */
static inline void drain_rx_pool (amb_dev * dev, unsigned char pool) {
  amb_rxq * rxq = &dev->rxq[pool];

  PRINTD (DBG_FLOW|DBG_POOL, "drain_rx_pool %p %hu", dev, pool);

  if (test_bit (dead, &dev->flags))
    return;

  /* we are not quite like the fill pool routines as we cannot just
     remove one buffer, we have to remove all of them, but we might as
     well pretend... */
  if (rxq->pending > rxq->buffers_wanted) {
    command cmd;
    cmd.request = cpu_to_be32 (SRB_FLUSH_BUFFER_Q);
    cmd.args.flush.flags = cpu_to_be32 (pool << SRB_POOL_SHIFT);
    while (command_do (dev, &cmd))
      schedule();
    /* the pool may also be emptied via the interrupt handler */
    while (rxq->pending > rxq->buffers_wanted)
      if (rx_take (dev, pool))
	schedule();
  }

  return;
}

static void drain_rx_pools (amb_dev * dev) {
  unsigned char pool;

  PRINTD (DBG_FLOW|DBG_POOL, "drain_rx_pools %p", dev);

  for (pool = 0; pool < NUM_RX_POOLS; ++pool)
    drain_rx_pool (dev, pool);
}

static inline void fill_rx_pool (amb_dev * dev, unsigned char pool,
                                 gfp_t priority)
{
  rx_in rx;
  amb_rxq * rxq;

  PRINTD (DBG_FLOW|DBG_POOL, "fill_rx_pool %p %hu %x", dev, pool, priority);

  if (test_bit (dead, &dev->flags))
    return;

  rxq = &dev->rxq[pool];
  while (rxq->pending < rxq->maximum && rxq->pending < rxq->buffers_wanted) {

    struct sk_buff * skb = alloc_skb (rxq->buffer_size, priority);
    if (!skb) {
      PRINTD (DBG_SKB|DBG_POOL, "failed to allocate skb for RX pool %hu", pool);
      return;
    }
    if (check_area (skb->data, skb->truesize)) {
      dev_kfree_skb_any (skb);
      return;
    }
    // cast needed as there is no %? for pointer differences
    PRINTD (DBG_SKB, "allocated skb at %p, head %p, area %li",
	    skb, skb->head, (long) (skb->end - skb->head));
    rx.handle = virt_to_bus (skb);
    rx.host_address = cpu_to_be32 (virt_to_bus (skb->data));
    if (rx_give (dev, &rx, pool))
      dev_kfree_skb_any (skb);

  }

  return;
}

// top up all RX pools (can also be called as a bottom half)
static void fill_rx_pools (amb_dev * dev) {
  unsigned char pool;

  PRINTD (DBG_FLOW|DBG_POOL, "fill_rx_pools %p", dev);

  for (pool = 0; pool < NUM_RX_POOLS; ++pool)
    fill_rx_pool (dev, pool, GFP_ATOMIC);

  return;
}

/********** enable host interrupts **********/

static inline void interrupts_on (amb_dev * dev) {
  wr_plain (dev, offsetof(amb_mem, interrupt_control),
	    rd_plain (dev, offsetof(amb_mem, interrupt_control))
	    | AMB_INTERRUPT_BITS);
}

/********** disable host interrupts **********/

static inline void interrupts_off (amb_dev * dev) {
  wr_plain (dev, offsetof(amb_mem, interrupt_control),
	    rd_plain (dev, offsetof(amb_mem, interrupt_control))
	    &~ AMB_INTERRUPT_BITS);
}

/********** interrupt handling **********/

static irqreturn_t interrupt_handler(int irq, void *dev_id,
				     struct pt_regs *pt_regs) {
  amb_dev * dev = (amb_dev *) dev_id;
  (void) pt_regs;

  PRINTD (DBG_IRQ|DBG_FLOW, "interrupt_handler: %p", dev_id);

  if (!dev_id) {
    PRINTD (DBG_IRQ|DBG_ERR, "irq with NULL dev_id: %d", irq);
    return IRQ_NONE;
  }

  {
    u32 interrupt = rd_plain (dev, offsetof(amb_mem, interrupt));

    // for us or someone else sharing the same interrupt
    if (!interrupt) {
      PRINTD (DBG_IRQ, "irq not for me: %d", irq);
      return IRQ_NONE;
    }

    // definitely for us
    PRINTD (DBG_IRQ, "FYI: interrupt was %08x", interrupt);
    wr_plain (dev, offsetof(amb_mem, interrupt), -1);
  }

  {
    unsigned int irq_work = 0;
    unsigned char pool;
    for (pool = 0; pool < NUM_RX_POOLS; ++pool)
      while (!rx_take (dev, pool))
	++irq_work;
    while (!tx_take (dev))
      ++irq_work;

    if (irq_work) {
#ifdef FILL_RX_POOLS_IN_BH
      schedule_work (&dev->bh);
#else
      fill_rx_pools (dev);
#endif

      PRINTD (DBG_IRQ, "work done: %u", irq_work);
    } else {
      PRINTD (DBG_IRQ|DBG_WARN, "no work done");
    }
  }

  PRINTD (DBG_IRQ|DBG_FLOW, "interrupt_handler done: %p", dev_id);
  return IRQ_HANDLED;
}

/********** make rate (not quite as much fun as Horizon) **********/

static unsigned int make_rate (unsigned int rate, rounding r,
			       u16 * bits, unsigned int * actual) {
  unsigned char exp = -1; // hush gcc
  unsigned int man = -1;  // hush gcc

  PRINTD (DBG_FLOW|DBG_QOS, "make_rate %u", rate);

  // rates in cells per second, ITU format (nasty 16-bit floating-point)
  // given 5-bit e and 9-bit m:
  // rate = EITHER (1+m/2^9)*2^e    OR 0
  // bits = EITHER 1<<14 | e<<9 | m OR 0
  // (bit 15 is "reserved", bit 14 "non-zero")
  // smallest rate is 0 (special representation)
  // largest rate is (1+511/512)*2^31 = 4290772992 (< 2^32-1)
  // smallest non-zero rate is (1+0/512)*2^0 = 1 (> 0)
  // simple algorithm:
  // find position of top bit, this gives e
  // remove top bit and shift (rounding if feeling clever) by 9-e

  // ucode bug: please don't set bit 14! so 0 rate not representable

  if (rate > 0xffc00000U) {
    // larger than largest representable rate

    if (r == round_up) {
      return -EINVAL;
    } else {
      exp = 31;
      man = 511;
    }

  } else if (rate) {
    // representable rate

    exp = 31;
    man = rate;

    // invariant: rate = man*2^(exp-31)
    while (!(man & (1<<31))) {
      exp = exp - 1;
      man = man<<1;
    }

    // man has top bit set
    // rate = (2^31+(man-2^31))*2^(exp-31)
    // rate = (1+(man-2^31)/2^31)*2^exp
    man = man<<1;
    man &= 0xffffffffU; // a nop on 32-bit systems
    // rate = (1+man/2^32)*2^exp

    // exp is in the range 0 to 31, man is in the range 0 to 2^32-1
    // time to lose significance... we want m in the range 0 to 2^9-1
    // rounding presents a minor problem... we first decide which way
    // we are rounding (based on given rounding direction and possibly
    // the bits of the mantissa that are to be discarded).

    switch (r) {
      case round_down: {
	// just truncate
	man = man>>(32-9);
	break;
      }
      case round_up: {
	// check all bits that we are discarding
	if (man & (~0U >> 9)) {
	  man = (man>>(32-9)) + 1;
	  if (man == (1<<9)) {
	    // no need to check for round up outside of range
	    man = 0;
	    exp += 1;
	  }
	} else {
	  man = (man>>(32-9));
	}
	break;
      }
      case round_nearest: {
	// check msb that we are discarding
	if (man & (1<<(32-9-1))) {
	  man = (man>>(32-9)) + 1;
	  if (man == (1<<9)) {
	    // no need to check for round up outside of range
	    man = 0;
	    exp += 1;
	  }
	} else {
	  man = (man>>(32-9));
	}
	break;
      }
    }

  } else {
    // zero rate - not representable

    if (r == round_down) {
      return -EINVAL;
    } else {
      exp = 0;
      man = 0;
    }

  }

  PRINTD (DBG_QOS, "rate: man=%u, exp=%hu", man, exp);

  if (bits)
    *bits = /* (1<<14) | */ (exp<<9) | man;

  if (actual)
    *actual = (exp >= 9)
      ? (1 << exp) + (man << (exp-9))
      : (1 << exp) + ((man + (1<<(9-exp-1))) >> (9-exp));

  return 0;
}
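
/* Illustrative sketch of decoding the 16-bit ITU-style rate format produced
   above, i.e. the inverse of make_rate ignoring rounding.  Kept out of the
   build with #if 0; the driver itself never needs to decode rates and the
   helper name is invented. */
#if 0
static unsigned int rate_to_cps (u16 rate_bits) {
  unsigned int e = (rate_bits >> 9) & 0x1f; // 5-bit exponent
  unsigned int m = rate_bits & 0x1ff;       // 9-bit mantissa
  // zero is a special representation; otherwise rate = (1 + m/2^9) * 2^e
  if (!rate_bits)
    return 0;
  return (e >= 9)
    ? (1 << e) + (m << (e - 9))
    : (1 << e) + (m >> (9 - e));
}
#endif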

/********** Linux ATM Operations **********/

// some are not yet implemented while others do not make sense for
// this device

/********** Open a VC **********/

static int amb_open (struct atm_vcc * atm_vcc)
{
  int error;

  struct atm_qos * qos;
  struct atm_trafprm * txtp;
  struct atm_trafprm * rxtp;
  u16 tx_rate_bits;
  u16 tx_vc_bits = -1; // hush gcc
  u16 tx_frame_bits = -1; // hush gcc

  amb_dev * dev = AMB_DEV(atm_vcc->dev);
  amb_vcc * vcc;
  unsigned char pool = -1; // hush gcc
  short vpi = atm_vcc->vpi;
  int vci = atm_vcc->vci;

  PRINTD (DBG_FLOW|DBG_VCC, "amb_open %x %x", vpi, vci);

#ifdef ATM_VPI_UNSPEC
  // UNSPEC is deprecated, remove this code eventually
  if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC) {
    PRINTK (KERN_WARNING, "rejecting open with unspecified VPI/VCI (deprecated)");
    return -EINVAL;
  }
#endif

  if (!(0 <= vpi && vpi < (1<<NUM_VPI_BITS) &&
	0 <= vci && vci < (1<<NUM_VCI_BITS))) {
    PRINTD (DBG_WARN|DBG_VCC, "VPI/VCI out of range: %hd/%d", vpi, vci);
    return -EINVAL;
  }

  qos = &atm_vcc->qos;

  if (qos->aal != ATM_AAL5) {
    PRINTD (DBG_QOS, "AAL not supported");
    return -EINVAL;
  }

  // traffic parameters

  PRINTD (DBG_QOS, "TX:");
  txtp = &qos->txtp;
  if (txtp->traffic_class != ATM_NONE) {
    switch (txtp->traffic_class) {
      case ATM_UBR: {
	// we take "the PCR" as a rate-cap
	int pcr = atm_pcr_goal (txtp);
	if (!pcr) {
	  // no rate cap
	  tx_rate_bits = 0;
	  tx_vc_bits = TX_UBR;
	  tx_frame_bits = TX_FRAME_NOTCAP;
	} else {
	  rounding r;
	  if (pcr < 0) {
	    r = round_down;
	    pcr = -pcr;
	  } else {
	    r = round_up;
	  }
	  error = make_rate (pcr, r, &tx_rate_bits, NULL);
	  if (error)
	    return error;
	  tx_vc_bits = TX_UBR_CAPPED;
	  tx_frame_bits = TX_FRAME_CAPPED;
	}
	break;
      }
#if 0
      case ATM_ABR: {
	pcr = atm_pcr_goal (txtp);
	PRINTD (DBG_QOS, "pcr goal = %d", pcr);
	break;
      }
#endif
      default: {
	// PRINTD (DBG_QOS, "request for non-UBR/ABR denied");
	PRINTD (DBG_QOS, "request for non-UBR denied");
	return -EINVAL;
      }
    }
    PRINTD (DBG_QOS, "tx_rate_bits=%hx, tx_vc_bits=%hx",
	    tx_rate_bits, tx_vc_bits);
  }

  PRINTD (DBG_QOS, "RX:");
  rxtp = &qos->rxtp;
  if (rxtp->traffic_class == ATM_NONE) {
    // do nothing
  } else {
    // choose an RX pool (arranged in increasing size)
    for (pool = 0; pool < NUM_RX_POOLS; ++pool)
      if ((unsigned int) rxtp->max_sdu <= dev->rxq[pool].buffer_size) {
	PRINTD (DBG_VCC|DBG_QOS|DBG_POOL, "chose pool %hu (max_sdu %u <= %u)",
		pool, rxtp->max_sdu, dev->rxq[pool].buffer_size);
	break;
      }
    if (pool == NUM_RX_POOLS) {
      PRINTD (DBG_WARN|DBG_VCC|DBG_QOS|DBG_POOL,
	      "no pool suitable for VC (RX max_sdu %d is too large)",
	      rxtp->max_sdu);
      return -EINVAL;
    }

    switch (rxtp->traffic_class) {
      case ATM_UBR: {
	break;
      }
#if 0
      case ATM_ABR: {
	pcr = atm_pcr_goal (rxtp);
	PRINTD (DBG_QOS, "pcr goal = %d", pcr);
	break;
      }
#endif
      default: {
	// PRINTD (DBG_QOS, "request for non-UBR/ABR denied");
	PRINTD (DBG_QOS, "request for non-UBR denied");
	return -EINVAL;
      }
    }
  }

  // get space for our vcc stuff
  vcc = kmalloc (sizeof(amb_vcc), GFP_KERNEL);
  if (!vcc) {
    PRINTK (KERN_ERR, "out of memory!");
    return -ENOMEM;
  }
  atm_vcc->dev_data = (void *) vcc;

  // no failures beyond this point

  // we are not really "immediately before allocating the connection
  // identifier in hardware", but it will just have to do!
  set_bit(ATM_VF_ADDR,&atm_vcc->flags);

  if (txtp->traffic_class != ATM_NONE) {
    command cmd;

    vcc->tx_frame_bits = tx_frame_bits;

    down (&dev->vcc_sf);
    if (dev->rxer[vci]) {
      // RXer on the channel already, just modify rate...
      cmd.request = cpu_to_be32 (SRB_MODIFY_VC_RATE);
      cmd.args.modify_rate.vc = cpu_to_be32 (vci);  // vpi 0
      cmd.args.modify_rate.rate = cpu_to_be32 (tx_rate_bits << SRB_RATE_SHIFT);
      while (command_do (dev, &cmd))
	schedule();
      // ... and TX flags, preserving the RX pool
      cmd.request = cpu_to_be32 (SRB_MODIFY_VC_FLAGS);
      cmd.args.modify_flags.vc = cpu_to_be32 (vci);  // vpi 0
      cmd.args.modify_flags.flags = cpu_to_be32
	( (AMB_VCC(dev->rxer[vci])->rx_info.pool << SRB_POOL_SHIFT)
	  | (tx_vc_bits << SRB_FLAGS_SHIFT) );
      while (command_do (dev, &cmd))
	schedule();
    } else {
      // no RXer on the channel, just open (with pool zero)
      cmd.request = cpu_to_be32 (SRB_OPEN_VC);
      cmd.args.open.vc = cpu_to_be32 (vci);  // vpi 0
      cmd.args.open.flags = cpu_to_be32 (tx_vc_bits << SRB_FLAGS_SHIFT);
      cmd.args.open.rate = cpu_to_be32 (tx_rate_bits << SRB_RATE_SHIFT);
      while (command_do (dev, &cmd))
	schedule();
    }
    dev->txer[vci].tx_present = 1;
    up (&dev->vcc_sf);
  }

  if (rxtp->traffic_class != ATM_NONE) {
    command cmd;

    vcc->rx_info.pool = pool;

    down (&dev->vcc_sf);
    /* grow RX buffer pool */
    if (!dev->rxq[pool].buffers_wanted)
      dev->rxq[pool].buffers_wanted = rx_lats;
    dev->rxq[pool].buffers_wanted += 1;
    fill_rx_pool (dev, pool, GFP_KERNEL);

    if (dev->txer[vci].tx_present) {
      // TXer on the channel already
      // switch (from pool zero) to this pool, preserving the TX bits
      cmd.request = cpu_to_be32 (SRB_MODIFY_VC_FLAGS);
      cmd.args.modify_flags.vc = cpu_to_be32 (vci);  // vpi 0
      cmd.args.modify_flags.flags = cpu_to_be32
	( (pool << SRB_POOL_SHIFT)
	  | (dev->txer[vci].tx_vc_bits << SRB_FLAGS_SHIFT) );
    } else {
      // no TXer on the channel, open the VC (with no rate info)
      cmd.request = cpu_to_be32 (SRB_OPEN_VC);
      cmd.args.open.vc = cpu_to_be32 (vci);  // vpi 0
      cmd.args.open.flags = cpu_to_be32 (pool << SRB_POOL_SHIFT);
      cmd.args.open.rate = cpu_to_be32 (0);
    }
    while (command_do (dev, &cmd))
      schedule();
    // this link allows RX frames through
    dev->rxer[vci] = atm_vcc;
    up (&dev->vcc_sf);
  }

  // indicate readiness
  set_bit(ATM_VF_READY,&atm_vcc->flags);

  return 0;
}
 | 1252 |  | 
 | 1253 | /********** Close a VC **********/ | 
 | 1254 |  | 
 | 1255 | static void amb_close (struct atm_vcc * atm_vcc) { | 
 | 1256 |   amb_dev * dev = AMB_DEV (atm_vcc->dev); | 
 | 1257 |   amb_vcc * vcc = AMB_VCC (atm_vcc); | 
 | 1258 |   u16 vci = atm_vcc->vci; | 
 | 1259 |    | 
 | 1260 |   PRINTD (DBG_VCC|DBG_FLOW, "amb_close"); | 
 | 1261 |    | 
 | 1262 |   // indicate unreadiness | 
 | 1263 |   clear_bit(ATM_VF_READY,&atm_vcc->flags); | 
 | 1264 |    | 
 | 1265 |   // disable TXing | 
 | 1266 |   if (atm_vcc->qos.txtp.traffic_class != ATM_NONE) { | 
 | 1267 |     command cmd; | 
 | 1268 |      | 
 | 1269 |     down (&dev->vcc_sf); | 
 | 1270 |     if (dev->rxer[vci]) { | 
 | 1271 |       // RXer still on the channel, just modify rate... XXX not really needed | 
 | 1272 |       cmd.request = cpu_to_be32 (SRB_MODIFY_VC_RATE); | 
 | 1273 |       cmd.args.modify_rate.vc = cpu_to_be32 (vci);  // vpi 0 | 
 | 1274 |       cmd.args.modify_rate.rate = cpu_to_be32 (0); | 
 | 1275 |       // ... and clear TX rate flags (XXX to stop RM cell output?), preserving RX pool | 
 | 1276 |     } else { | 
 | 1277 |       // no RXer on the channel, close channel | 
 | 1278 |       cmd.request = cpu_to_be32 (SRB_CLOSE_VC); | 
 | 1279 |       cmd.args.close.vc = cpu_to_be32 (vci); // vpi 0 | 
 | 1280 |     } | 
 | 1281 |     dev->txer[vci].tx_present = 0; | 
 | 1282 |     while (command_do (dev, &cmd)) | 
 | 1283 |       schedule(); | 
 | 1284 |     up (&dev->vcc_sf); | 
 | 1285 |   } | 
 | 1286 |    | 
 | 1287 |   // disable RXing | 
 | 1288 |   if (atm_vcc->qos.rxtp.traffic_class != ATM_NONE) { | 
 | 1289 |     command cmd; | 
 | 1290 |      | 
 | 1291 |     // this is (the?) one reason why we need the amb_vcc struct | 
 | 1292 |     unsigned char pool = vcc->rx_info.pool; | 
 | 1293 |      | 
 | 1294 |     down (&dev->vcc_sf); | 
 | 1295 |     if (dev->txer[vci].tx_present) { | 
 | 1296 |       // TXer still on the channel, just go to pool zero XXX not really needed | 
 | 1297 |       cmd.request = cpu_to_be32 (SRB_MODIFY_VC_FLAGS); | 
 | 1298 |       cmd.args.modify_flags.vc = cpu_to_be32 (vci);  // vpi 0 | 
 | 1299 |       cmd.args.modify_flags.flags = cpu_to_be32 | 
 | 1300 | 	(dev->txer[vci].tx_vc_bits << SRB_FLAGS_SHIFT); | 
 | 1301 |     } else { | 
 | 1302 |       // no TXer on the channel, close the VC | 
 | 1303 |       cmd.request = cpu_to_be32 (SRB_CLOSE_VC); | 
 | 1304 |       cmd.args.close.vc = cpu_to_be32 (vci); // vpi 0 | 
 | 1305 |     } | 
 | 1306 |     // forget the rxer - no more skbs will be pushed | 
 | 1307 |     if (atm_vcc != dev->rxer[vci]) | 
 | 1308 |       PRINTK (KERN_ERR, "%s vcc=%p rxer[vci]=%p", | 
 | 1309 | 	      "arghhh! we're going to die!", | 
 | 1310 | 	      vcc, dev->rxer[vci]); | 
 | 1311 |     dev->rxer[vci] = NULL; | 
 | 1312 |     while (command_do (dev, &cmd)) | 
 | 1313 |       schedule(); | 
 | 1314 |      | 
 | 1315 |     /* shrink RX buffer pool */ | 
 | 1316 |     dev->rxq[pool].buffers_wanted -= 1; | 
 | 1317 |     if (dev->rxq[pool].buffers_wanted == rx_lats) { | 
 | 1318 |       dev->rxq[pool].buffers_wanted = 0; | 
 | 1319 |       drain_rx_pool (dev, pool); | 
 | 1320 |     } | 
 | 1321 |     up (&dev->vcc_sf); | 
 | 1322 |   } | 
 | 1323 |    | 
 | 1324 |   // free our structure | 
 | 1325 |   kfree (vcc); | 
 | 1326 |    | 
 | 1327 |   // say the VPI/VCI is free again | 
 | 1328 |   clear_bit(ATM_VF_ADDR,&atm_vcc->flags); | 
 | 1329 |  | 
 | 1330 |   return; | 
 | 1331 | } | 
 | 1332 |  | 
 | 1333 | /********** Set socket options for a VC **********/ | 
 | 1334 |  | 
 | 1335 | // int amb_getsockopt (struct atm_vcc * atm_vcc, int level, int optname, void * optval, int optlen); | 
 | 1336 |  | 
 | 1337 | /********** Set socket options for a VC **********/ | 
 | 1338 |  | 
 | 1339 | // int amb_setsockopt (struct atm_vcc * atm_vcc, int level, int optname, void * optval, int optlen); | 
 | 1340 |  | 
 | 1341 | /********** Send **********/ | 
 | 1342 |  | 
 | 1343 | static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) { | 
 | 1344 |   amb_dev * dev = AMB_DEV(atm_vcc->dev); | 
 | 1345 |   amb_vcc * vcc = AMB_VCC(atm_vcc); | 
 | 1346 |   u16 vc = atm_vcc->vci; | 
 | 1347 |   unsigned int tx_len = skb->len; | 
 | 1348 |   unsigned char * tx_data = skb->data; | 
 | 1349 |   tx_simple * tx_descr; | 
 | 1350 |   tx_in tx; | 
 | 1351 |    | 
 | 1352 |   if (test_bit (dead, &dev->flags)) | 
 | 1353 |     return -EIO; | 
 | 1354 |    | 
 | 1355 |   PRINTD (DBG_FLOW|DBG_TX, "amb_send vc %x data %p len %u", | 
 | 1356 | 	  vc, tx_data, tx_len); | 
 | 1357 |    | 
 | 1358 |   dump_skb (">>>", vc, skb); | 
 | 1359 |    | 
 | 1360 |   if (!dev->txer[vc].tx_present) { | 
 | 1361 |     PRINTK (KERN_ERR, "attempt to send on RX-only VC %x", vc); | 
 | 1362 |     return -EBADFD; | 
 | 1363 |   } | 
 | 1364 |    | 
 | 1365 |   // this is a driver private field so we have to set it ourselves, | 
 | 1366 |   // despite the fact that we are _required_ to use it to check for a | 
 | 1367 |   // pop function | 
 | 1368 |   ATM_SKB(skb)->vcc = atm_vcc; | 
 | 1369 |    | 
 | 1370 |   if (skb->len > (size_t) atm_vcc->qos.txtp.max_sdu) { | 
 | 1371 |     PRINTK (KERN_ERR, "sk_buff length greater than agreed max_sdu, dropping..."); | 
 | 1372 |     return -EIO; | 
 | 1373 |   } | 
 | 1374 |    | 
 | 1375 |   if (check_area (skb->data, skb->len)) { | 
 | 1376 |     atomic_inc(&atm_vcc->stats->tx_err); | 
 | 1377 |     return -ENOMEM; // ? | 
 | 1378 |   } | 
 | 1379 |    | 
 | 1380 |   // allocate memory for fragments | 
 | 1381 |   tx_descr = kmalloc (sizeof(tx_simple), GFP_KERNEL); | 
 | 1382 |   if (!tx_descr) { | 
 | 1383 |     PRINTK (KERN_ERR, "could not allocate TX descriptor"); | 
 | 1384 |     return -ENOMEM; | 
 | 1385 |   } | 
 | 1386 |   if (check_area (tx_descr, sizeof(tx_simple))) { | 
 | 1387 |     kfree (tx_descr); | 
 | 1388 |     return -ENOMEM; | 
 | 1389 |   } | 
 | 1390 |   PRINTD (DBG_TX, "fragment list allocated at %p", tx_descr); | 
 | 1391 |    | 
 | 1392 |   tx_descr->skb = skb; | 
 | 1393 |    | 
 | 1394 |   tx_descr->tx_frag.bytes = cpu_to_be32 (tx_len); | 
 | 1395 |   tx_descr->tx_frag.address = cpu_to_be32 (virt_to_bus (tx_data)); | 
 | 1396 |    | 
 | 1397 |   tx_descr->tx_frag_end.handle = virt_to_bus (tx_descr); | 
 | 1398 |   tx_descr->tx_frag_end.vc = 0; | 
 | 1399 |   tx_descr->tx_frag_end.next_descriptor_length = 0; | 
 | 1400 |   tx_descr->tx_frag_end.next_descriptor = 0; | 
 | 1401 | #ifdef AMB_NEW_MICROCODE | 
 | 1402 |   tx_descr->tx_frag_end.cpcs_uu = 0; | 
 | 1403 |   tx_descr->tx_frag_end.cpi = 0; | 
 | 1404 |   tx_descr->tx_frag_end.pad = 0; | 
 | 1405 | #endif | 
 | 1406 |    | 
 | 1407 |   tx.vc = cpu_to_be16 (vcc->tx_frame_bits | vc); | 
 | 1408 |   tx.tx_descr_length = cpu_to_be16 (sizeof(tx_frag)+sizeof(tx_frag_end)); | 
 | 1409 |   tx.tx_descr_addr = cpu_to_be32 (virt_to_bus (&tx_descr->tx_frag)); | 
 | 1410 |    | 
 | 1411 |   while (tx_give (dev, &tx)) | 
 | 1412 |     schedule(); | 
 | 1413 |   return 0; | 
 | 1414 | } | 
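 |      | /* | 
 |      |    Note on the descriptor built above: only a single data fragment is | 
 |      |    used, so the descriptor is just a tx_frag followed by a tx_frag_end. | 
 |      |    The handle field carries the bus address of the tx_simple itself, | 
 |      |    which presumably lets the TX-completion path (elsewhere in this file) | 
 |      |    recover the descriptor, and hence the skb to free, once the adapter | 
 |      |    has transmitted the frame. | 
 |      | */ | 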
 | 1415 |  | 
 | 1416 | /********** Change QoS on a VC **********/ | 
 | 1417 |  | 
 | 1418 | // int amb_change_qos (struct atm_vcc * atm_vcc, struct atm_qos * qos, int flags); | 
 | 1419 |  | 
 | 1420 | /********** Free RX Socket Buffer **********/ | 
 | 1421 |  | 
 | 1422 | #if 0 | 
 | 1423 | static void amb_free_rx_skb (struct atm_vcc * atm_vcc, struct sk_buff * skb) { | 
 | 1424 |   amb_dev * dev = AMB_DEV (atm_vcc->dev); | 
 | 1425 |   amb_vcc * vcc = AMB_VCC (atm_vcc); | 
 | 1426 |   unsigned char pool = vcc->rx_info.pool; | 
 | 1427 |   rx_in rx; | 
 | 1428 |    | 
 | 1429 |   // This may be unsafe for various reasons that I cannot really guess | 
 | 1430 |   // at. However, I note that the ATM layer calls kfree_skb rather | 
 | 1431 |   // than dev_kfree_skb at this point so we are at least covered as far | 
 | 1432 |   // as buffer locking goes. There may be bugs if pcap clones RX skbs. | 
 | 1433 |  | 
 | 1434 |   PRINTD (DBG_FLOW|DBG_SKB, "amb_rx_free skb %p (atm_vcc %p, vcc %p)", | 
 | 1435 | 	  skb, atm_vcc, vcc); | 
 | 1436 |    | 
 | 1437 |   rx.handle = virt_to_bus (skb); | 
 | 1438 |   rx.host_address = cpu_to_be32 (virt_to_bus (skb->data)); | 
 | 1439 |    | 
 | 1440 |   skb->data = skb->head; | 
 | 1441 |   skb->tail = skb->head; | 
 | 1442 |   skb->len = 0; | 
 | 1443 |    | 
 | 1444 |   if (!rx_give (dev, &rx, pool)) { | 
 | 1445 |     // success | 
 | 1446 |     PRINTD (DBG_SKB|DBG_POOL, "recycled skb for pool %hu", pool); | 
 | 1447 |     return; | 
 | 1448 |   } | 
 | 1449 |    | 
 | 1450 |   // just do what the ATM layer would have done | 
 | 1451 |   dev_kfree_skb_any (skb); | 
 | 1452 |    | 
 | 1453 |   return; | 
 | 1454 | } | 
 | 1455 | #endif | 
 | 1456 |  | 
 | 1457 | /********** Proc File Output **********/ | 
 | 1458 |  | 
 | 1459 | static int amb_proc_read (struct atm_dev * atm_dev, loff_t * pos, char * page) { | 
 | 1460 |   amb_dev * dev = AMB_DEV (atm_dev); | 
 | 1461 |   int left = *pos; | 
 | 1462 |   unsigned char pool; | 
 | 1463 |    | 
 | 1464 |   PRINTD (DBG_FLOW, "amb_proc_read"); | 
 | 1465 |    | 
 | 1466 |   /* more diagnostics here? */ | 
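 |      |   /* Each call produces at most one line of output; the ATM layer invokes | 
 |      |      proc_read with increasing *pos until 0 is returned, so "left" simply | 
 |      |      counts down to select which line to emit this time. */ | 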
 | 1467 |    | 
 | 1468 |   if (!left--) { | 
 | 1469 |     amb_stats * s = &dev->stats; | 
 | 1470 |     return sprintf (page, | 
 | 1471 | 		    "frames: TX OK %lu, RX OK %lu, RX bad %lu " | 
 | 1472 | 		    "(CRC %lu, long %lu, aborted %lu, unused %lu).\n", | 
 | 1473 | 		    s->tx_ok, s->rx.ok, s->rx.error, | 
 | 1474 | 		    s->rx.badcrc, s->rx.toolong, | 
 | 1475 | 		    s->rx.aborted, s->rx.unused); | 
 | 1476 |   } | 
 | 1477 |    | 
 | 1478 |   if (!left--) { | 
 | 1479 |     amb_cq * c = &dev->cq; | 
 | 1480 |     return sprintf (page, "cmd queue [cur/hi/max]: %u/%u/%u. ", | 
 | 1481 | 		    c->pending, c->high, c->maximum); | 
 | 1482 |   } | 
 | 1483 |    | 
 | 1484 |   if (!left--) { | 
 | 1485 |     amb_txq * t = &dev->txq; | 
 | 1486 |     return sprintf (page, "TX queue [cur/max high full]: %u/%u %u %u.\n", | 
 | 1487 | 		    t->pending, t->maximum, t->high, t->filled); | 
 | 1488 |   } | 
 | 1489 |    | 
 | 1490 |   if (!left--) { | 
 | 1491 |     unsigned int count = sprintf (page, "RX queues [cur/max/req low empty]:"); | 
 | 1492 |     for (pool = 0; pool < NUM_RX_POOLS; ++pool) { | 
 | 1493 |       amb_rxq * r = &dev->rxq[pool]; | 
 | 1494 |       count += sprintf (page+count, " %u/%u/%u %u %u", | 
 | 1495 | 			r->pending, r->maximum, r->buffers_wanted, r->low, r->emptied); | 
 | 1496 |     } | 
 | 1497 |     count += sprintf (page+count, ".\n"); | 
 | 1498 |     return count; | 
 | 1499 |   } | 
 | 1500 |    | 
 | 1501 |   if (!left--) { | 
 | 1502 |     unsigned int count = sprintf (page, "RX buffer sizes:"); | 
 | 1503 |     for (pool = 0; pool < NUM_RX_POOLS; ++pool) { | 
 | 1504 |       amb_rxq * r = &dev->rxq[pool]; | 
 | 1505 |       count += sprintf (page+count, " %u", r->buffer_size); | 
 | 1506 |     } | 
 | 1507 |     count += sprintf (page+count, ".\n"); | 
 | 1508 |     return count; | 
 | 1509 |   } | 
 | 1510 |    | 
 | 1511 | #if 0 | 
 | 1512 |   if (!left--) { | 
 | 1513 |     // suni block etc? | 
 | 1514 |   } | 
 | 1515 | #endif | 
 | 1516 |    | 
 | 1517 |   return 0; | 
 | 1518 | } | 
 | 1519 |  | 
 | 1520 | /********** Operation Structure **********/ | 
 | 1521 |  | 
 | 1522 | static const struct atmdev_ops amb_ops = { | 
 | 1523 |   .open         = amb_open, | 
 | 1524 |   .close	= amb_close, | 
 | 1525 |   .send         = amb_send, | 
 | 1526 |   .proc_read	= amb_proc_read, | 
 | 1527 |   .owner	= THIS_MODULE, | 
 | 1528 | }; | 
 | 1529 |  | 
 | 1530 | /********** housekeeping **********/ | 
 | 1531 | static void do_housekeeping (unsigned long arg) { | 
 | 1532 |   amb_dev * dev = (amb_dev *) arg; | 
 | 1533 |    | 
 | 1534 |   // could collect device-specific (not driver/atm-linux) stats here | 
 | 1535 |        | 
 | 1536 |   // last resort refill once every ten seconds | 
 | 1537 |   fill_rx_pools (dev); | 
 | 1538 |   mod_timer(&dev->housekeeping, jiffies + 10*HZ); | 
 | 1539 |    | 
 | 1540 |   return; | 
 | 1541 | } | 
 | 1542 |  | 
 | 1543 | /********** creation of communication queues **********/ | 
 | 1544 |  | 
 | 1545 | static int __devinit create_queues (amb_dev * dev, unsigned int cmds, | 
 | 1546 | 				 unsigned int txs, unsigned int * rxs, | 
 | 1547 | 				 unsigned int * rx_buffer_sizes) { | 
 | 1548 |   unsigned char pool; | 
 | 1549 |   size_t total = 0; | 
 | 1550 |   void * memory; | 
 | 1551 |   void * limit; | 
 | 1552 |    | 
 | 1553 |   PRINTD (DBG_FLOW, "create_queues %p", dev); | 
 | 1554 |    | 
 | 1555 |   total += cmds * sizeof(command); | 
 | 1556 |    | 
 | 1557 |   total += txs * (sizeof(tx_in) + sizeof(tx_out)); | 
 | 1558 |    | 
 | 1559 |   for (pool = 0; pool < NUM_RX_POOLS; ++pool) | 
 | 1560 |     total += rxs[pool] * (sizeof(rx_in) + sizeof(rx_out)); | 
 | 1561 |    | 
 | 1562 |   memory = kmalloc (total, GFP_KERNEL); | 
 | 1563 |   if (!memory) { | 
 | 1564 |     PRINTK (KERN_ERR, "could not allocate queues"); | 
 | 1565 |     return -ENOMEM; | 
 | 1566 |   } | 
 | 1567 |   if (check_area (memory, total)) { | 
 | 1568 |     PRINTK (KERN_ERR, "queues allocated in nasty area"); | 
 | 1569 |     kfree (memory); | 
 | 1570 |     return -ENOMEM; | 
 | 1571 |   } | 
 | 1572 |    | 
 | 1573 |   limit = memory + total; | 
 | 1574 |   PRINTD (DBG_INIT, "queues from %p to %p", memory, limit); | 
 | 1575 |    | 
 | 1576 |   PRINTD (DBG_CMD, "command queue at %p", memory); | 
 | 1577 |    | 
 | 1578 |   { | 
 | 1579 |     command * cmd = memory; | 
 | 1580 |     amb_cq * cq = &dev->cq; | 
 | 1581 |      | 
 | 1582 |     cq->pending = 0; | 
 | 1583 |     cq->high = 0; | 
 | 1584 |     cq->maximum = cmds - 1; | 
 | 1585 |      | 
 | 1586 |     cq->ptrs.start = cmd; | 
 | 1587 |     cq->ptrs.in = cmd; | 
 | 1588 |     cq->ptrs.out = cmd; | 
 | 1589 |     cq->ptrs.limit = cmd + cmds; | 
 | 1590 |      | 
 | 1591 |     memory = cq->ptrs.limit; | 
 | 1592 |   } | 
 | 1593 |    | 
 | 1594 |   PRINTD (DBG_TX, "TX queue pair at %p", memory); | 
 | 1595 |    | 
 | 1596 |   { | 
 | 1597 |     tx_in * in = memory; | 
 | 1598 |     tx_out * out; | 
 | 1599 |     amb_txq * txq = &dev->txq; | 
 | 1600 |      | 
 | 1601 |     txq->pending = 0; | 
 | 1602 |     txq->high = 0; | 
 | 1603 |     txq->filled = 0; | 
 | 1604 |     txq->maximum = txs - 1; | 
 | 1605 |      | 
 | 1606 |     txq->in.start = in; | 
 | 1607 |     txq->in.ptr = in; | 
 | 1608 |     txq->in.limit = in + txs; | 
 | 1609 |      | 
 | 1610 |     memory = txq->in.limit; | 
 | 1611 |     out = memory; | 
 | 1612 |      | 
 | 1613 |     txq->out.start = out; | 
 | 1614 |     txq->out.ptr = out; | 
 | 1615 |     txq->out.limit = out + txs; | 
 | 1616 |      | 
 | 1617 |     memory = txq->out.limit; | 
 | 1618 |   } | 
 | 1619 |    | 
 | 1620 |   PRINTD (DBG_RX, "RX queue pairs at %p", memory); | 
 | 1621 |    | 
 | 1622 |   for (pool = 0; pool < NUM_RX_POOLS; ++pool) { | 
 | 1623 |     rx_in * in = memory; | 
 | 1624 |     rx_out * out; | 
 | 1625 |     amb_rxq * rxq = &dev->rxq[pool]; | 
 | 1626 |      | 
 | 1627 |     rxq->buffer_size = rx_buffer_sizes[pool]; | 
 | 1628 |     rxq->buffers_wanted = 0; | 
 | 1629 |      | 
 | 1630 |     rxq->pending = 0; | 
 | 1631 |     rxq->low = rxs[pool] - 1; | 
 | 1632 |     rxq->emptied = 0; | 
 | 1633 |     rxq->maximum = rxs[pool] - 1; | 
 | 1634 |      | 
 | 1635 |     rxq->in.start = in; | 
 | 1636 |     rxq->in.ptr = in; | 
 | 1637 |     rxq->in.limit = in + rxs[pool]; | 
 | 1638 |      | 
 | 1639 |     memory = rxq->in.limit; | 
 | 1640 |     out = memory; | 
 | 1641 |      | 
 | 1642 |     rxq->out.start = out; | 
 | 1643 |     rxq->out.ptr = out; | 
 | 1644 |     rxq->out.limit = out + rxs[pool]; | 
 | 1645 |      | 
 | 1646 |     memory = rxq->out.limit; | 
 | 1647 |   } | 
 | 1648 |    | 
 | 1649 |   if (memory == limit) { | 
 | 1650 |     return 0; | 
 | 1651 |   } else { | 
 | 1652 |     PRINTK (KERN_ERR, "bad queue alloc %p != %p (tell maintainer)", memory, limit); | 
 | 1653 |     kfree (limit - total); | 
 | 1654 |     return -ENOMEM; | 
 | 1655 |   } | 
 | 1656 |    | 
 | 1657 | } | 
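 |      | /* | 
 |      |    Rough layout of the single allocation carved up above (sizes in | 
 |      |    entries): | 
 |      |  | 
 |      |      [ command x cmds ] | 
 |      |      [ tx_in x txs ][ tx_out x txs ] | 
 |      |      [ rx_in x rxs[pool] ][ rx_out x rxs[pool] ]   (one pair per pool) | 
 |      |  | 
 |      |    Each queue keeps start/limit pointers into this block; destroy_queues | 
 |      |    below frees the whole block via dev->cq.ptrs.start. | 
 |      | */ | 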
 | 1658 |  | 
 | 1659 | /********** destruction of communication queues **********/ | 
 | 1660 |  | 
 | 1661 | static void destroy_queues (amb_dev * dev) { | 
 | 1662 |   // all queues assumed empty | 
 | 1663 |   void * memory = dev->cq.ptrs.start; | 
 | 1664 |   // includes txq.in, txq.out, rxq[].in and rxq[].out | 
 | 1665 |    | 
 | 1666 |   PRINTD (DBG_FLOW, "destroy_queues %p", dev); | 
 | 1667 |    | 
 | 1668 |   PRINTD (DBG_INIT, "freeing queues at %p", memory); | 
 | 1669 |   kfree (memory); | 
 | 1670 |    | 
 | 1671 |   return; | 
 | 1672 | } | 
 | 1673 |  | 
 | 1674 | /********** basic loader commands and error handling **********/ | 
 | 1675 | // centisecond timeouts - guessing away here | 
 | 1676 | static unsigned int command_timeouts [] = { | 
 | 1677 | 	[host_memory_test]     = 15, | 
 | 1678 | 	[read_adapter_memory]  = 2, | 
 | 1679 | 	[write_adapter_memory] = 2, | 
 | 1680 | 	[adapter_start]        = 50, | 
 | 1681 | 	[get_version_number]   = 10, | 
 | 1682 | 	[interrupt_host]       = 1, | 
 | 1683 | 	[flash_erase_sector]   = 1, | 
 | 1684 | 	[adap_download_block]  = 1, | 
 | 1685 | 	[adap_erase_flash]     = 1, | 
 | 1686 | 	[adap_run_in_iram]     = 1, | 
 | 1687 | 	[adap_end_download]    = 1 | 
 | 1688 | }; | 
 | 1689 |  | 
 | 1690 |  | 
 | 1691 | static unsigned int command_successes [] = { | 
 | 1692 | 	[host_memory_test]     = COMMAND_PASSED_TEST, | 
 | 1693 | 	[read_adapter_memory]  = COMMAND_READ_DATA_OK, | 
 | 1694 | 	[write_adapter_memory] = COMMAND_WRITE_DATA_OK, | 
 | 1695 | 	[adapter_start]        = COMMAND_COMPLETE, | 
 | 1696 | 	[get_version_number]   = COMMAND_COMPLETE, | 
 | 1697 | 	[interrupt_host]       = COMMAND_COMPLETE, | 
 | 1698 | 	[flash_erase_sector]   = COMMAND_COMPLETE, | 
 | 1699 | 	[adap_download_block]  = COMMAND_COMPLETE, | 
 | 1700 | 	[adap_erase_flash]     = COMMAND_COMPLETE, | 
 | 1701 | 	[adap_run_in_iram]     = COMMAND_COMPLETE, | 
 | 1702 | 	[adap_end_download]    = COMMAND_COMPLETE | 
 | 1703 | }; | 
 | 1704 |    | 
 | 1705 | static  int decode_loader_result (loader_command cmd, u32 result) | 
 | 1706 | { | 
 | 1707 | 	int res; | 
 | 1708 | 	const char *msg; | 
 | 1709 |  | 
 | 1710 | 	if (result == command_successes[cmd]) | 
 | 1711 | 		return 0; | 
 | 1712 |  | 
 | 1713 | 	switch (result) { | 
 | 1714 | 		case BAD_COMMAND: | 
 | 1715 | 			res = -EINVAL; | 
 | 1716 | 			msg = "bad command"; | 
 | 1717 | 			break; | 
 | 1718 | 		case COMMAND_IN_PROGRESS: | 
 | 1719 | 			res = -ETIMEDOUT; | 
 | 1720 | 			msg = "command in progress"; | 
 | 1721 | 			break; | 
 | 1722 | 		case COMMAND_PASSED_TEST: | 
 | 1723 | 			res = 0; | 
 | 1724 | 			msg = "command passed test"; | 
 | 1725 | 			break; | 
 | 1726 | 		case COMMAND_FAILED_TEST: | 
 | 1727 | 			res = -EIO; | 
 | 1728 | 			msg = "command failed test"; | 
 | 1729 | 			break; | 
 | 1730 | 		case COMMAND_READ_DATA_OK: | 
 | 1731 | 			res = 0; | 
 | 1732 | 			msg = "command read data ok"; | 
 | 1733 | 			break; | 
 | 1734 | 		case COMMAND_READ_BAD_ADDRESS: | 
 | 1735 | 			res = -EINVAL; | 
 | 1736 | 			msg = "command read bad address"; | 
 | 1737 | 			break; | 
 | 1738 | 		case COMMAND_WRITE_DATA_OK: | 
 | 1739 | 			res = 0; | 
 | 1740 | 			msg = "command write data ok"; | 
 | 1741 | 			break; | 
 | 1742 | 		case COMMAND_WRITE_BAD_ADDRESS: | 
 | 1743 | 			res = -EINVAL; | 
 | 1744 | 			msg = "command write bad address"; | 
 | 1745 | 			break; | 
 | 1746 | 		case COMMAND_WRITE_FLASH_FAILURE: | 
 | 1747 | 			res = -EIO; | 
 | 1748 | 			msg = "command write flash failure"; | 
 | 1749 | 			break; | 
 | 1750 | 		case COMMAND_COMPLETE: | 
 | 1751 | 			res = 0; | 
 | 1752 | 			msg = "command complete"; | 
 | 1753 | 			break; | 
 | 1754 | 		case COMMAND_FLASH_ERASE_FAILURE: | 
 | 1755 | 			res = -EIO; | 
 | 1756 | 			msg = "command flash erase failure"; | 
 | 1757 | 			break; | 
 | 1758 | 		case COMMAND_WRITE_BAD_DATA: | 
 | 1759 | 			res = -EINVAL; | 
 | 1760 | 			msg = "command write bad data"; | 
 | 1761 | 			break; | 
 | 1762 | 		default: | 
 | 1763 | 			res = -EINVAL; | 
 | 1764 | 			msg = "unknown error"; | 
 | 1765 | 			PRINTD (DBG_LOAD|DBG_ERR, | 
 | 1766 | 				"decode_loader_result got %d=%x !", | 
 | 1767 | 				result, result); | 
 | 1768 | 			break; | 
 | 1769 | 	} | 
 | 1770 |  | 
 | 1771 | 	PRINTK (KERN_ERR, "%s", msg); | 
 | 1772 | 	return res; | 
 | 1773 | } | 
 | 1774 |  | 
 | 1775 | static int __devinit do_loader_command (volatile loader_block * lb, | 
 | 1776 | 				     const amb_dev * dev, loader_command cmd) { | 
 | 1777 |    | 
 | 1778 |   unsigned long timeout; | 
 | 1779 |    | 
 | 1780 |   PRINTD (DBG_FLOW|DBG_LOAD, "do_loader_command"); | 
 | 1781 |    | 
 | 1782 |   /* do a command | 
 | 1783 |       | 
 | 1784 |      Set the return value to zero, set the command type and set the | 
 | 1785 |      valid entry to the right magic value. The payload is already | 
 | 1786 |      correctly byte-ordered so we leave it alone. Hit the doorbell | 
 | 1787 |      with the bus address of this structure. | 
 | 1788 |       | 
 | 1789 |   */ | 
 | 1790 |    | 
 | 1791 |   lb->result = 0; | 
 | 1792 |   lb->command = cpu_to_be32 (cmd); | 
 | 1793 |   lb->valid = cpu_to_be32 (DMA_VALID); | 
 | 1794 |   // dump_registers (dev); | 
 | 1795 |   // dump_loader_block (lb); | 
 | 1796 |   wr_mem (dev, offsetof(amb_mem, doorbell), virt_to_bus (lb) & ~onegigmask); | 
 | 1797 |    | 
 | 1798 |   timeout = command_timeouts[cmd] * 10; | 
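 |      |   /* command_timeouts[] is in centiseconds, so *10 gives milliseconds; | 
 |      |      msleep_interruptible returns the milliseconds left unslept if a | 
 |      |      signal arrives, so the loop below keeps polling until the budget | 
 |      |      reaches zero. */ | 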
 | 1799 |    | 
 | 1800 |   while (!lb->result || lb->result == cpu_to_be32 (COMMAND_IN_PROGRESS)) | 
 | 1801 |     if (timeout) { | 
 | 1802 |       timeout = msleep_interruptible(timeout); | 
 | 1803 |     } else { | 
 | 1804 |       PRINTD (DBG_LOAD|DBG_ERR, "command %d timed out", cmd); | 
 | 1805 |       dump_registers (dev); | 
 | 1806 |       dump_loader_block (lb); | 
 | 1807 |       return -ETIMEDOUT; | 
 | 1808 |     } | 
 | 1809 |    | 
 | 1810 |   if (cmd == adapter_start) { | 
 | 1811 |     // wait for the start command to be acknowledged... | 
 | 1812 |     timeout = 100; | 
 | 1813 |     while (rd_plain (dev, offsetof(amb_mem, doorbell))) | 
 | 1814 |       if (timeout) { | 
 | 1815 | 	timeout = msleep_interruptible(timeout); | 
 | 1816 |       } else { | 
 | 1817 | 	PRINTD (DBG_LOAD|DBG_ERR, "start command did not clear doorbell, res=%08x", | 
 | 1818 | 		be32_to_cpu (lb->result)); | 
 | 1819 | 	dump_registers (dev); | 
 | 1820 | 	return -ETIMEDOUT; | 
 | 1821 |       } | 
 | 1822 |     return 0; | 
 | 1823 |   } else { | 
 | 1824 |     return decode_loader_result (cmd, be32_to_cpu (lb->result)); | 
 | 1825 |   } | 
 | 1826 |    | 
 | 1827 | } | 
 | 1828 |  | 
 | 1829 | /* loader: determine loader version */ | 
 | 1830 |  | 
 | 1831 | static int __devinit get_loader_version (loader_block * lb, | 
 | 1832 | 				      const amb_dev * dev, u32 * version) { | 
 | 1833 |   int res; | 
 | 1834 |    | 
 | 1835 |   PRINTD (DBG_FLOW|DBG_LOAD, "get_loader_version"); | 
 | 1836 |    | 
 | 1837 |   res = do_loader_command (lb, dev, get_version_number); | 
 | 1838 |   if (res) | 
 | 1839 |     return res; | 
 | 1840 |   if (version) | 
 | 1841 |     *version = be32_to_cpu (lb->payload.version); | 
 | 1842 |   return 0; | 
 | 1843 | } | 
 | 1844 |  | 
 | 1845 | /* loader: write memory data blocks */ | 
 | 1846 |  | 
 | 1847 | static int __devinit loader_write (loader_block * lb, | 
 | 1848 | 				const amb_dev * dev, const u32 * data, | 
 | 1849 | 				u32 address, unsigned int count) { | 
 | 1850 |   unsigned int i; | 
 | 1851 |   transfer_block * tb = &lb->payload.transfer; | 
 | 1852 |    | 
 | 1853 |   PRINTD (DBG_FLOW|DBG_LOAD, "loader_write"); | 
 | 1854 |    | 
 | 1855 |   if (count > MAX_TRANSFER_DATA) | 
 | 1856 |     return -EINVAL; | 
 | 1857 |   tb->address = cpu_to_be32 (address); | 
 | 1858 |   tb->count = cpu_to_be32 (count); | 
 | 1859 |   for (i = 0; i < count; ++i) | 
 | 1860 |     tb->data[i] = cpu_to_be32 (data[i]); | 
 | 1861 |   return do_loader_command (lb, dev, write_adapter_memory); | 
 | 1862 | } | 
 | 1863 |  | 
 | 1864 | /* loader: verify memory data blocks */ | 
 | 1865 |  | 
 | 1866 | static int __devinit loader_verify (loader_block * lb, | 
 | 1867 | 				 const amb_dev * dev, const u32 * data, | 
 | 1868 | 				 u32 address, unsigned int count) { | 
 | 1869 |   unsigned int i; | 
 | 1870 |   transfer_block * tb = &lb->payload.transfer; | 
 | 1871 |   int res; | 
 | 1872 |    | 
 | 1873 |   PRINTD (DBG_FLOW|DBG_LOAD, "loader_verify"); | 
 | 1874 |    | 
 | 1875 |   if (count > MAX_TRANSFER_DATA) | 
 | 1876 |     return -EINVAL; | 
 | 1877 |   tb->address = cpu_to_be32 (address); | 
 | 1878 |   tb->count = cpu_to_be32 (count); | 
 | 1879 |   res = do_loader_command (lb, dev, read_adapter_memory); | 
 | 1880 |   if (!res) | 
 | 1881 |     for (i = 0; i < count; ++i) | 
 | 1882 |       if (tb->data[i] != cpu_to_be32 (data[i])) { | 
 | 1883 | 	res = -EINVAL; | 
 | 1884 | 	break; | 
 | 1885 |       } | 
 | 1886 |   return res; | 
 | 1887 | } | 
 | 1888 |  | 
 | 1889 | /* loader: start microcode */ | 
 | 1890 |  | 
 | 1891 | static int __devinit loader_start (loader_block * lb, | 
 | 1892 | 				const amb_dev * dev, u32 address) { | 
 | 1893 |   PRINTD (DBG_FLOW|DBG_LOAD, "loader_start"); | 
 | 1894 |    | 
 | 1895 |   lb->payload.start = cpu_to_be32 (address); | 
 | 1896 |   return do_loader_command (lb, dev, adapter_start); | 
 | 1897 | } | 
 | 1898 |  | 
 | 1899 | /********** reset card **********/ | 
 | 1900 |  | 
 | 1901 | static inline void sf (const char * msg) | 
 | 1902 | { | 
 | 1903 | 	PRINTK (KERN_ERR, "self-test failed: %s", msg); | 
 | 1904 | } | 
 | 1905 |  | 
 | 1906 | static int amb_reset (amb_dev * dev, int diags) { | 
 | 1907 |   u32 word; | 
 | 1908 |    | 
 | 1909 |   PRINTD (DBG_FLOW|DBG_LOAD, "amb_reset"); | 
 | 1910 |    | 
 | 1911 |   word = rd_plain (dev, offsetof(amb_mem, reset_control)); | 
 | 1912 |   // put card into reset state | 
 | 1913 |   wr_plain (dev, offsetof(amb_mem, reset_control), word | AMB_RESET_BITS); | 
 | 1914 |   // wait a short while | 
 | 1915 |   udelay (10); | 
 | 1916 | #if 1 | 
 | 1917 |   // put card into known good state | 
 | 1918 |   wr_plain (dev, offsetof(amb_mem, interrupt_control), AMB_DOORBELL_BITS); | 
 | 1919 |   // clear all interrupts just in case | 
 | 1920 |   wr_plain (dev, offsetof(amb_mem, interrupt), -1); | 
 | 1921 | #endif | 
 | 1922 |   // clear self-test done flag | 
 | 1923 |   wr_plain (dev, offsetof(amb_mem, mb.loader.ready), 0); | 
 | 1924 |   // take card out of reset state | 
 | 1925 |   wr_plain (dev, offsetof(amb_mem, reset_control), word &~ AMB_RESET_BITS); | 
 | 1926 |    | 
 | 1927 |   if (diags) {  | 
 | 1928 |     unsigned long timeout; | 
 | 1929 |     // 4.2 second wait | 
 | 1930 |     msleep(4200); | 
 | 1931 |     // half second time-out | 
 | 1932 |     timeout = 500; | 
 | 1933 |     while (!rd_plain (dev, offsetof(amb_mem, mb.loader.ready))) | 
 | 1934 |       if (timeout) { | 
 | 1935 | 	timeout = msleep_interruptible(timeout); | 
 | 1936 |       } else { | 
 | 1937 | 	PRINTD (DBG_LOAD|DBG_ERR, "reset timed out"); | 
 | 1938 | 	return -ETIMEDOUT; | 
 | 1939 |       } | 
 | 1940 |      | 
 | 1941 |     // get results of self-test | 
 | 1942 |     // XXX double check byte-order | 
 | 1943 |     word = rd_mem (dev, offsetof(amb_mem, mb.loader.result)); | 
 | 1944 |     if (word & SELF_TEST_FAILURE) { | 
 | 1945 |       if (word & GPINT_TST_FAILURE) | 
 | 1946 | 	sf ("interrupt"); | 
 | 1947 |       if (word & SUNI_DATA_PATTERN_FAILURE) | 
 | 1948 | 	sf ("SUNI data pattern"); | 
 | 1949 |       if (word & SUNI_DATA_BITS_FAILURE) | 
 | 1950 | 	sf ("SUNI data bits"); | 
 | 1951 |       if (word & SUNI_UTOPIA_FAILURE) | 
 | 1952 | 	sf ("SUNI UTOPIA interface"); | 
 | 1953 |       if (word & SUNI_FIFO_FAILURE) | 
 | 1954 | 	sf ("SUNI cell buffer FIFO"); | 
 | 1955 |       if (word & SRAM_FAILURE) | 
 | 1956 | 	sf ("bad SRAM"); | 
 | 1957 |       // better return value? | 
 | 1958 |       return -EIO; | 
 | 1959 |     } | 
 | 1960 |      | 
 | 1961 |   } | 
 | 1962 |   return 0; | 
 | 1963 | } | 
 | 1964 |  | 
 | 1965 | /********** transfer and start the microcode **********/ | 
 | 1966 |  | 
 | 1967 | static int __devinit ucode_init (loader_block * lb, amb_dev * dev) { | 
 | 1968 |   unsigned int i = 0; | 
 | 1969 |   unsigned int total = 0; | 
 | 1970 |   const u32 * pointer = ucode_data; | 
 | 1971 |   u32 address; | 
 | 1972 |   unsigned int count; | 
 | 1973 |   int res; | 
 | 1974 |    | 
 | 1975 |   PRINTD (DBG_FLOW|DBG_LOAD, "ucode_init"); | 
 | 1976 |    | 
 | 1977 |   while (address = ucode_regions[i].start, | 
 | 1978 | 	 count = ucode_regions[i].count) { | 
 | 1979 |     PRINTD (DBG_LOAD, "starting region (%x, %u)", address, count); | 
 | 1980 |     while (count) { | 
 | 1981 |       unsigned int words; | 
 | 1982 |       if (count <= MAX_TRANSFER_DATA) | 
 | 1983 | 	words = count; | 
 | 1984 |       else | 
 | 1985 | 	words = MAX_TRANSFER_DATA; | 
 | 1986 |       total += words; | 
 | 1987 |       res = loader_write (lb, dev, pointer, address, words); | 
 | 1988 |       if (res) | 
 | 1989 | 	return res; | 
 | 1990 |       res = loader_verify (lb, dev, pointer, address, words); | 
 | 1991 |       if (res) | 
 | 1992 | 	return res; | 
 | 1993 |       count -= words; | 
 | 1994 |       address += sizeof(u32) * words; | 
 | 1995 |       pointer += words; | 
 | 1996 |     } | 
 | 1997 |     i += 1; | 
 | 1998 |   } | 
| Randy Dunlap | 3c6b377 | 2006-07-03 19:48:25 -0700 | [diff] [blame] | 1999 |   if (*pointer == ATM_POISON) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2000 |     return loader_start (lb, dev, ucode_start); | 
 | 2001 |   } else { | 
 | 2002 |     // cast needed as there is no %? for pointer differences | 
 | 2003 |     PRINTD (DBG_LOAD|DBG_ERR, | 
 | 2004 | 	    "offset=%li, *pointer=%x, address=%x, total=%u", | 
 | 2005 | 	    (long) (pointer - ucode_data), *pointer, address, total); | 
 | 2006 |     PRINTK (KERN_ERR, "incorrect microcode data"); | 
 | 2007 |     return -ENOMEM; | 
 | 2008 |   } | 
 | 2009 | } | 
 | 2010 |  | 
 | 2011 | /********** give adapter parameters **********/ | 
 | 2012 |    | 
 | 2013 | static inline __be32 bus_addr(void * addr) { | 
 | 2014 |     return cpu_to_be32 (virt_to_bus (addr)); | 
 | 2015 | } | 
 | 2016 |  | 
 | 2017 | static int __devinit amb_talk (amb_dev * dev) { | 
 | 2018 |   adap_talk_block a; | 
 | 2019 |   unsigned char pool; | 
 | 2020 |   unsigned long timeout; | 
 | 2021 |    | 
 | 2022 |   PRINTD (DBG_FLOW, "amb_talk %p", dev); | 
 | 2023 |    | 
 | 2024 |   a.command_start = bus_addr (dev->cq.ptrs.start); | 
 | 2025 |   a.command_end   = bus_addr (dev->cq.ptrs.limit); | 
 | 2026 |   a.tx_start      = bus_addr (dev->txq.in.start); | 
 | 2027 |   a.tx_end        = bus_addr (dev->txq.in.limit); | 
 | 2028 |   a.txcom_start   = bus_addr (dev->txq.out.start); | 
 | 2029 |   a.txcom_end     = bus_addr (dev->txq.out.limit); | 
 | 2030 |    | 
 | 2031 |   for (pool = 0; pool < NUM_RX_POOLS; ++pool) { | 
 | 2032 |     // the other "a" items are set up by the adapter | 
 | 2033 |     a.rec_struct[pool].buffer_start = bus_addr (dev->rxq[pool].in.start); | 
 | 2034 |     a.rec_struct[pool].buffer_end   = bus_addr (dev->rxq[pool].in.limit); | 
 | 2035 |     a.rec_struct[pool].rx_start     = bus_addr (dev->rxq[pool].out.start); | 
 | 2036 |     a.rec_struct[pool].rx_end       = bus_addr (dev->rxq[pool].out.limit); | 
 | 2037 |     a.rec_struct[pool].buffer_size = cpu_to_be32 (dev->rxq[pool].buffer_size); | 
 | 2038 |   } | 
 | 2039 |    | 
 | 2040 | #ifdef AMB_NEW_MICROCODE | 
 | 2041 |   // disable fast PLX prefetching | 
 | 2042 |   a.init_flags = 0; | 
 | 2043 | #endif | 
 | 2044 |    | 
 | 2045 |   // pass the structure | 
 | 2046 |   wr_mem (dev, offsetof(amb_mem, doorbell), virt_to_bus (&a)); | 
 | 2047 |    | 
 | 2048 |   // 2.2 second wait (must not touch doorbell during 2 second DMA test) | 
 | 2049 |   msleep(2200); | 
 | 2050 |   // give the adapter another half second? | 
 | 2051 |   timeout = 500; | 
 | 2052 |   while (rd_plain (dev, offsetof(amb_mem, doorbell))) | 
 | 2053 |     if (timeout) { | 
 | 2054 |       timeout = msleep_interruptible(timeout); | 
 | 2055 |     } else { | 
 | 2056 |       PRINTD (DBG_INIT|DBG_ERR, "adapter init timed out"); | 
 | 2057 |       return -ETIMEDOUT; | 
 | 2058 |     } | 
 | 2059 |    | 
 | 2060 |   return 0; | 
 | 2061 | } | 
 | 2062 |  | 
 | 2063 | // get microcode version | 
 | 2064 | static void __devinit amb_ucode_version (amb_dev * dev) { | 
 | 2065 |   u32 major; | 
 | 2066 |   u32 minor; | 
 | 2067 |   command cmd; | 
 | 2068 |   cmd.request = cpu_to_be32 (SRB_GET_VERSION); | 
 | 2069 |   while (command_do (dev, &cmd)) { | 
 | 2070 |     set_current_state(TASK_UNINTERRUPTIBLE); | 
 | 2071 |     schedule(); | 
 | 2072 |   } | 
 | 2073 |   major = be32_to_cpu (cmd.args.version.major); | 
 | 2074 |   minor = be32_to_cpu (cmd.args.version.minor); | 
 | 2075 |   PRINTK (KERN_INFO, "microcode version is %u.%u", major, minor); | 
 | 2076 | } | 
 | 2077 |    | 
 | 2078 | // swap bits within byte to get Ethernet ordering | 
 | 2079 | static u8 bit_swap (u8 byte) | 
 | 2080 | { | 
 | 2081 |     const u8 swap[] = { | 
 | 2082 |       0x0, 0x8, 0x4, 0xc, | 
 | 2083 |       0x2, 0xa, 0x6, 0xe, | 
 | 2084 |       0x1, 0x9, 0x5, 0xd, | 
 | 2085 |       0x3, 0xb, 0x7, 0xf | 
 | 2086 |     }; | 
 | 2087 |     return ((swap[byte & 0xf]<<4) | swap[byte>>4]); | 
 | 2088 | } | 
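 |      | /* For example, bit_swap(0x12) is 0x48 and bit_swap(0x80) is 0x01: each | 
 |      |    nibble is bit-reversed by the lookup table and the two nibbles are | 
 |      |    exchanged, so the whole byte ends up bit-reversed. */ | 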
 | 2089 |  | 
 | 2090 | // get end station address | 
 | 2091 | static void __devinit amb_esi (amb_dev * dev, u8 * esi) { | 
 | 2092 |   u32 lower4; | 
 | 2093 |   u16 upper2; | 
 | 2094 |   command cmd; | 
 | 2095 |    | 
 | 2096 |   cmd.request = cpu_to_be32 (SRB_GET_BIA); | 
 | 2097 |   while (command_do (dev, &cmd)) { | 
 | 2098 |     set_current_state(TASK_UNINTERRUPTIBLE); | 
 | 2099 |     schedule(); | 
 | 2100 |   } | 
 | 2101 |   lower4 = be32_to_cpu (cmd.args.bia.lower4); | 
 | 2102 |   upper2 = be32_to_cpu (cmd.args.bia.upper2); | 
 | 2103 |   PRINTD (DBG_LOAD, "BIA: lower4: %08x, upper2 %04x", lower4, upper2); | 
 | 2104 |    | 
 | 2105 |   if (esi) { | 
 | 2106 |     unsigned int i; | 
 | 2107 |      | 
 | 2108 |     PRINTDB (DBG_INIT, "ESI:"); | 
 | 2109 |     for (i = 0; i < ESI_LEN; ++i) { | 
 | 2110 |       if (i < 4) | 
 | 2111 | 	  esi[i] = bit_swap (lower4>>(8*i)); | 
 | 2112 |       else | 
 | 2113 | 	  esi[i] = bit_swap (upper2>>(8*(i-4))); | 
 | 2114 |       PRINTDM (DBG_INIT, " %02x", esi[i]); | 
 | 2115 |     } | 
 | 2116 |      | 
 | 2117 |     PRINTDE (DBG_INIT, ""); | 
 | 2118 |   } | 
 | 2119 |    | 
 | 2120 |   return; | 
 | 2121 | } | 
 | 2122 |    | 
 | 2123 | static void fixup_plx_window (amb_dev *dev, loader_block *lb) | 
 | 2124 | { | 
 | 2125 | 	// fix up the PLX-mapped window base address to match the block | 
 | 2126 | 	unsigned long blb; | 
 | 2127 | 	u32 mapreg; | 
 | 2128 | 	blb = virt_to_bus(lb); | 
 | 2129 | 	// the kernel stack had better not ever cross a 1Gb boundary! | 
 | 2130 | 	mapreg = rd_plain (dev, offsetof(amb_mem, stuff[10])); | 
 | 2131 | 	mapreg &= ~onegigmask; | 
 | 2132 | 	mapreg |= blb & onegigmask; | 
 | 2133 | 	wr_plain (dev, offsetof(amb_mem, stuff[10]), mapreg); | 
 | 2134 | 	return; | 
 | 2135 | } | 
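 |      | /* This pairs with do_loader_command above: the doorbell is written with | 
 |      |    virt_to_bus(lb) & ~onegigmask (the offset within a 1GB window), while | 
 |      |    the PLX mapping register takes the bits selected by onegigmask, i.e. | 
 |      |    which 1GB region the loader block lives in - hence the warning above | 
 |      |    about not crossing a 1GB boundary. */ | 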
 | 2136 |  | 
 | 2137 | static int __devinit amb_init (amb_dev * dev) | 
 | 2138 | { | 
 | 2139 |   loader_block lb; | 
 | 2140 |    | 
 | 2141 |   u32 version; | 
 | 2142 |    | 
 | 2143 |   if (amb_reset (dev, 1)) { | 
 | 2144 |     PRINTK (KERN_ERR, "card reset failed!"); | 
 | 2145 |   } else { | 
 | 2146 |     fixup_plx_window (dev, &lb); | 
 | 2147 |      | 
 | 2148 |     if (get_loader_version (&lb, dev, &version)) { | 
 | 2149 |       PRINTK (KERN_INFO, "failed to get loader version"); | 
 | 2150 |     } else { | 
 | 2151 |       PRINTK (KERN_INFO, "loader version is %08x", version); | 
 | 2152 |        | 
 | 2153 |       if (ucode_init (&lb, dev)) { | 
 | 2154 | 	PRINTK (KERN_ERR, "microcode failure"); | 
 | 2155 |       } else if (create_queues (dev, cmds, txs, rxs, rxs_bs)) { | 
 | 2156 | 	PRINTK (KERN_ERR, "failed to get memory for queues"); | 
 | 2157 |       } else { | 
 | 2158 | 	 | 
 | 2159 | 	if (amb_talk (dev)) { | 
 | 2160 | 	  PRINTK (KERN_ERR, "adapter did not accept queues"); | 
 | 2161 | 	} else { | 
 | 2162 | 	   | 
 | 2163 | 	  amb_ucode_version (dev); | 
 | 2164 | 	  return 0; | 
 | 2165 | 	   | 
 | 2166 | 	} /* amb_talk */ | 
 | 2167 | 	 | 
 | 2168 | 	destroy_queues (dev); | 
 | 2169 |       } /* create_queues, ucode_init */ | 
 | 2170 |        | 
 | 2171 |       amb_reset (dev, 0); | 
 | 2172 |     } /* get_loader_version */ | 
 | 2173 |      | 
 | 2174 |   } /* amb_reset */ | 
 | 2175 |    | 
 | 2176 |   return -EINVAL; | 
 | 2177 | } | 
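 |      | /* | 
 |      |    In outline, amb_init: resets the card with self-test diagnostics, | 
 |      |    points the PLX window at the on-stack loader block, queries the loader | 
 |      |    version, downloads, verifies and starts the microcode, allocates the | 
 |      |    host queues, hands their addresses to the adapter (amb_talk) and | 
 |      |    finally reports the running microcode version. | 
 |      | */ | 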
 | 2178 |  | 
 | 2179 | static void setup_dev(amb_dev *dev, struct pci_dev *pci_dev)  | 
 | 2180 | { | 
 | 2181 |       unsigned char pool; | 
 | 2182 |       memset (dev, 0, sizeof(amb_dev)); | 
 | 2183 |        | 
 | 2184 |       // set up known dev items straight away | 
 | 2185 |       dev->pci_dev = pci_dev;  | 
 | 2186 |       pci_set_drvdata(pci_dev, dev); | 
 | 2187 |        | 
 | 2188 |       dev->iobase = pci_resource_start (pci_dev, 1); | 
 | 2189 |       dev->irq = pci_dev->irq;  | 
 | 2190 |       dev->membase = bus_to_virt(pci_resource_start(pci_dev, 0)); | 
 | 2191 |        | 
 | 2192 |       // flags (currently only dead) | 
 | 2193 |       dev->flags = 0; | 
 | 2194 |        | 
 | 2195 |       // Allocate cell rates (fibre) | 
 | 2196 |       // ATM_OC3_PCR = 155520000/8/270*260/53 - 29/53 | 
 | 2197 |       // to be really pedantic, this should be ATM_OC3c_PCR | 
 | 2198 |       dev->tx_avail = ATM_OC3_PCR; | 
 | 2199 |       dev->rx_avail = ATM_OC3_PCR; | 
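 |      |       // Roughly: 155520000 bit/s / 8 = 19440000 byte/s of line rate, of | 
 |      |       // which 260/270 is SONET payload = 18720000 byte/s; / 53 bytes per | 
 |      |       // cell gives about 353207 cells/s. | 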
 | 2200 |        | 
 | 2201 | #ifdef FILL_RX_POOLS_IN_BH | 
 | 2202 |       // initialise bottom half | 
 | 2203 |       INIT_WORK(&dev->bh, (void (*)(void *)) fill_rx_pools, dev); | 
 | 2204 | #endif | 
 | 2205 |        | 
 | 2206 |       // semaphore for txer/rxer modifications - we cannot use a | 
 | 2207 |       // spinlock as the critical region needs to switch processes | 
 | 2208 |       init_MUTEX (&dev->vcc_sf); | 
 | 2209 |       // queue manipulation spinlocks; we want atomic reads and | 
 | 2210 |       // writes to the queue descriptors (handles IRQ and SMP) | 
 | 2211 |       // consider replacing "int pending" -> "atomic_t available" | 
 | 2212 |       // => problem related to who gets to move queue pointers | 
 | 2213 |       spin_lock_init (&dev->cq.lock); | 
 | 2214 |       spin_lock_init (&dev->txq.lock); | 
 | 2215 |       for (pool = 0; pool < NUM_RX_POOLS; ++pool) | 
 | 2216 | 	spin_lock_init (&dev->rxq[pool].lock); | 
 | 2217 | } | 
 | 2218 |  | 
 | 2219 | static void setup_pci_dev(struct pci_dev *pci_dev) | 
 | 2220 | { | 
 | 2221 | 	unsigned char lat; | 
 | 2222 |        | 
 | 2223 | 	// enable bus master accesses | 
 | 2224 | 	pci_set_master(pci_dev); | 
 | 2225 |  | 
 | 2226 | 	// frobnicate latency (upwards, usually) | 
 | 2227 | 	pci_read_config_byte (pci_dev, PCI_LATENCY_TIMER, &lat); | 
 | 2228 |  | 
 | 2229 | 	if (!pci_lat) | 
 | 2230 | 		pci_lat = (lat < MIN_PCI_LATENCY) ? MIN_PCI_LATENCY : lat; | 
 | 2231 |  | 
 | 2232 | 	if (lat != pci_lat) { | 
 | 2233 | 		PRINTK (KERN_INFO, "Changing PCI latency timer from %hu to %hu", | 
 | 2234 | 			lat, pci_lat); | 
 | 2235 | 		pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, pci_lat); | 
 | 2236 | 	} | 
 | 2237 | } | 
 | 2238 |  | 
 | 2239 | static int __devinit amb_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent) | 
 | 2240 | { | 
 | 2241 | 	amb_dev * dev; | 
 | 2242 | 	int err; | 
 | 2243 | 	unsigned int irq; | 
 | 2244 |        | 
 | 2245 | 	err = pci_enable_device(pci_dev); | 
 | 2246 | 	if (err < 0) { | 
 | 2247 | 		PRINTK (KERN_ERR, "failed to enable PCI device"); | 
 | 2248 | 		goto out; | 
 | 2249 | 	} | 
 | 2250 |  | 
 | 2251 | 	// read resources from PCI configuration space | 
 | 2252 | 	irq = pci_dev->irq; | 
 | 2253 |  | 
 | 2254 | 	if (pci_dev->device == PCI_DEVICE_ID_MADGE_AMBASSADOR_BAD) { | 
 | 2255 | 		PRINTK (KERN_ERR, "skipped broken (PLX rev 2) card"); | 
 | 2256 | 		err = -EINVAL; | 
 | 2257 | 		goto out_disable; | 
 | 2258 | 	} | 
 | 2259 |  | 
 | 2260 | 	PRINTD (DBG_INFO, "found Madge ATM adapter (amb) at" | 
| Greg Kroah-Hartman | e29419f | 2006-06-12 15:20:16 -0700 | [diff] [blame] | 2261 | 		" IO %llx, IRQ %u, MEM %p", | 
 | 2262 | 		(unsigned long long)pci_resource_start(pci_dev, 1), | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2263 | 		irq, bus_to_virt(pci_resource_start(pci_dev, 0))); | 
 | 2264 |  | 
 | 2265 | 	// check IO region | 
 | 2266 | 	err = pci_request_region(pci_dev, 1, DEV_LABEL); | 
 | 2267 | 	if (err < 0) { | 
 | 2268 | 		PRINTK (KERN_ERR, "IO range already in use!"); | 
 | 2269 | 		goto out_disable; | 
 | 2270 | 	} | 
 | 2271 |  | 
 | 2272 | 	dev = kmalloc (sizeof(amb_dev), GFP_KERNEL); | 
 | 2273 | 	if (!dev) { | 
 | 2274 | 		PRINTK (KERN_ERR, "out of memory!"); | 
 | 2275 | 		err = -ENOMEM; | 
 | 2276 | 		goto out_release; | 
 | 2277 | 	} | 
 | 2278 |  | 
 | 2279 | 	setup_dev(dev, pci_dev); | 
 | 2280 |  | 
 | 2281 | 	err = amb_init(dev); | 
 | 2282 | 	if (err < 0) { | 
 | 2283 | 		PRINTK (KERN_ERR, "adapter initialisation failure"); | 
 | 2284 | 		goto out_free; | 
 | 2285 | 	} | 
 | 2286 |  | 
 | 2287 | 	setup_pci_dev(pci_dev); | 
 | 2288 |  | 
 | 2289 | 	// grab (but share) IRQ and install handler | 
| Thomas Gleixner | dace145 | 2006-07-01 19:29:38 -0700 | [diff] [blame] | 2290 | 	err = request_irq(irq, interrupt_handler, IRQF_SHARED, DEV_LABEL, dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2291 | 	if (err < 0) { | 
 | 2292 | 		PRINTK (KERN_ERR, "request IRQ failed!"); | 
 | 2293 | 		goto out_reset; | 
 | 2294 | 	} | 
 | 2295 |  | 
 | 2296 | 	dev->atm_dev = atm_dev_register (DEV_LABEL, &amb_ops, -1, NULL); | 
 | 2297 | 	if (!dev->atm_dev) { | 
 | 2298 | 		PRINTD (DBG_ERR, "failed to register Madge ATM adapter"); | 
 | 2299 | 		err = -EINVAL; | 
 | 2300 | 		goto out_free_irq; | 
 | 2301 | 	} | 
 | 2302 |  | 
 | 2303 | 	PRINTD (DBG_INFO, "registered Madge ATM adapter (no. %d) (%p) at %p", | 
 | 2304 | 		dev->atm_dev->number, dev, dev->atm_dev); | 
 | 2305 | 	dev->atm_dev->dev_data = (void *) dev; | 
 | 2306 |  | 
 | 2307 | 	// register our address | 
 | 2308 | 	amb_esi (dev, dev->atm_dev->esi); | 
 | 2309 |  | 
 | 2310 | 	// 0 bits for vpi, 10 bits for vci | 
 | 2311 | 	dev->atm_dev->ci_range.vpi_bits = NUM_VPI_BITS; | 
 | 2312 | 	dev->atm_dev->ci_range.vci_bits = NUM_VCI_BITS; | 
 | 2313 |  | 
 | 2314 | 	init_timer(&dev->housekeeping); | 
 | 2315 | 	dev->housekeeping.function = do_housekeeping; | 
 | 2316 | 	dev->housekeeping.data = (unsigned long) dev; | 
 | 2317 | 	mod_timer(&dev->housekeeping, jiffies); | 
 | 2318 |  | 
 | 2319 | 	// enable host interrupts | 
 | 2320 | 	interrupts_on (dev); | 
 | 2321 |  | 
 | 2322 | out: | 
 | 2323 | 	return err; | 
 | 2324 |  | 
 | 2325 | out_free_irq: | 
 | 2326 | 	free_irq(irq, dev); | 
 | 2327 | out_reset: | 
 | 2328 | 	amb_reset(dev, 0); | 
 | 2329 | out_free: | 
 | 2330 | 	kfree(dev); | 
 | 2331 | out_release: | 
 | 2332 | 	pci_release_region(pci_dev, 1); | 
 | 2333 | out_disable: | 
 | 2334 | 	pci_disable_device(pci_dev); | 
 | 2335 | 	goto out; | 
 | 2336 | } | 
 | 2337 |  | 
 | 2338 |  | 
 | 2339 | static void __devexit amb_remove_one(struct pci_dev *pci_dev) | 
 | 2340 | { | 
 | 2341 | 	struct amb_dev *dev; | 
 | 2342 |  | 
 | 2343 | 	dev = pci_get_drvdata(pci_dev); | 
 | 2344 |  | 
 | 2345 | 	PRINTD(DBG_INFO|DBG_INIT, "closing %p (atm_dev = %p)", dev, dev->atm_dev); | 
 | 2346 | 	del_timer_sync(&dev->housekeeping); | 
 | 2347 | 	// the drain should not be necessary | 
 | 2348 | 	drain_rx_pools(dev); | 
 | 2349 | 	interrupts_off(dev); | 
 | 2350 | 	amb_reset(dev, 0); | 
 | 2351 | 	free_irq(dev->irq, dev); | 
 | 2352 | 	pci_disable_device(pci_dev); | 
 | 2353 | 	destroy_queues(dev); | 
 | 2354 | 	atm_dev_deregister(dev->atm_dev); | 
 | 2355 | 	kfree(dev); | 
 | 2356 | 	pci_release_region(pci_dev, 1); | 
 | 2357 | } | 
 | 2358 |  | 
 | 2359 | static void __init amb_check_args (void) { | 
 | 2360 |   unsigned char pool; | 
 | 2361 |   unsigned int max_rx_size; | 
 | 2362 |    | 
 | 2363 | #ifdef DEBUG_AMBASSADOR | 
 | 2364 |   PRINTK (KERN_NOTICE, "debug bitmap is %hx", debug &= DBG_MASK); | 
 | 2365 | #else | 
 | 2366 |   if (debug) | 
 | 2367 |     PRINTK (KERN_NOTICE, "no debugging support"); | 
 | 2368 | #endif | 
 | 2369 |    | 
 | 2370 |   if (cmds < MIN_QUEUE_SIZE) | 
 | 2371 |     PRINTK (KERN_NOTICE, "cmds has been raised to %u", | 
 | 2372 | 	    cmds = MIN_QUEUE_SIZE); | 
 | 2373 |    | 
 | 2374 |   if (txs < MIN_QUEUE_SIZE) | 
 | 2375 |     PRINTK (KERN_NOTICE, "txs has been raised to %u", | 
 | 2376 | 	    txs = MIN_QUEUE_SIZE); | 
 | 2377 |    | 
 | 2378 |   for (pool = 0; pool < NUM_RX_POOLS; ++pool) | 
 | 2379 |     if (rxs[pool] < MIN_QUEUE_SIZE) | 
 | 2380 |       PRINTK (KERN_NOTICE, "rxs[%hu] has been raised to %u", | 
 | 2381 | 	      pool, rxs[pool] = MIN_QUEUE_SIZE); | 
 | 2382 |    | 
 | 2383 |   // buffers sizes should be greater than zero and strictly increasing | 
 | 2384 |   max_rx_size = 0; | 
 | 2385 |   for (pool = 0; pool < NUM_RX_POOLS; ++pool) | 
 | 2386 |     if (rxs_bs[pool] <= max_rx_size) | 
 | 2387 |       PRINTK (KERN_NOTICE, "useless pool (rxs_bs[%hu] = %u)", | 
 | 2388 | 	      pool, rxs_bs[pool]); | 
 | 2389 |     else | 
 | 2390 |       max_rx_size = rxs_bs[pool]; | 
 | 2391 |    | 
 | 2392 |   if (rx_lats < MIN_RX_BUFFERS) | 
 | 2393 |     PRINTK (KERN_NOTICE, "rx_lats has been raised to %u", | 
 | 2394 | 	    rx_lats = MIN_RX_BUFFERS); | 
 | 2395 |    | 
 | 2396 |   return; | 
 | 2397 | } | 
 | 2398 |  | 
 | 2399 | /********** module stuff **********/ | 
 | 2400 |  | 
 | 2401 | MODULE_AUTHOR(maintainer_string); | 
 | 2402 | MODULE_DESCRIPTION(description_string); | 
 | 2403 | MODULE_LICENSE("GPL"); | 
 | 2404 | module_param(debug,   ushort, 0644); | 
 | 2405 | module_param(cmds,    uint, 0); | 
 | 2406 | module_param(txs,     uint, 0); | 
 | 2407 | module_param_array(rxs,     uint, NULL, 0); | 
 | 2408 | module_param_array(rxs_bs,  uint, NULL, 0); | 
 | 2409 | module_param(rx_lats, uint, 0); | 
 | 2410 | module_param(pci_lat, byte, 0); | 
 | 2411 | MODULE_PARM_DESC(debug,   "debug bitmap, see .h file"); | 
 | 2412 | MODULE_PARM_DESC(cmds,    "number of command queue entries"); | 
 | 2413 | MODULE_PARM_DESC(txs,     "number of TX queue entries"); | 
 | 2414 | MODULE_PARM_DESC(rxs,     "number of RX queue entries [" __MODULE_STRING(NUM_RX_POOLS) "]"); | 
 | 2415 | MODULE_PARM_DESC(rxs_bs,  "size of RX buffers [" __MODULE_STRING(NUM_RX_POOLS) "]"); | 
 | 2416 | MODULE_PARM_DESC(rx_lats, "number of extra buffers to cope with RX latencies"); | 
 | 2417 | MODULE_PARM_DESC(pci_lat, "PCI latency in bus cycles"); | 
 | 2418 |  | 
 | 2419 | /********** module entry **********/ | 
 | 2420 |  | 
 | 2421 | static struct pci_device_id amb_pci_tbl[] = { | 
 | 2422 | 	{ PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR, PCI_ANY_ID, PCI_ANY_ID, | 
 | 2423 | 	  0, 0, 0 }, | 
 | 2424 | 	{ PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR_BAD, PCI_ANY_ID, PCI_ANY_ID, | 
 | 2425 | 	  0, 0, 0 }, | 
 | 2426 | 	{ 0, } | 
 | 2427 | }; | 
 | 2428 |  | 
 | 2429 | MODULE_DEVICE_TABLE(pci, amb_pci_tbl); | 
 | 2430 |  | 
 | 2431 | static struct pci_driver amb_driver = { | 
 | 2432 | 	.name =		"amb", | 
 | 2433 | 	.probe =	amb_probe, | 
 | 2434 | 	.remove =	__devexit_p(amb_remove_one), | 
 | 2435 | 	.id_table =	amb_pci_tbl, | 
 | 2436 | }; | 
 | 2437 |  | 
 | 2438 | static int __init amb_module_init (void) | 
 | 2439 | { | 
 | 2440 |   PRINTD (DBG_FLOW|DBG_INIT, "init_module"); | 
 | 2441 |    | 
 | 2442 |   // sanity check - cast needed as printk does not support %Zu | 
 | 2443 |   if (sizeof(amb_mem) != 4*16 + 4*12) { | 
 | 2444 |     PRINTK (KERN_ERR, "Fix amb_mem (is %lu bytes).", | 
 | 2445 | 	    (unsigned long) sizeof(amb_mem)); | 
 | 2446 |     return -ENOMEM; | 
 | 2447 |   } | 
 | 2448 |    | 
 | 2449 |   show_version(); | 
 | 2450 |    | 
 | 2451 |   amb_check_args(); | 
 | 2452 |    | 
 | 2453 |   // get the juice | 
 | 2454 |   return pci_register_driver(&amb_driver); | 
 | 2455 | } | 
 | 2456 |  | 
 | 2457 | /********** module exit **********/ | 
 | 2458 |  | 
 | 2459 | static void __exit amb_module_exit (void) | 
 | 2460 | { | 
 | 2461 |   PRINTD (DBG_FLOW|DBG_INIT, "cleanup_module"); | 
 | 2462 |    | 
 | 2463 |   return pci_unregister_driver(&amb_driver); | 
 | 2464 | } | 
 | 2465 |  | 
 | 2466 | module_init(amb_module_init); | 
 | 2467 | module_exit(amb_module_exit); |