/*
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	(c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
 *	(c) Copyright 2000, 2001 Red Hat Inc
 *
 *	Development of this driver was funded by Equiinet Ltd
 *			http://www.equiinet.com
 *
 *	ChangeLog:
 *
 *	Asynchronous mode dropped for 2.2. For 2.5 we will attempt the
 *	unification of all the Z85x30 asynchronous drivers for real.
 *
 *	DMA now uses get_free_page as kmalloc buffers may span a 64K
 *	boundary.
 *
 *	Modified for SMP safety and SMP locking by Alan Cox <alan@redhat.com>
 *
 *	Performance
 *
 *	Z85230:
 *	Non DMA you want a 486DX50 or better to do 64Kbits. 9600 baud
 *	X.25 is not unrealistic on all machines. DMA mode can in theory
 *	handle T1/E1 quite nicely. In practice the limit seems to be about
 *	512Kbit->1Mbit depending on motherboard.
 *
 *	Z85C30:
 *	64K will take DMA, 9600 baud X.25 should be ok.
 *
 *	Z8530:
 *	Synchronous mode without DMA is unlikely to pass about 2400 baud.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <asm/dma.h>
#include <asm/io.h>
#define RT_LOCK
#define RT_UNLOCK
#include <linux/spinlock.h>

#include <net/syncppp.h>
#include "z85230.h"


/**
 *	z8530_read_port - Architecture specific interface function
 *	@p: port to read
 *
 *	Provided port access methods. The Comtrol SV11 requires no delays
 *	between accesses and uses PC I/O. Some drivers may need a 5uS delay
 *
 *	In the longer term this should become an architecture specific
 *	section so that this can become a generic driver interface for all
 *	platforms. For now we only handle PC I/O ports with or without the
 *	dread 5uS sanity delay.
 *
 *	The caller must hold sufficient locks to avoid violating the horrible
 *	5uS delay rule.
 */

static inline int z8530_read_port(unsigned long p)
{
	u8 r=inb(Z8530_PORT_OF(p));
	if(p&Z8530_PORT_SLEEP)	/* gcc should figure this out efficiently ! */
		udelay(5);
	return r;
}

/**
 *	z8530_write_port - Architecture specific interface function
 *	@p: port to write
 *	@d: value to write
 *
 *	Write a value to a port with delays if need be. Note that the
 *	caller must hold locks to avoid read/writes from other contexts
 *	violating the 5uS rule
 *
 *	In the longer term this should become an architecture specific
 *	section so that this can become a generic driver interface for all
 *	platforms. For now we only handle PC I/O ports with or without the
 *	dread 5uS sanity delay.
 */


static inline void z8530_write_port(unsigned long p, u8 d)
{
	outb(d,Z8530_PORT_OF(p));
	if(p&Z8530_PORT_SLEEP)
		udelay(5);
}



static void z8530_rx_done(struct z8530_channel *c);
static void z8530_tx_done(struct z8530_channel *c);


/**
 *	read_zsreg - Read a register from a Z85230
 *	@c: Z8530 channel to read from (2 per chip)
 *	@reg: Register to read
 *	FIXME: Use a spinlock.
 *
 *	Most of the Z8530 registers are indexed off the control registers.
 *	A read is done by writing to the control register and reading the
 *	register back.  The caller must hold the lock
 */

static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
{
	if(reg)
		z8530_write_port(c->ctrlio, reg);
	return z8530_read_port(c->ctrlio);
}

/**
 *	read_zsdata - Read the data port of a Z8530 channel
 *	@c: The Z8530 channel to read the data port from
 *
 *	The data port provides fast access to some things. We still
 *	have all the 5uS delays to worry about.
 */

static inline u8 read_zsdata(struct z8530_channel *c)
{
	u8 r;
	r=z8530_read_port(c->dataio);
	return r;
}

/**
 *	write_zsreg - Write to a Z8530 channel register
 *	@c: The Z8530 channel
 *	@reg: Register number
 *	@val: Value to write
 *
 *	Write a value to an indexed register. The caller must hold the lock
 *	to honour the irritating delay rules. We know about register 0
 *	being fast to access.
 *
 *	Assumes c->lock is held.
 */
static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
{
	if(reg)
		z8530_write_port(c->ctrlio, reg);
	z8530_write_port(c->ctrlio, val);

}

/**
 *	write_zsctrl - Write to a Z8530 control register
 *	@c: The Z8530 channel
 *	@val: Value to write
 *
 *	Write directly to the control register on the Z8530
 */

static inline void write_zsctrl(struct z8530_channel *c, u8 val)
{
	z8530_write_port(c->ctrlio, val);
}

/**
 *	write_zsdata - Write to a Z8530 data register
 *	@c: The Z8530 channel
 *	@val: Value to write
 *
 *	Write directly to the data register on the Z8530
 */


static inline void write_zsdata(struct z8530_channel *c, u8 val)
{
	z8530_write_port(c->dataio, val);
}

/*
 *	Register loading parameters for a dead port
 */

u8 z8530_dead_port[]=
{
	255
};

EXPORT_SYMBOL(z8530_dead_port);

/*
 *	Register loading parameters for currently supported circuit types
 */


/*
 *	Data clocked by telco end. This is the correct data for the UK
 *	"kilostream" service, and most other similar services.
 */

u8 z8530_hdlc_kilostream[]=
{
	4,	SYNC_ENAB|SDLC|X1CLK,
	2,	0,	/* No vector */
	1,	0,
	3,	ENT_HM|RxCRC_ENAB|Rx8,
	5,	TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
	9,	0,		/* Disable interrupts */
	6,	0xFF,
	7,	FLAG,
	10,	ABUNDER|NRZ|CRCPS,	/* MARKIDLE ?? */
	11,	TCTRxCP,
	14,	DISDPLL,
	15,	DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
	1,	EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
	9,	NV|MIE|NORESET,
	255
};

EXPORT_SYMBOL(z8530_hdlc_kilostream);

/*
 *	As above but for enhanced chips.
 */

u8 z8530_hdlc_kilostream_85230[]=
{
	4,	SYNC_ENAB|SDLC|X1CLK,
	2,	0,	/* No vector */
	1,	0,
	3,	ENT_HM|RxCRC_ENAB|Rx8,
	5,	TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
	9,	0,		/* Disable interrupts */
	6,	0xFF,
	7,	FLAG,
	10,	ABUNDER|NRZ|CRCPS,	/* MARKIDLE ?? */
	11,	TCTRxCP,
	14,	DISDPLL,
	15,	DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
	1,	EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
	9,	NV|MIE|NORESET,
	23,	3,		/* Extended mode AUTO TX and EOM */

	255
};

EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);

/**
 *	z8530_flush_fifo - Flush on chip RX FIFO
 *	@c: Channel to flush
 *
 *	Flush the receive FIFO. There is no specific option for this, we
 *	blindly read bytes and discard them. Reading when there is no data
 *	is harmless. The 8530 has a 4 byte FIFO, the 85230 has 8 bytes.
 *
 *	All locking is handled for the caller. On return data may still be
 *	present if it arrived during the flush.
 */

static void z8530_flush_fifo(struct z8530_channel *c)
{
	read_zsreg(c, R1);
	read_zsreg(c, R1);
	read_zsreg(c, R1);
	read_zsreg(c, R1);
	if(c->dev->type==Z85230)
	{
		read_zsreg(c, R1);
		read_zsreg(c, R1);
		read_zsreg(c, R1);
		read_zsreg(c, R1);
	}
}

/**
 *	z8530_rtsdtr - Control the outgoing DTR/RTS lines
 *	@c: The Z8530 channel to control
 *	@set: 1 to set, 0 to clear
 *
 *	Sets or clears DTR/RTS on the requested line. All locking is handled
 *	by the caller. For now we assume all boards use the actual RTS/DTR
 *	on the chip. Apparently one or two don't. We'll scream about them
 *	later.
 */

static void z8530_rtsdtr(struct z8530_channel *c, int set)
{
	if (set)
		c->regs[5] |= (RTS | DTR);
	else
		c->regs[5] &= ~(RTS | DTR);
	write_zsreg(c, R5, c->regs[5]);
}

/**
 *	z8530_rx - Handle a PIO receive event
 *	@c: Z8530 channel to process
 *
 *	Receive handler for receiving in PIO mode. This is much like the
 *	async one but not quite the same or as complex
 *
 *	Note: It's intended that this handler can easily be separated from
 *	the main code to run realtime. That'll be needed for some machines
 *	(eg to ever clock 64kbits on a sparc ;)).
 *
 *	The RT_LOCK macros don't do anything now. Keep the code covered
 *	by them as short as possible in all circumstances - clocks cost
 *	baud. The interrupt handler is assumed to be atomic w.r.t. to
 *	other code - this is true in the RT case too.
 *
 *	We only cover the sync cases for this. If you want 2Mbit async
 *	do it yourself but consider medical assistance first. This non DMA
 *	synchronous mode is portable code. The DMA mode assumes PCI like
 *	ISA DMA
 *
 *	Called with the device lock held
 */

static void z8530_rx(struct z8530_channel *c)
{
	u8 ch,stat;

	while(1)
	{
		/* FIFO empty ? */
		if(!(read_zsreg(c, R0)&1))
			break;
		ch=read_zsdata(c);
		stat=read_zsreg(c, R1);

		/*
		 *	Overrun ?
		 */
		if(c->count < c->max)
		{
			*c->dptr++=ch;
			c->count++;
		}

		if(stat&END_FR)
		{

			/*
			 *	Error ?
			 */
			if(stat&(Rx_OVR|CRC_ERR))
			{
				/* Rewind the buffer and return */
				if(c->skb)
					c->dptr=c->skb->data;
				c->count=0;
				if(stat&Rx_OVR)
				{
					printk(KERN_WARNING "%s: overrun\n", c->dev->name);
					c->rx_overrun++;
				}
				if(stat&CRC_ERR)
				{
					c->rx_crc_err++;
					/* printk("crc error\n"); */
				}
				/* Shove the frame upstream */
			}
			else
			{
				/*
				 *	Drop the lock for RX processing, or
				 *	there are deadlocks
				 */
				z8530_rx_done(c);
				write_zsctrl(c, RES_Rx_CRC);
			}
		}
	}
	/*
	 *	Clear irq
	 */
	write_zsctrl(c, ERR_RES);
	write_zsctrl(c, RES_H_IUS);
}


/**
 *	z8530_tx - Handle a PIO transmit event
 *	@c: Z8530 channel to process
 *
 *	Z8530 transmit interrupt handler for the PIO mode. The basic
 *	idea is to attempt to keep the FIFO fed. We fill as many bytes
 *	in as possible, it's quite possible that we won't keep up with the
 *	data rate otherwise.
 */

static void z8530_tx(struct z8530_channel *c)
{
	while(c->txcount) {
		/* FIFO full ? */
		if(!(read_zsreg(c, R0)&4))
			break;
		c->txcount--;
		/*
		 *	Shovel out the byte
		 */
		write_zsreg(c, R8, *c->tx_ptr++);
		write_zsctrl(c, RES_H_IUS);
		/* We are about to underflow */
		if(c->txcount==0)
		{
			write_zsctrl(c, RES_EOM_L);
			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
		}
	}


	/*
	 *	End of frame TX - fire another one
	 */

	write_zsctrl(c, RES_Tx_P);

	z8530_tx_done(c);
	write_zsctrl(c, RES_H_IUS);
}

/**
 *	z8530_status - Handle a PIO status exception
 *	@chan: Z8530 channel to process
 *
 *	A status event occurred in PIO synchronous mode. There are several
 *	reasons the chip will bother us here. A transmit underrun means we
 *	failed to feed the chip fast enough and just broke a packet. A DCD
 *	change is a line up or down. We communicate that back to the protocol
 *	layer for synchronous PPP to renegotiate.
 */

static void z8530_status(struct z8530_channel *chan)
{
	u8 status, altered;

	status=read_zsreg(chan, R0);
	altered=chan->status^status;

	chan->status=status;

	if(status&TxEOM)
	{
/*		printk("%s: Tx underrun.\n", chan->dev->name); */
		chan->stats.tx_fifo_errors++;
		write_zsctrl(chan, ERR_RES);
		z8530_tx_done(chan);
	}

	if(altered&chan->dcdcheck)
	{
		if(status&chan->dcdcheck)
		{
			printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3]|RxENABLE);
			if(chan->netdevice &&
			    ((chan->netdevice->type == ARPHRD_HDLC) ||
			    (chan->netdevice->type == ARPHRD_PPP)))
				sppp_reopen(chan->netdevice);
		}
		else
		{
			printk(KERN_INFO "%s: DCD lost\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3]&~RxENABLE);
			z8530_flush_fifo(chan);
		}

	}
	write_zsctrl(chan, RES_EXT_INT);
	write_zsctrl(chan, RES_H_IUS);
}


struct z8530_irqhandler z8530_sync=
{
	z8530_rx,
	z8530_tx,
	z8530_status
};

EXPORT_SYMBOL(z8530_sync);

/**
 *	z8530_dma_rx - Handle a DMA RX event
 *	@chan: Channel to handle
 *
 *	Non bus mastering DMA interfaces for the Z8x30 devices. This
 *	is really pretty PC specific. The DMA mode means that most receive
 *	events are handled by the DMA hardware. We get a kick here only if
 *	a frame ended.
 */

static void z8530_dma_rx(struct z8530_channel *chan)
{
	if(chan->rxdma_on)
	{
		/* Special condition check only */
		u8 status;

		read_zsreg(chan, R7);
		read_zsreg(chan, R6);

		status=read_zsreg(chan, R1);

		if(status&END_FR)
		{
			z8530_rx_done(chan);	/* Fire up the next one */
		}
		write_zsctrl(chan, ERR_RES);
		write_zsctrl(chan, RES_H_IUS);
	}
	else
	{
		/* DMA is off right now, drain the slow way */
		z8530_rx(chan);
	}
}


/**
 *	z8530_dma_tx - Handle a DMA TX event
 *	@chan:	The Z8530 channel to handle
 *
 *	We have received an interrupt while doing DMA transmissions. It
 *	shouldn't happen. Scream loudly if it does.
 */

static void z8530_dma_tx(struct z8530_channel *chan)
{
	if(!chan->dma_tx)
	{
		printk(KERN_WARNING "Hey who turned the DMA off?\n");
		z8530_tx(chan);
		return;
	}
	/* This shouldn't occur in DMA mode */
	printk(KERN_ERR "DMA tx - bogus event!\n");
	z8530_tx(chan);
}

/**
 *	z8530_dma_status - Handle a DMA status exception
 *	@chan: Z8530 channel to process
 *
 *	A status event occurred on the Z8530. We receive these for two reasons
 *	when in DMA mode. Firstly if we finished a packet transfer we get one
 *	and kick the next packet out. Secondly we may see a DCD change and
 *	have to poke the protocol layer.
 *
 */

static void z8530_dma_status(struct z8530_channel *chan)
{
	u8 status, altered;

	status=read_zsreg(chan, R0);
	altered=chan->status^status;

	chan->status=status;


	if(chan->dma_tx)
	{
		if(status&TxEOM)
		{
			unsigned long flags;

			flags=claim_dma_lock();
			disable_dma(chan->txdma);
			clear_dma_ff(chan->txdma);
			chan->txdma_on=0;
			release_dma_lock(flags);
			z8530_tx_done(chan);
		}
	}

	if(altered&chan->dcdcheck)
	{
		if(status&chan->dcdcheck)
		{
			printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3]|RxENABLE);
			if(chan->netdevice &&
			    ((chan->netdevice->type == ARPHRD_HDLC) ||
			    (chan->netdevice->type == ARPHRD_PPP)))
				sppp_reopen(chan->netdevice);
		}
		else
		{
			printk(KERN_INFO "%s: DCD lost\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3]&~RxENABLE);
			z8530_flush_fifo(chan);
		}
	}

	write_zsctrl(chan, RES_EXT_INT);
	write_zsctrl(chan, RES_H_IUS);
}

struct z8530_irqhandler z8530_dma_sync=
{
	z8530_dma_rx,
	z8530_dma_tx,
	z8530_dma_status
};

EXPORT_SYMBOL(z8530_dma_sync);

struct z8530_irqhandler z8530_txdma_sync=
{
	z8530_rx,
	z8530_dma_tx,
	z8530_dma_status
};

EXPORT_SYMBOL(z8530_txdma_sync);

/**
 *	z8530_rx_clear - Handle RX events from a stopped chip
 *	@c: Z8530 channel to shut up
 *
 *	Receive interrupt vectors for a Z8530 that is in 'parked' mode.
 *	For machines with PCI Z85x30 cards, or level triggered interrupts
 *	(eg the MacII) we must clear the interrupt cause or die.
 */


static void z8530_rx_clear(struct z8530_channel *c)
{
	/*
	 *	Data and status bytes
	 */
	u8 stat;

	read_zsdata(c);
	stat=read_zsreg(c, R1);

	if(stat&END_FR)
		write_zsctrl(c, RES_Rx_CRC);
	/*
	 *	Clear irq
	 */
	write_zsctrl(c, ERR_RES);
	write_zsctrl(c, RES_H_IUS);
}

/**
 *	z8530_tx_clear - Handle TX events from a stopped chip
 *	@c: Z8530 channel to shut up
 *
 *	Transmit interrupt vectors for a Z8530 that is in 'parked' mode.
 *	For machines with PCI Z85x30 cards, or level triggered interrupts
 *	(eg the MacII) we must clear the interrupt cause or die.
 */

static void z8530_tx_clear(struct z8530_channel *c)
{
	write_zsctrl(c, RES_Tx_P);
	write_zsctrl(c, RES_H_IUS);
}

/**
 *	z8530_status_clear - Handle status events from a stopped chip
 *	@chan: Z8530 channel to shut up
 *
 *	Status interrupt vectors for a Z8530 that is in 'parked' mode.
 *	For machines with PCI Z85x30 cards, or level triggered interrupts
 *	(eg the MacII) we must clear the interrupt cause or die.
 */

static void z8530_status_clear(struct z8530_channel *chan)
{
	u8 status=read_zsreg(chan, R0);
	if(status&TxEOM)
		write_zsctrl(chan, ERR_RES);
	write_zsctrl(chan, RES_EXT_INT);
	write_zsctrl(chan, RES_H_IUS);
}

struct z8530_irqhandler z8530_nop=
{
	z8530_rx_clear,
	z8530_tx_clear,
	z8530_status_clear
};


EXPORT_SYMBOL(z8530_nop);

/**
 *	z8530_interrupt - Handle an interrupt from a Z8530
 *	@irq:	Interrupt number
 *	@dev_id: The Z8530 device that is interrupting.
 *	@regs: unused
 *
 *	A Z85[2]30 device has stuck its hand in the air for attention.
 *	We scan both the channels on the chip for events and then call
 *	the channel specific call backs for each channel that has events.
 *	We have to use callback functions because the two channels can be
 *	in different modes.
 *
 *	Locking is done for the handlers. Note that locking is done
 *	at the chip level (the 5uS delay issue is per chip not per
 *	channel). c->lock for both channels points to dev->lock
 */

irqreturn_t z8530_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct z8530_dev *dev=dev_id;
	u8 intr;
	static volatile int locker=0;
	int work=0;
	struct z8530_irqhandler *irqs;

	if(locker)
	{
		printk(KERN_ERR "IRQ re-enter\n");
		return IRQ_NONE;
	}
	locker=1;

	spin_lock(&dev->lock);

	while(++work<5000)
	{

		intr = read_zsreg(&dev->chanA, R3);
		if(!(intr & (CHARxIP|CHATxIP|CHAEXT|CHBRxIP|CHBTxIP|CHBEXT)))
			break;

		/* This holds the IRQ status. On the 8530 you must read it from chan
		   A even though it applies to the whole chip */

		/* Now walk the chip and see what it is wanting - it may be
		   an IRQ for someone else remember */

		irqs=dev->chanA.irqs;

		if(intr & (CHARxIP|CHATxIP|CHAEXT))
		{
			if(intr&CHARxIP)
				irqs->rx(&dev->chanA);
			if(intr&CHATxIP)
				irqs->tx(&dev->chanA);
			if(intr&CHAEXT)
				irqs->status(&dev->chanA);
		}

		irqs=dev->chanB.irqs;

		if(intr & (CHBRxIP|CHBTxIP|CHBEXT))
		{
			if(intr&CHBRxIP)
				irqs->rx(&dev->chanB);
			if(intr&CHBTxIP)
				irqs->tx(&dev->chanB);
			if(intr&CHBEXT)
				irqs->status(&dev->chanB);
		}
	}
	spin_unlock(&dev->lock);
	if(work==5000)
		printk(KERN_ERR "%s: interrupt jammed - abort(0x%X)!\n", dev->name, intr);
	/* Ok all done */
	locker=0;
	return IRQ_HANDLED;
}

EXPORT_SYMBOL(z8530_interrupt);
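
/*
 *	Usage sketch (editorial illustration, not part of the driver): a card
 *	driver would normally register this handler with its z8530_dev as the
 *	dev_id cookie. The "sv" device and "irq_fail" label below are
 *	hypothetical names belonging to that caller:
 *
 *	if (request_irq(sv.irq, z8530_interrupt, 0, "z85230", &sv))
 *		goto irq_fail;
 */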

static char reg_init[16]=
{
	0,0,0,0,
	0,0,0,0,
	0,0,0,0,
	0x55,0,0,0
};


/**
 *	z8530_sync_open - Open a Z8530 channel for PIO
 *	@dev:	The network interface we are using
 *	@c:	The Z8530 channel to open in synchronous PIO mode
 *
 *	Switch a Z8530 into synchronous mode without DMA assist. We
 *	raise the RTS/DTR and commence network operation.
 */

int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long flags;

	spin_lock_irqsave(c->lock, flags);

	c->sync = 1;
	c->mtu = dev->mtu+64;
	c->count = 0;
	c->skb = NULL;
	c->skb2 = NULL;
	c->irqs = &z8530_sync;

	/* This loads the double buffer up */
	z8530_rx_done(c);	/* Load the frame ring */
	z8530_rx_done(c);	/* Load the backup frame */
	z8530_rtsdtr(c,1);
	c->dma_tx = 0;
	c->regs[R1]|=TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);

	spin_unlock_irqrestore(c->lock, flags);
	return 0;
}


EXPORT_SYMBOL(z8530_sync_open);

/**
 *	z8530_sync_close - Close a PIO Z8530 channel
 *	@dev: Network device to close
 *	@c: Z8530 channel to disassociate and move to idle
 *
 *	Close down a Z8530 interface and switch its interrupt handlers
 *	to discard future events.
 */

int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
{
	u8 chk;
	unsigned long flags;

	spin_lock_irqsave(c->lock, flags);
	c->irqs = &z8530_nop;
	c->max = 0;
	c->sync = 0;

	chk=read_zsreg(c,R0);
	write_zsreg(c, R3, c->regs[R3]);
	z8530_rtsdtr(c,0);

	spin_unlock_irqrestore(c->lock, flags);
	return 0;
}

EXPORT_SYMBOL(z8530_sync_close);

/**
 *	z8530_sync_dma_open - Open a Z8530 for DMA I/O
 *	@dev: The network device to attach
 *	@c: The Z8530 channel to configure in sync DMA mode.
 *
 *	Set up a Z85x30 device for synchronous DMA in both directions. Two
 *	ISA DMA channels must be available for this to work. We assume ISA
 *	DMA driven I/O and PC limits on access.
 */

int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long cflags, dflags;

	c->sync = 1;
	c->mtu = dev->mtu+64;
	c->count = 0;
	c->skb = NULL;
	c->skb2 = NULL;
	/*
	 *	Load the DMA interfaces up
	 */
	c->rxdma_on = 0;
	c->txdma_on = 0;

	/*
	 *	Allocate the DMA flip buffers. Limit by page size.
	 *	Everyone runs 1500 mtu or less on wan links so this
	 *	should be fine.
	 */

	if(c->mtu  > PAGE_SIZE/2)
		return -EMSGSIZE;

	c->rx_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if(c->rx_buf[0]==NULL)
		return -ENOBUFS;
	c->rx_buf[1]=c->rx_buf[0]+PAGE_SIZE/2;

	c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if(c->tx_dma_buf[0]==NULL)
	{
		free_page((unsigned long)c->rx_buf[0]);
		c->rx_buf[0]=NULL;
		return -ENOBUFS;
	}
	c->tx_dma_buf[1]=c->tx_dma_buf[0]+PAGE_SIZE/2;

	c->tx_dma_used=0;
	c->dma_tx = 1;
	c->dma_num=0;
	c->dma_ready=1;

	/*
	 *	Enable DMA control mode
	 */

	spin_lock_irqsave(c->lock, cflags);

	/*
	 *	TX DMA via DIR/REQ
	 */

	c->regs[R14]|= DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	c->regs[R1]&= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/*
	 *	RX DMA via W/Req
	 */

	c->regs[R1]|= WT_FN_RDYFN;
	c->regs[R1]|= WT_RDY_RT;
	c->regs[R1]|= INT_ERR_Rx;
	c->regs[R1]&= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R1]|= WT_RDY_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/*
	 *	DMA interrupts
	 */

	/*
	 *	Set up the DMA configuration
	 */

	dflags=claim_dma_lock();

	disable_dma(c->rxdma);
	clear_dma_ff(c->rxdma);
	set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
	set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
	set_dma_count(c->rxdma, c->mtu);
	enable_dma(c->rxdma);

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	set_dma_mode(c->txdma, DMA_MODE_WRITE);
	disable_dma(c->txdma);

	release_dma_lock(dflags);

	/*
	 *	Select the DMA interrupt handlers
	 */

	c->rxdma_on = 1;
	c->txdma_on = 1;
	c->tx_dma_used = 1;

	c->irqs = &z8530_dma_sync;
	z8530_rtsdtr(c,1);
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);

	spin_unlock_irqrestore(c->lock, cflags);

	return 0;
}

EXPORT_SYMBOL(z8530_sync_dma_open);

/**
 *	z8530_sync_dma_close - Close down DMA I/O
 *	@dev: Network device to detach
 *	@c: Z8530 channel to move into discard mode
 *
 *	Shut down a DMA mode synchronous interface. Halt the DMA, and
 *	free the buffers.
 */

int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
{
	u8 chk;
	unsigned long flags;

	c->irqs = &z8530_nop;
	c->max = 0;
	c->sync = 0;

	/*
	 *	Disable the PC DMA channels
	 */

	flags=claim_dma_lock();
	disable_dma(c->rxdma);
	clear_dma_ff(c->rxdma);

	c->rxdma_on = 0;

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	release_dma_lock(flags);

	c->txdma_on = 0;
	c->tx_dma_used = 0;

	spin_lock_irqsave(c->lock, flags);

	/*
	 *	Disable DMA control mode
	 */

	c->regs[R1]&= ~WT_RDY_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
	c->regs[R1]|= INT_ALL_Rx;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R14]&= ~DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	if(c->rx_buf[0])
	{
		free_page((unsigned long)c->rx_buf[0]);
		c->rx_buf[0]=NULL;
	}
	if(c->tx_dma_buf[0])
	{
		free_page((unsigned long)c->tx_dma_buf[0]);
		c->tx_dma_buf[0]=NULL;
	}
	chk=read_zsreg(c,R0);
	write_zsreg(c, R3, c->regs[R3]);
	z8530_rtsdtr(c,0);

	spin_unlock_irqrestore(c->lock, flags);

	return 0;
}

EXPORT_SYMBOL(z8530_sync_dma_close);

/**
 *	z8530_sync_txdma_open - Open a Z8530 for TX driven DMA
 *	@dev: The network device to attach
 *	@c: The Z8530 channel to configure in sync DMA mode.
 *
 *	Set up a Z85x30 device for synchronous DMA transmission. One
 *	ISA DMA channel must be available for this to work. The receive
 *	side is run in PIO mode, but then it has the bigger FIFO.
 */

int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long cflags, dflags;

	printk(KERN_INFO "Opening sync interface for TX-DMA\n");
	c->sync = 1;
	c->mtu = dev->mtu+64;
	c->count = 0;
	c->skb = NULL;
	c->skb2 = NULL;

	/*
	 *	Allocate the DMA flip buffers. Limit by page size.
	 *	Everyone runs 1500 mtu or less on wan links so this
	 *	should be fine.
	 */

	if(c->mtu  > PAGE_SIZE/2)
		return -EMSGSIZE;

	c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if(c->tx_dma_buf[0]==NULL)
		return -ENOBUFS;

	c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE/2;


	spin_lock_irqsave(c->lock, cflags);

	/*
	 *	Load the PIO receive ring
	 */

	z8530_rx_done(c);
	z8530_rx_done(c);

	/*
	 *	Load the DMA interfaces up
	 */

	c->rxdma_on = 0;
	c->txdma_on = 0;

	c->tx_dma_used=0;
	c->dma_num=0;
	c->dma_ready=1;
	c->dma_tx = 1;

	/*
	 *	Enable DMA control mode
	 */

	/*
	 *	TX DMA via DIR/REQ
	 */
	c->regs[R14]|= DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	c->regs[R1]&= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/*
	 *	Set up the DMA configuration
	 */

	dflags = claim_dma_lock();

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	set_dma_mode(c->txdma, DMA_MODE_WRITE);
	disable_dma(c->txdma);

	release_dma_lock(dflags);

	/*
	 *	Select the DMA interrupt handlers
	 */

	c->rxdma_on = 0;
	c->txdma_on = 1;
	c->tx_dma_used = 1;

	c->irqs = &z8530_txdma_sync;
	z8530_rtsdtr(c,1);
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
	spin_unlock_irqrestore(c->lock, cflags);

	return 0;
}

EXPORT_SYMBOL(z8530_sync_txdma_open);

/**
 *	z8530_sync_txdma_close - Close down a TX driven DMA channel
 *	@dev: Network device to detach
 *	@c: Z8530 channel to move into discard mode
 *
 *	Shut down a DMA/PIO split mode synchronous interface. Halt the DMA,
 *	and free the buffers.
 */

int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long dflags, cflags;
	u8 chk;


	spin_lock_irqsave(c->lock, cflags);

	c->irqs = &z8530_nop;
	c->max = 0;
	c->sync = 0;

	/*
	 *	Disable the PC DMA channels
	 */

	dflags = claim_dma_lock();

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	c->txdma_on = 0;
	c->tx_dma_used = 0;

	release_dma_lock(dflags);

	/*
	 *	Disable DMA control mode
	 */

	c->regs[R1]&= ~WT_RDY_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
	c->regs[R1]|= INT_ALL_Rx;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R14]&= ~DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	if(c->tx_dma_buf[0])
	{
		free_page((unsigned long)c->tx_dma_buf[0]);
		c->tx_dma_buf[0]=NULL;
	}
	chk=read_zsreg(c,R0);
	write_zsreg(c, R3, c->regs[R3]);
	z8530_rtsdtr(c,0);

	spin_unlock_irqrestore(c->lock, cflags);
	return 0;
}


EXPORT_SYMBOL(z8530_sync_txdma_close);


/*
 *	Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
 *	it exists...
 */

static char *z8530_type_name[]={
	"Z8530",
	"Z85C30",
	"Z85230"
};

/**
 *	z8530_describe - Uniformly describe a Z8530 port
 *	@dev: Z8530 device to describe
 *	@mapping: string holding mapping type (eg "I/O" or "Mem")
 *	@io: the port value in question
 *
 *	Describe a Z8530 in a standard format. We must pass the I/O as
 *	the port offset isn't predictable. The main reason for this function
 *	is to try and get a common format of report.
 */

void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
{
	printk(KERN_INFO "%s: %s found at %s 0x%lX, IRQ %d.\n",
		dev->name,
		z8530_type_name[dev->type],
		mapping,
		Z8530_PORT_OF(io),
		dev->irq);
}

EXPORT_SYMBOL(z8530_describe);
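
/*
 *	For illustration only (not part of the driver): a typical probe
 *	routine reports its port with a call like the one below, where "sv"
 *	and "iobase" are the caller's own hypothetical device and resource
 *	names:
 *
 *	z8530_describe(&sv, "I/O", iobase);
 */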

/*
 *	Locked operation part of the z8530 init code
 */

static inline int do_z8530_init(struct z8530_dev *dev)
{
	/* NOP the interrupt handlers first - we might get a
	   floating IRQ transition when we reset the chip */
	dev->chanA.irqs=&z8530_nop;
	dev->chanB.irqs=&z8530_nop;
	dev->chanA.dcdcheck=DCD;
	dev->chanB.dcdcheck=DCD;

	/* Reset the chip */
	write_zsreg(&dev->chanA, R9, 0xC0);
	udelay(200);
	/* Now check it's valid */
	write_zsreg(&dev->chanA, R12, 0xAA);
	if(read_zsreg(&dev->chanA, R12)!=0xAA)
		return -ENODEV;
	write_zsreg(&dev->chanA, R12, 0x55);
	if(read_zsreg(&dev->chanA, R12)!=0x55)
		return -ENODEV;

	dev->type=Z8530;

	/*
	 *	See the application note.
	 */

	write_zsreg(&dev->chanA, R15, 0x01);

	/*
	 *	If we can set the low bit of R15 then
	 *	the chip is enhanced.
	 */

	if(read_zsreg(&dev->chanA, R15)==0x01)
	{
		/* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
		/* Put a char in the fifo */
		write_zsreg(&dev->chanA, R8, 0);
		if(read_zsreg(&dev->chanA, R0)&Tx_BUF_EMP)
			dev->type = Z85230;	/* Has a FIFO */
		else
			dev->type = Z85C30;	/* Z85C30, 1 byte FIFO */
	}

	/*
	 *	The code assumes R7' and friends are
	 *	off. Use write_zsext() for these and keep
	 *	this bit clear.
	 */

	write_zsreg(&dev->chanA, R15, 0);

	/*
	 *	At this point it looks like the chip is behaving
	 */

	memcpy(dev->chanA.regs, reg_init, 16);
	memcpy(dev->chanB.regs, reg_init, 16);

	return 0;
}

/**
 *	z8530_init - Initialise a Z8530 device
 *	@dev: Z8530 device to initialise.
 *
 *	Configure up a Z8530/Z85C30 or Z85230 chip. We check the device
 *	is present, identify the type and then program it to hopefully
 *	keep quiet and behave. This matters a lot, a Z8530 in the wrong
 *	state will sometimes get into stupid modes generating 10kHz
 *	interrupt streams and the like.
 *
 *	We set the interrupt handler up to discard any events, in case
 *	we get them during reset or setup.
 *
 *	Return 0 for success, or a negative value indicating the problem
 *	in errno form.
 */

int z8530_init(struct z8530_dev *dev)
{
	unsigned long flags;
	int ret;

	/* Set up the chip level lock */
	spin_lock_init(&dev->lock);
	dev->chanA.lock = &dev->lock;
	dev->chanB.lock = &dev->lock;

	spin_lock_irqsave(&dev->lock, flags);
	ret = do_z8530_init(dev);
	spin_unlock_irqrestore(&dev->lock, flags);

	return ret;
}


EXPORT_SYMBOL(z8530_init);

/**
 *	z8530_shutdown - Shutdown a Z8530 device
 *	@dev: The Z8530 chip to shutdown
 *
 *	We set the interrupt handlers to silence any interrupts. We then
 *	reset the chip and wait 100uS to be sure the reset completed. Just
 *	in case the caller then tries to do stuff.
 *
 *	This is called without the lock held
 */

int z8530_shutdown(struct z8530_dev *dev)
{
	unsigned long flags;
	/* Reset the chip */

	spin_lock_irqsave(&dev->lock, flags);
	dev->chanA.irqs=&z8530_nop;
	dev->chanB.irqs=&z8530_nop;
	write_zsreg(&dev->chanA, R9, 0xC0);
	/* We must lock the udelay, the chip is offlimits here */
	udelay(100);
	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}

EXPORT_SYMBOL(z8530_shutdown);

/**
 *	z8530_channel_load - Load channel data
 *	@c: Z8530 channel to configure
 *	@rtable: table of register, value pairs
 *	FIXME: ioctl to allow user uploaded tables
 *
 *	Load a Z8530 channel up from the system data. We use +16 to
 *	indicate the "prime" registers. The value 255 terminates the
 *	table.
 */

int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
{
	unsigned long flags;

	spin_lock_irqsave(c->lock, flags);

	while(*rtable!=255)
	{
		int reg=*rtable++;
		if(reg>0x0F)
			write_zsreg(c, R15, c->regs[15]|1);
		write_zsreg(c, reg&0x0F, *rtable);
		if(reg>0x0F)
			write_zsreg(c, R15, c->regs[15]&~1);
		c->regs[reg]=*rtable++;
	}
	c->rx_function=z8530_null_rx;
	c->skb=NULL;
	c->tx_skb=NULL;
	c->tx_next_skb=NULL;
	c->mtu=1500;
	c->max=0;
	c->count=0;
	c->status=read_zsreg(c, R0);
	c->sync=1;
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);

	spin_unlock_irqrestore(c->lock, flags);
	return 0;
}

EXPORT_SYMBOL(z8530_channel_load);
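
/*
 *	Bring-up sketch (editorial illustration, not part of the driver): a
 *	board driver would typically initialise the chip and then load one of
 *	the register tables above before opening a channel. The "sv" device
 *	and "netdev" pointer below are hypothetical names owned by that
 *	caller:
 *
 *	if (z8530_init(&sv) == 0) {
 *		z8530_channel_load(&sv.chanA, z8530_hdlc_kilostream);
 *		z8530_sync_open(netdev, &sv.chanA);
 *	}
 */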


/**
 *	z8530_tx_begin - Begin packet transmission
 *	@c: The Z8530 channel to kick
 *
 *	This is the speed sensitive side of transmission. If we are called
 *	and no buffer is being transmitted we commence the next buffer. If
 *	nothing is queued we idle the sync.
 *
 *	Note: We are handling this code path in the interrupt path, keep it
 *	fast or bad things will happen.
 *
 *	Called with the lock held.
 */

static void z8530_tx_begin(struct z8530_channel *c)
{
	unsigned long flags;
	if(c->tx_skb)
		return;

	c->tx_skb=c->tx_next_skb;
	c->tx_next_skb=NULL;
	c->tx_ptr=c->tx_next_ptr;

	if(c->tx_skb==NULL)
	{
		/* Idle on */
		if(c->dma_tx)
		{
			flags=claim_dma_lock();
			disable_dma(c->txdma);
			/*
			 *	Check if we crapped out.
			 */
			if(get_dma_residue(c->txdma))
			{
				c->stats.tx_dropped++;
				c->stats.tx_fifo_errors++;
			}
			release_dma_lock(flags);
		}
		c->txcount=0;
	}
	else
	{
		c->txcount=c->tx_skb->len;


		if(c->dma_tx)
		{
			/*
			 *	FIXME. DMA is broken for the original 8530,
			 *	on the older parts we need to set a flag and
			 *	wait for a further TX interrupt to fire this
			 *	stage off
			 */

			flags=claim_dma_lock();
			disable_dma(c->txdma);

			/*
			 *	These two are needed by the 8530/85C30
			 *	and must be issued when idling.
			 */

			if(c->dev->type!=Z85230)
			{
				write_zsctrl(c, RES_Tx_CRC);
				write_zsctrl(c, RES_EOM_L);
			}
			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
			clear_dma_ff(c->txdma);
			set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
			set_dma_count(c->txdma, c->txcount);
			enable_dma(c->txdma);
			release_dma_lock(flags);
			write_zsctrl(c, RES_EOM_L);
			write_zsreg(c, R5, c->regs[R5]|TxENAB);
		}
		else
		{

			/* ABUNDER off */
			write_zsreg(c, R10, c->regs[10]);
			write_zsctrl(c, RES_Tx_CRC);

			while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP))
			{
				write_zsreg(c, R8, *c->tx_ptr++);
				c->txcount--;
			}

		}
	}
	/*
	 *	Since we emptied tx_skb we can ask for more
	 */
	netif_wake_queue(c->netdevice);
}

/**
 *	z8530_tx_done - TX complete callback
 *	@c: The channel that completed a transmit.
 *
 *	This is called when we complete a packet send. We wake the queue,
 *	start the next packet going and then free the buffer of the existing
 *	packet. This code is fairly timing sensitive.
 *
 *	Called with the register lock held.
 */

static void z8530_tx_done(struct z8530_channel *c)
{
	struct sk_buff *skb;

	/* Actually this can happen. */
	if(c->tx_skb==NULL)
		return;

	skb=c->tx_skb;
	c->tx_skb=NULL;
	z8530_tx_begin(c);
	c->stats.tx_packets++;
	c->stats.tx_bytes+=skb->len;
	dev_kfree_skb_irq(skb);
}

/**
 *	z8530_null_rx - Discard a packet
 *	@c: The channel the packet arrived on
 *	@skb: The buffer
 *
 *	We point the receive handler at this function when idle. Instead
 *	of syncppp processing the frames we get to throw them away.
 */

void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

EXPORT_SYMBOL(z8530_null_rx);

/**
 *	z8530_rx_done - Receive completion callback
 *	@c: The channel that completed a receive
 *
 *	A new packet is complete. Our goal here is to get back into receive
 *	mode as fast as possible. On the Z85230 we could change to using
 *	ESCC mode, but on the older chips we have no choice. We flip to the
 *	new buffer immediately in DMA mode so that the DMA of the next
 *	frame can occur while we are copying the previous buffer to an sk_buff
 *
 *	Called with the lock held
 */

static void z8530_rx_done(struct z8530_channel *c)
{
	struct sk_buff *skb;
	int ct;

	/*
	 *	Is our receive engine in DMA mode
	 */

	if(c->rxdma_on)
	{
		/*
		 *	Save the ready state and the buffer currently
		 *	being used as the DMA target
		 */

		int ready=c->dma_ready;
		unsigned char *rxb=c->rx_buf[c->dma_num];
		unsigned long flags;

		/*
		 *	Complete this DMA. Necessary to find the length
		 */

		flags=claim_dma_lock();

		disable_dma(c->rxdma);
		clear_dma_ff(c->rxdma);
		c->rxdma_on=0;
		ct=c->mtu-get_dma_residue(c->rxdma);
		if(ct<0)
			ct=2;	/* Shit happens.. */
		c->dma_ready=0;

		/*
		 *	Normal case: the other slot is free, start the next DMA
		 *	into it immediately.
		 */

		if(ready)
		{
			c->dma_num^=1;
			set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
			set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
			set_dma_count(c->rxdma, c->mtu);
			c->rxdma_on = 1;
			enable_dma(c->rxdma);
			/* Stop any frames that we missed the head of
			   from passing */
			write_zsreg(c, R0, RES_Rx_CRC);
		}
		else
			/* Can't occur as we don't re-enable the DMA irq until
			   after the flip is done */
			printk(KERN_WARNING "%s: DMA flip overrun!\n", c->netdevice->name);

		release_dma_lock(flags);

		/*
		 *	Shove the old buffer into an sk_buff. We can't DMA
		 *	directly into one on a PC - it might be above the 16Mb
		 *	boundary. Optimisation - we could check to see if we
		 *	can avoid the copy. Optimisation 2 - make the memcpy
		 *	a copychecksum.
		 */

		skb=dev_alloc_skb(ct);
		if(skb==NULL)
		{
			c->stats.rx_dropped++;
			printk(KERN_WARNING "%s: Memory squeeze.\n", c->netdevice->name);
		}
		else
		{
			skb_put(skb, ct);
			memcpy(skb->data, rxb, ct);
			c->stats.rx_packets++;
			c->stats.rx_bytes+=ct;
		}
		c->dma_ready=1;
	}
	else
	{
		RT_LOCK;
		skb=c->skb;

		/*
		 *	The game we play for non DMA is similar. We want to
		 *	get the controller set up for the next packet as fast
		 *	as possible. We potentially only have one byte + the
		 *	fifo length for this. Thus we want to flip to the new
		 *	buffer and then mess around copying and allocating
		 *	things. For the current case it doesn't matter but
		 *	if you build a system where the sync irq isn't blocked
		 *	by the kernel IRQ disable then you need only block the
		 *	sync IRQ for the RT_LOCK area.
		 */
		ct=c->count;

		c->skb = c->skb2;
		c->count = 0;
		c->max = c->mtu;
		if(c->skb)
		{
			c->dptr = c->skb->data;
			c->max = c->mtu;
		}
		else
		{
			c->count = 0;
			c->max = 0;
		}
		RT_UNLOCK;

		c->skb2 = dev_alloc_skb(c->mtu);
		if(c->skb2==NULL)
			printk(KERN_WARNING "%s: memory squeeze.\n",
				c->netdevice->name);
		else
			skb_put(c->skb2, c->mtu);
		c->stats.rx_packets++;
		c->stats.rx_bytes+=ct;
	}
	/*
	 *	If we received a frame we must now process it.
	 */
	if(skb)
	{
		skb_trim(skb, ct);
		c->rx_function(c,skb);
	}
	else
	{
		c->stats.rx_dropped++;
		printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name);
	}
}
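
/*
 *	Hypothetical sketch (not part of the original file) of the kind of
 *	receive hook a card driver installs in rx_function so that completed
 *	frames reach the stack. The function name and the exact protocol
 *	handling are assumptions modelled on the syncppp-based card drivers
 *	of this era.
 */
#if 0
static void example_input(struct z8530_channel *c, struct sk_buff *skb)
{
	/* Tag the frame as synchronous PPP and hand it to the network core */
	skb->protocol = htons(ETH_P_WAN_PPP);
	skb->mac.raw = skb->data;
	skb->dev = c->netdevice;
	netif_rx(skb);
}
#endif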

/**
 *	spans_boundary - Check whether a packet can be ISA DMA'd
 *	@skb: The buffer to check
 *
 *	Returns true if the buffer crosses a 64K DMA boundary on a PC. The
 *	poor thing can only DMA within a 64K block, not across the edges
 *	of it.
 */

static inline int spans_boundary(struct sk_buff *skb)
{
	unsigned long a=(unsigned long)skb->data;
	a^=(a+skb->len);
	if(a&0x00010000)	/* If the 64K bit is different.. */
		return 1;
	return 0;
}
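
/*
 *	Worked example (illustrative numbers only): a buffer at 0x0FFF0 of
 *	length 0x20 ends at 0x10010; 0x0FFF0 ^ 0x10010 = 0x1FFE0, which has
 *	bit 16 set, so the buffer straddles a 64K boundary and the check
 *	returns 1. A buffer at 0x20000 of the same length gives
 *	0x20000 ^ 0x20020 = 0x20, bit 16 clear, so it returns 0.
 */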

/**
 *	z8530_queue_xmit - Queue a packet
 *	@c: The channel to use
 *	@skb: The packet to kick down the channel
 *
 *	Queue a packet for transmission. The per-packet interrupt deadlines
 *	on the Z85230 are hard to meet even in DMA mode, so if the packet
 *	needs copying into the DMA bounce buffer we do that here rather
 *	than in the IRQ handler.
 *
 *	Called from the network code. The lock is not held at this
 *	point.
 */

int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
{
	unsigned long flags;

	netif_stop_queue(c->netdevice);
	if(c->tx_next_skb)
		return 1;

	/* PC SPECIFIC - DMA limits */

	/*
	 *	If we will DMA the transmit and it has gone over the ISA bus
	 *	limit, then copy to the flip buffer
	 */

	if(c->dma_tx &&
	   ((unsigned long)(virt_to_bus(skb->data+skb->len))>=16*1024*1024 ||
	    spans_boundary(skb)))
	{
		/*
		 *	Send the flip buffer, and flip the flippy bit.
		 *	We don't care which is used when, just so long as
		 *	we never use the same buffer twice in a row. Since
		 *	only one buffer can be going out at a time the other
		 *	has to be safe.
		 */
		c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used];
		c->tx_dma_used^=1;	/* Flip temp buffer */
		memcpy(c->tx_next_ptr, skb->data, skb->len);
	}
	else
		c->tx_next_ptr=skb->data;
	RT_LOCK;
	c->tx_next_skb=skb;
	RT_UNLOCK;

	spin_lock_irqsave(c->lock, flags);
	z8530_tx_begin(c);
	spin_unlock_irqrestore(c->lock, flags);

	return 0;
}

EXPORT_SYMBOL(z8530_queue_xmit);
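
/*
 *	Minimal sketch (an assumption, not part of the original driver) of
 *	how a card driver might wire z8530_queue_xmit into its
 *	hard_start_xmit hook. Keeping the channel pointer in dev->priv and
 *	the function name are purely illustrative.
 */
#if 0
static int example_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct z8530_channel *chan = dev->priv;

	/* A non-zero return tells the stack the packet was not taken;
	   the queue has already been stopped so it will retry later. */
	return z8530_queue_xmit(chan, skb);
}
#endif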

/**
 *	z8530_get_stats - Get network statistics
 *	@c: The channel to use
 *
 *	Get the statistics block. We keep the statistics in software as
 *	the chip doesn't do it for us.
 *
 *	Locking is ignored here - we could lock for a copy but it's
 *	not likely to be that big an issue.
 */

struct net_device_stats *z8530_get_stats(struct z8530_channel *c)
{
	return &c->stats;
}

EXPORT_SYMBOL(z8530_get_stats);
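
/*
 *	Hypothetical sketch of exposing these counters through a card
 *	driver's get_stats hook; as above, dev->priv holding the channel
 *	pointer is an assumption for illustration only.
 */
#if 0
static struct net_device_stats *example_get_stats(struct net_device *dev)
{
	struct z8530_channel *chan = dev->priv;

	return z8530_get_stats(chan);
}
#endif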

/*
 *	Module support
 */
static char banner[] __initdata = KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";

static int __init z85230_init_driver(void)
{
	printk(banner);
	return 0;
}
module_init(z85230_init_driver);

static void __exit z85230_cleanup_driver(void)
{
}
module_exit(z85230_cleanup_driver);

MODULE_AUTHOR("Red Hat Inc.");
MODULE_DESCRIPTION("Z85x30 synchronous driver core");
MODULE_LICENSE("GPL");