#ifdef __KERNEL__
#ifndef _PPC_IO_H
#define _PPC_IO_H

#include <linux/string.h>
#include <linux/types.h>

#include <asm/page.h>
#include <asm/byteorder.h>
#include <asm/synch.h>
#include <asm/mmu.h>

#define SIO_CONFIG_RA		0x398
#define SIO_CONFIG_RD		0x399

#define SLOW_DOWN_IO

#define PMAC_ISA_MEM_BASE	0
#define PMAC_PCI_DRAM_OFFSET	0
#define CHRP_ISA_IO_BASE	0xf8000000
#define CHRP_ISA_MEM_BASE	0xf7000000
#define CHRP_PCI_DRAM_OFFSET	0
#define PREP_ISA_IO_BASE	0x80000000
#define PREP_ISA_MEM_BASE	0xc0000000
#define PREP_PCI_DRAM_OFFSET	0x80000000

#if defined(CONFIG_4xx)
#include <asm/ibm4xx.h>
#elif defined(CONFIG_8xx)
#include <asm/mpc8xx.h>
#elif defined(CONFIG_8260)
#include <asm/mpc8260.h>
#elif defined(CONFIG_APUS) || !defined(CONFIG_PCI)
#define _IO_BASE	0
#define _ISA_MEM_BASE	0
#define PCI_DRAM_OFFSET	0
#else /* Everyone else */
#define _IO_BASE	isa_io_base
#define _ISA_MEM_BASE	isa_mem_base
#define PCI_DRAM_OFFSET	pci_dram_offset
#endif /* Platform-dependent I/O */

#define ___IO_BASE ((void __iomem *)_IO_BASE)
extern unsigned long isa_io_base;
extern unsigned long isa_mem_base;
extern unsigned long pci_dram_offset;

/*
 * 8, 16 and 32 bit, big and little endian I/O operations, with barrier.
 *
 * Read operations have additional twi & isync to make sure the read
 * is actually performed (i.e. the data has come back) before we start
 * executing any following instructions.
 */
extern inline int in_8(const volatile unsigned char __iomem *addr)
{
	int ret;

	__asm__ __volatile__(
		"sync; lbz%U1%X1 %0,%1;\n"
		"twi 0,%0,0;\n"
		"isync" : "=r" (ret) : "m" (*addr));
	return ret;
}

extern inline void out_8(volatile unsigned char __iomem *addr, int val)
{
	__asm__ __volatile__("stb%U0%X0 %1,%0; eieio" : "=m" (*addr) : "r" (val));
}

extern inline int in_le16(const volatile unsigned short __iomem *addr)
{
	int ret;

	__asm__ __volatile__("sync; lhbrx %0,0,%1;\n"
			     "twi 0,%0,0;\n"
			     "isync" : "=r" (ret) :
			     "r" (addr), "m" (*addr));
	return ret;
}

extern inline int in_be16(const volatile unsigned short __iomem *addr)
{
	int ret;

	__asm__ __volatile__("sync; lhz%U1%X1 %0,%1;\n"
			     "twi 0,%0,0;\n"
			     "isync" : "=r" (ret) : "m" (*addr));
	return ret;
}

extern inline void out_le16(volatile unsigned short __iomem *addr, int val)
{
	__asm__ __volatile__("sync; sthbrx %1,0,%2" : "=m" (*addr) :
			     "r" (val), "r" (addr));
}

extern inline void out_be16(volatile unsigned short __iomem *addr, int val)
{
	__asm__ __volatile__("sync; sth%U0%X0 %1,%0" : "=m" (*addr) : "r" (val));
}

extern inline unsigned in_le32(const volatile unsigned __iomem *addr)
{
	unsigned ret;

	__asm__ __volatile__("sync; lwbrx %0,0,%1;\n"
			     "twi 0,%0,0;\n"
			     "isync" : "=r" (ret) :
			     "r" (addr), "m" (*addr));
	return ret;
}

extern inline unsigned in_be32(const volatile unsigned __iomem *addr)
{
	unsigned ret;

	__asm__ __volatile__("sync; lwz%U1%X1 %0,%1;\n"
			     "twi 0,%0,0;\n"
			     "isync" : "=r" (ret) : "m" (*addr));
	return ret;
}

extern inline void out_le32(volatile unsigned __iomem *addr, int val)
{
	__asm__ __volatile__("sync; stwbrx %1,0,%2" : "=m" (*addr) :
			     "r" (val), "r" (addr));
}

extern inline void out_be32(volatile unsigned __iomem *addr, int val)
{
	__asm__ __volatile__("sync; stw%U0%X0 %1,%0" : "=m" (*addr) : "r" (val));
}
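
/*
 * Usage sketch (the register block pointer and offsets below are
 * hypothetical, not part of this interface): read a big-endian
 * status word and post a little-endian 16-bit command.
 *
 *	unsigned int status = in_be32(dev_regs + STATUS_REG);
 *	out_le16(cmd_reg, 0x0001);
 *
 * Each accessor issues its own sync/eieio, so back-to-back accesses
 * to the same device stay in order without extra barriers.
 */
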
#if defined (CONFIG_8260_PCI9)
#define readb(addr) in_8((volatile u8 *)(addr))
#define writeb(b,addr) out_8((volatile u8 *)(addr), (b))
#else
static inline __u8 readb(const volatile void __iomem *addr)
{
	return in_8(addr);
}
static inline void writeb(__u8 b, volatile void __iomem *addr)
{
	out_8(addr, b);
}
#endif

#if defined(CONFIG_APUS)
static inline __u16 readw(const volatile void __iomem *addr)
{
	return *(__force volatile __u16 *)(addr);
}
static inline __u32 readl(const volatile void __iomem *addr)
{
	return *(__force volatile __u32 *)(addr);
}
static inline void writew(__u16 b, volatile void __iomem *addr)
{
	*(__force volatile __u16 *)(addr) = b;
}
static inline void writel(__u32 b, volatile void __iomem *addr)
{
	*(__force volatile __u32 *)(addr) = b;
}
#elif defined (CONFIG_8260_PCI9)
/* Use macros if PCI9 workaround enabled */
#define readw(addr) in_le16((volatile u16 *)(addr))
#define readl(addr) in_le32((volatile u32 *)(addr))
#define writew(b,addr) out_le16((volatile u16 *)(addr),(b))
#define writel(b,addr) out_le32((volatile u32 *)(addr),(b))
#else
static inline __u16 readw(const volatile void __iomem *addr)
{
	return in_le16(addr);
}
static inline __u32 readl(const volatile void __iomem *addr)
{
	return in_le32(addr);
}
static inline void writew(__u16 b, volatile void __iomem *addr)
{
	out_le16(addr, b);
}
static inline void writel(__u32 b, volatile void __iomem *addr)
{
	out_le32(addr, b);
}
#endif /* CONFIG_APUS */

#define readb_relaxed(addr) readb(addr)
#define readw_relaxed(addr) readw(addr)
#define readl_relaxed(addr) readl(addr)

static inline __u8 __raw_readb(const volatile void __iomem *addr)
{
	return *(__force volatile __u8 *)(addr);
}
static inline __u16 __raw_readw(const volatile void __iomem *addr)
{
	return *(__force volatile __u16 *)(addr);
}
static inline __u32 __raw_readl(const volatile void __iomem *addr)
{
	return *(__force volatile __u32 *)(addr);
}
static inline void __raw_writeb(__u8 b, volatile void __iomem *addr)
{
	*(__force volatile __u8 *)(addr) = b;
}
static inline void __raw_writew(__u16 b, volatile void __iomem *addr)
{
	*(__force volatile __u16 *)(addr) = b;
}
static inline void __raw_writel(__u32 b, volatile void __iomem *addr)
{
	*(__force volatile __u32 *)(addr) = b;
}

#define mmiowb()

/*
 * The insw/outsw/insl/outsl macros don't do byte-swapping.
 * They are only used in practice for transferring buffers which
 * are arrays of bytes, and byte-swapping is not appropriate in
 * that case.  - paulus
 */
#define insb(port, buf, ns)	_insb((port)+___IO_BASE, (buf), (ns))
#define outsb(port, buf, ns)	_outsb((port)+___IO_BASE, (buf), (ns))
#define insw(port, buf, ns)	_insw_ns((port)+___IO_BASE, (buf), (ns))
#define outsw(port, buf, ns)	_outsw_ns((port)+___IO_BASE, (buf), (ns))
#define insl(port, buf, nl)	_insl_ns((port)+___IO_BASE, (buf), (nl))
#define outsl(port, buf, nl)	_outsl_ns((port)+___IO_BASE, (buf), (nl))
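
/*
 * Example (hypothetical port number; a sketch only): pull one
 * 512-byte sector from an IDE-style data port.  insw() moves 16-bit
 * quantities with no byte-swapping, which is what we want when the
 * buffer is really a byte stream.
 *
 *	u16 sector[256];
 *	insw(0x1f0, sector, 256);
 */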

/*
 * On powermacs and 8xx we will get a machine check exception
 * if we try to read data from a non-existent I/O port.  Because
 * the machine check is an asynchronous exception, it isn't
 * well-defined which instruction SRR0 will point to when the
 * exception occurs.
 * With the sequence below (twi; isync; nop), we have found that
 * the machine check occurs on one of the three instructions on
 * all PPC implementations tested so far.  The twi and isync are
 * needed on the 601 (in fact twi; sync works too), the isync and
 * nop are needed on 604[e|r], and any of twi, sync or isync will
 * work on 603[e], 750, 74xx.
 * The twi creates an explicit data dependency on the returned
 * value which seems to be needed to make the 601 wait for the
 * load to finish.
 */

#define __do_in_asm(name, op)				\
extern __inline__ unsigned int name(unsigned int port)	\
{							\
	unsigned int x;					\
	__asm__ __volatile__(				\
		"sync\n"				\
		"0:" op " %0,0,%1\n"			\
		"1: twi 0,%0,0\n"			\
		"2: isync\n"				\
		"3: nop\n"				\
		"4:\n"					\
		".section .fixup,\"ax\"\n"		\
		"5: li %0,-1\n"				\
		"   b 4b\n"				\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
		"   .align 2\n"				\
		"   .long 0b,5b\n"			\
		"   .long 1b,5b\n"			\
		"   .long 2b,5b\n"			\
		"   .long 3b,5b\n"			\
		".previous"				\
		: "=&r" (x)				\
		: "r" (port + ___IO_BASE));		\
	return x;					\
}

#define __do_out_asm(name, op)				\
extern __inline__ void name(unsigned int val, unsigned int port) \
{							\
	__asm__ __volatile__(				\
		"sync\n"				\
		"0:" op " %0,0,%1\n"			\
		"1: sync\n"				\
		"2:\n"					\
		".section __ex_table,\"a\"\n"		\
		"   .align 2\n"				\
		"   .long 0b,2b\n"			\
		"   .long 1b,2b\n"			\
		".previous"				\
		: : "r" (val), "r" (port + ___IO_BASE));\
}

__do_out_asm(outb, "stbx")
#ifdef CONFIG_APUS
__do_in_asm(inb, "lbzx")
__do_in_asm(inw, "lhz%U1%X1")
__do_in_asm(inl, "lwz%U1%X1")
__do_out_asm(outl,"stw%U0%X0")
__do_out_asm(outw, "sth%U0%X0")
#elif defined (CONFIG_8260_PCI9)
/* inb/inw/inl cannot be defined as inline asm if the PCI9 workaround is used */
#define inb(port)	in_8((port)+___IO_BASE)
#define inw(port)	in_le16((port)+___IO_BASE)
#define inl(port)	in_le32((port)+___IO_BASE)
__do_out_asm(outw, "sthbrx")
__do_out_asm(outl, "stwbrx")
#else
__do_in_asm(inb, "lbzx")
__do_in_asm(inw, "lhbrx")
__do_in_asm(inl, "lwbrx")
__do_out_asm(outw, "sthbrx")
__do_out_asm(outl, "stwbrx")

#endif

#define inb_p(port)		inb((port))
#define outb_p(val, port)	outb((val), (port))
#define inw_p(port)		inw((port))
#define outw_p(val, port)	outw((val), (port))
#define inl_p(port)		inl((port))
#define outl_p(val, port)	outl((val), (port))
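
/*
 * Example (hypothetical legacy ISA ports; a sketch only): the usual
 * CMOS RTC dance of selecting register A through port 0x70 and then
 * reading it back through port 0x71.  In the inline-asm variants,
 * the exception table entry catches the machine check from a port
 * that does not respond and the in* call returns -1 (all ones)
 * instead of oopsing.
 *
 *	outb(0x0a, 0x70);
 *	unsigned int v = inb(0x71);
 */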

extern void _insb(volatile u8 __iomem *port, void *buf, long count);
extern void _outsb(volatile u8 __iomem *port, const void *buf, long count);
extern void _insw_ns(volatile u16 __iomem *port, void *buf, long count);
extern void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count);
extern void _insl_ns(volatile u32 __iomem *port, void *buf, long count);
extern void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count);

#define IO_SPACE_LIMIT ~0

#if defined (CONFIG_8260_PCI9)
#define memset_io(a,b,c)	memset((void *)(a),(b),(c))
#define memcpy_fromio(a,b,c)	memcpy((a),(void *)(b),(c))
#define memcpy_toio(a,b,c)	memcpy((void *)(a),(b),(c))
#else
static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
	memset((void __force *)addr, val, count);
}
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
	memcpy(dst, (void __force *) src, count);
}
static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
	memcpy((void __force *) dst, src, count);
}
#endif
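
/*
 * Example (hypothetical mapping and command block; a sketch only):
 * clear a 4K window of device memory, then copy a prebuilt command
 * block into it.
 *
 *	memset_io(win, 0, 0x1000);
 *	memcpy_toio(win + 0x100, cmd, sizeof(cmd));
 */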

#define eth_io_copy_and_sum(a,b,c,d)	eth_copy_and_sum((a),(void __force *)(void __iomem *)(b),(c),(d))

/*
 * Map in an area of physical address space, for accessing
 * I/O devices etc.
 */
extern void __iomem *__ioremap(phys_addr_t address, unsigned long size,
			       unsigned long flags);
extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
#ifdef CONFIG_44x
extern void __iomem *ioremap64(unsigned long long address, unsigned long size);
#endif
#define ioremap_nocache(addr, size)	ioremap((addr), (size))
extern void iounmap(volatile void __iomem *addr);
extern unsigned long iopa(unsigned long addr);
extern unsigned long mm_ptov(unsigned long addr) __attribute_const__;
extern void io_block_mapping(unsigned long virt, phys_addr_t phys,
			     unsigned int size, int flags);
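
/*
 * Typical use (the physical address, size and offsets are made up;
 * error handling trimmed): map a device's register window, poke it,
 * and tear the mapping down again.
 *
 *	void __iomem *regs = ioremap(0xfe000000, 0x1000);
 *	if (regs) {
 *		writel(1, regs + 0x04);
 *		(void)readl(regs);
 *		iounmap(regs);
 *	}
 */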

/*
 * The PCI bus is inherently Little-Endian.  The PowerPC is being
 * run Big-Endian.  Thus all values which cross the [PCI] barrier
 * must be endian-adjusted.  Also, the local DRAM has a different
 * address from the PCI point of view, thus buffer addresses also
 * have to be modified [mapped] appropriately.
 */
extern inline unsigned long virt_to_bus(volatile void * address)
{
#ifndef CONFIG_APUS
	if (address == (void *)0)
		return 0;
	return (unsigned long)address - KERNELBASE + PCI_DRAM_OFFSET;
#else
	return iopa ((unsigned long) address);
#endif
}

extern inline void * bus_to_virt(unsigned long address)
{
#ifndef CONFIG_APUS
	if (address == 0)
		return NULL;
	return (void *)(address - PCI_DRAM_OFFSET + KERNELBASE);
#else
	return (void*) mm_ptov (address);
#endif
}
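
/*
 * Illustration (a kmalloc'd buffer is assumed; a sketch only): hand
 * a kernel-virtual buffer to a bus-mastering device and convert the
 * bus address back again; the round trip returns the original
 * pointer.  On non-APUS systems this is a plain offset adjustment
 * and is only valid inside the kernel's linear RAM mapping.
 *
 *	unsigned long busaddr = virt_to_bus(buf);
 *	void *virt = bus_to_virt(busaddr);
 */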

/*
 * Change virtual addresses to physical addresses and vice versa, for
 * addresses in the area where the kernel has the RAM mapped.
 */
extern inline unsigned long virt_to_phys(volatile void * address)
{
#ifndef CONFIG_APUS
	return (unsigned long) address - KERNELBASE;
#else
	return iopa ((unsigned long) address);
#endif
}

extern inline void * phys_to_virt(unsigned long address)
{
#ifndef CONFIG_APUS
	return (void *) (address + KERNELBASE);
#else
	return (void*) mm_ptov (address);
#endif
}

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
#define page_to_bus(page)	(page_to_phys(page) + PCI_DRAM_OFFSET)
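
/*
 * Example (hypothetical page and byte offset; a sketch only): build
 * the bus address of data inside a struct page, e.g. for a DMA
 * descriptor.
 *
 *	unsigned long dma_addr = page_to_bus(page) + offset;
 */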

/* Enforce in-order execution of data I/O.
 * No distinction between read/write on PPC; use eieio for all three.
 */
#define iobarrier_rw() eieio()
#define iobarrier_r()  eieio()
#define iobarrier_w()  eieio()

/*
 * Here comes the ppc implementation of the IOMAP
 * interfaces.
 */
static inline unsigned int ioread8(void __iomem *addr)
{
	return readb(addr);
}

static inline unsigned int ioread16(void __iomem *addr)
{
	return readw(addr);
}

static inline unsigned int ioread32(void __iomem *addr)
{
	return readl(addr);
}

static inline void iowrite8(u8 val, void __iomem *addr)
{
	writeb(val, addr);
}

static inline void iowrite16(u16 val, void __iomem *addr)
{
	writew(val, addr);
}

static inline void iowrite32(u32 val, void __iomem *addr)
{
	writel(val, addr);
}

static inline void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
{
	_insb(addr, dst, count);
}

static inline void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
{
	_insw_ns(addr, dst, count);
}

static inline void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
{
	_insl_ns(addr, dst, count);
}

static inline void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
{
	_outsb(addr, src, count);
}

static inline void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
{
	_outsw_ns(addr, src, count);
}

static inline void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
{
	_outsl_ns(addr, src, count);
}

/* Create a virtual mapping cookie for an IO port range */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *);

/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
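
/*
 * Typical iomap usage (hypothetical BAR number and register offsets;
 * error handling trimmed).  The cookie from pci_iomap() works with
 * the ioread/iowrite helpers above whether the BAR is an I/O or a
 * memory resource.
 *
 *	void __iomem *base = pci_iomap(pdev, 0, 0);
 *	if (base) {
 *		u32 id = ioread32(base);
 *		iowrite32(1, base + 0x10);
 *		pci_iounmap(pdev, base);
 *	}
 */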

#endif /* _PPC_IO_H */

#ifdef CONFIG_8260_PCI9
#include <asm/mpc8260_pci9.h>
#endif

#ifdef CONFIG_NOT_COHERENT_CACHE

#define dma_cache_inv(_start,_size) \
	invalidate_dcache_range(_start, (_start + _size))
#define dma_cache_wback(_start,_size) \
	clean_dcache_range(_start, (_start + _size))
#define dma_cache_wback_inv(_start,_size) \
	flush_dcache_range(_start, (_start + _size))

#else

#define dma_cache_inv(_start,_size)		do { } while (0)
#define dma_cache_wback(_start,_size)		do { } while (0)
#define dma_cache_wback_inv(_start,_size)	do { } while (0)

#endif
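
/*
 * Example (hypothetical buffers; a sketch only): on a non-coherent
 * platform, write dirty cache lines back before a device reads a
 * buffer, and invalidate stale lines before the CPU reads data the
 * device has DMA'd in.  On coherent platforms both expand to no-ops.
 *
 *	dma_cache_wback((unsigned long)txbuf, tx_len);
 *	dma_cache_inv((unsigned long)rxbuf, rx_len);
 */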

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

/* Set and clear bits in device registers */
#define setbits32(_addr, _v) out_be32((_addr), in_be32(_addr) |  (_v))
#define clrbits32(_addr, _v) out_be32((_addr), in_be32(_addr) & ~(_v))

#define setbits16(_addr, _v) out_be16((_addr), in_be16(_addr) |  (_v))
#define clrbits16(_addr, _v) out_be16((_addr), in_be16(_addr) & ~(_v))
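
/*
 * Example (hypothetical interrupt-enable register; a sketch only):
 * set one bit and clear another in a big-endian 32-bit register
 * without disturbing its other bits.
 *
 *	setbits32(ier_reg, 0x00000001);
 *	clrbits32(ier_reg, 0x00000100);
 *
 * These expand to read-modify-write sequences and are not atomic;
 * the caller must provide locking if the register is shared.
 */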

#endif /* __KERNEL__ */