/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

#include <asm/pbm.h>
#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

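/* To keep the number of hypervisor calls down, IOMMU mappings are
 * staged in a per-cpu batch: physical page addresses accumulate in
 * pglist and are handed to pci_sun4v_iommu_map() in one flush.
 */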
struct pci_iommu_batch {
	struct pci_dev	*pdev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct pci_iommu_batch, pci_iommu_batch);

/* Interrupts must be disabled.  */
static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry)
{
	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	p->pdev = pdev;
	p->prot = prot;
	p->entry = entry;
	p->npages = 0;
}

/* Interrupts must be disabled.  */
static long pci_iommu_batch_flush(struct pci_iommu_batch *p)
{
	struct pcidev_cookie *pcp = p->pdev->sysdata;
	unsigned long devhandle = pcp->pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	do {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("pci_iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	} while (npages != 0);

	p->entry = entry;
	p->npages = 0;

	return 0;
}

/* Interrupts must be disabled.  */
static inline long pci_iommu_batch_add(u64 phys_page)
{
	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return pci_iommu_batch_flush(p);

	return 0;
}

/* Interrupts must be disabled.  */
static inline long pci_iommu_batch_end(void)
{
	struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return pci_iommu_batch_flush(p);
}

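/* Simple bitmap allocator for IOTSB entries: scan forward from the
 * rotating hint, wrap around once, and fail if no run of npages free
 * bits can be found.
 */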
static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages)
{
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}

static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
{
	unsigned long i;

	for (i = base; i < (base + npages); i++)
		__clear_bit(i, arena->map);
}

static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, order, first_page, npages, n;
	void *ret;
	long entry;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	first_page = __get_free_pages(GFP_ATOMIC, order);
	if (unlikely(first_page == 0UL))
		return NULL;

	memset((char *)first_page, 0, PAGE_SIZE << order);

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto arena_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	pci_iommu_batch_start(pdev,
			      (HV_PCI_MAP_ATTR_READ |
			       HV_PCI_MAP_ATTR_WRITE),
			      entry);

	for (n = 0; n < npages; n++) {
		long err = pci_iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

arena_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}

static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	pci_iommu_batch_start(pdev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = pci_iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return PCI_DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return PCI_DMA_ERROR_CODE;
}

static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)

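/* Walk the scatterlist prepared by prepare_sg() and queue one IOMMU
 * mapping per IO page covered by each DMA segment, using the per-cpu
 * batching helpers above.
 */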
static inline long fill_sg(long entry, struct pci_dev *pdev,
			   struct scatterlist *sg,
			   int nused, int nelems, unsigned long prot)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	pci_iommu_batch_start(pdev, prot, entry);

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = (pteval & IOPTE_PAGE);
			while (len > 0) {
				long err;

				err = pci_iommu_batch_add(pteval);
				if (unlikely(err < 0L))
					goto iommu_map_failed;

				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}

	if (unlikely(pci_iommu_batch_end() < 0L))
		goto iommu_map_failed;

	local_irq_restore(flags);
	return 0;

iommu_map_failed:
	local_irq_restore(flags);
	return -1L;
}

static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, npages, prot;
	u32 dma_base;
	struct scatterlist *sgtmp;
	long entry, err;
	int used;

	/* Fast path single entry scatterlists.  */
	if (nelems == 1) {
		sglist->dma_address =
			pci_4v_map_single(pdev,
					  (page_address(sglist->page) + sglist->offset),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad;

	/* Step 1: Prepare scatter list.  */
	npages = prepare_sg(sglist, nelems);
	if (unlikely(npages > PGLIST_NENTS))
		goto bad;

	/* Step 2: Allocate a cluster and context, if necessary.  */
	spin_lock_irqsave(&iommu->lock, flags);
	entry = pci_arena_alloc(&iommu->arena, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry < 0L))
		goto bad;

	dma_base = iommu->page_table_map_base +
		(entry << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses.  */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Create the mappings.  */
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != PCI_DMA_TODEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	err = fill_sg(entry, pdev, sglist, used, nelems, prot);
	if (unlikely(err < 0L))
		goto iommu_map_failed;

	return used;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;

iommu_map_failed:
	spin_lock_irqsave(&iommu->lock, flags);
	pci_arena_free(&iommu->arena, entry, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	unsigned long flags, i, npages;
	long entry;
	u32 devhandle, bus_addr;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	devhandle = pcp->pbm->devhandle;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	pci_arena_free(&iommu->arena, entry, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	/* Nothing to do... */
}

static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	/* Nothing to do... */
}

struct pci_iommu_ops pci_sun4v_iommu_ops = {
	.alloc_consistent = pci_4v_alloc_consistent,
	.free_consistent = pci_4v_free_consistent,
	.map_single = pci_4v_map_single,
	.unmap_single = pci_4v_unmap_single,
	.map_sg = pci_4v_map_sg,
	.unmap_sg = pci_4v_unmap_sg,
	.dma_sync_single_for_cpu = pci_4v_dma_sync_single_for_cpu,
	.dma_sync_sg_for_cpu = pci_4v_dma_sync_sg_for_cpu,
};

/* SUN4V PCI configuration space accessors.  */

static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
{
	if (bus == pbm->pci_first_busno) {
		if (device == 0 && func == 0)
			return 0;
		return 1;
	}

	if (bus < pbm->pci_first_busno ||
	    bus > pbm->pci_last_busno)
		return 1;
	return 0;
}

static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				  int where, int size, u32 *value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	u32 devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
		ret = ~0UL;
	} else {
		ret = pci_sun4v_config_get(devhandle,
				HV_PCI_DEVICE_BUILD(bus, device, func),
				where, size);
#if 0
		printk("rcfg: [%x:%x:%x:%d]=[%lx]\n",
		       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
		       where, size, ret);
#endif
	}
	switch (size) {
	case 1:
		*value = ret & 0xff;
		break;
	case 2:
		*value = ret & 0xffff;
		break;
	case 4:
		*value = ret & 0xffffffff;
		break;
	};

	return PCIBIOS_SUCCESSFUL;
}

static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				   int where, int size, u32 value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	u32 devhandle = pbm->devhandle;
	unsigned int bus = bus_dev->number;
	unsigned int device = PCI_SLOT(devfn);
	unsigned int func = PCI_FUNC(devfn);
	unsigned long ret;

	if (pci_sun4v_out_of_range(pbm, bus, device, func)) {
		/* Do nothing. */
	} else {
		ret = pci_sun4v_config_put(devhandle,
				HV_PCI_DEVICE_BUILD(bus, device, func),
				where, size, value);
#if 0
		printk("wcfg: [%x:%x:%x:%d] v[%x] == [%lx]\n",
		       devhandle, HV_PCI_DEVICE_BUILD(bus, device, func),
		       where, size, value, ret);
#endif
	}
	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops pci_sun4v_ops = {
	.read = pci_sun4v_read_pci_cfg,
	.write = pci_sun4v_write_pci_cfg,
};


static void pbm_scan_bus(struct pci_controller_info *p,
			 struct pci_pbm_info *pbm)
{
	struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);

	if (!cookie) {
		prom_printf("%s: Critical allocation failure.\n", pbm->name);
		prom_halt();
	}

	/* All we care about is the PBM. */
	memset(cookie, 0, sizeof(*cookie));
	cookie->pbm = pbm;

	pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, p->pci_ops, pbm);
#if 0
	pci_fixup_host_bridge_self(pbm->pci_bus);
	pbm->pci_bus->self->sysdata = cookie;
#endif
	pci_fill_in_pbm_cookies(pbm->pci_bus, pbm,
				pbm->prom_node);
	pci_record_assignments(pbm, pbm->pci_bus);
	pci_assign_unassigned(pbm, pbm->pci_bus);
	pci_fixup_irq(pbm, pbm->pci_bus);
	pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
	pci_setup_busmastering(pbm, pbm->pci_bus);
}

static void pci_sun4v_scan_bus(struct pci_controller_info *p)
{
	if (p->pbm_A.prom_node) {
		p->pbm_A.is_66mhz_capable =
			prom_getbool(p->pbm_A.prom_node, "66mhz-capable");

		pbm_scan_bus(p, &p->pbm_A);
	}
	if (p->pbm_B.prom_node) {
		p->pbm_B.is_66mhz_capable =
			prom_getbool(p->pbm_B.prom_node, "66mhz-capable");

		pbm_scan_bus(p, &p->pbm_B);
	}

	/* XXX register error interrupt handlers XXX */
}

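/* Choose an interrupt priority level based on the device's PCI base
 * class, then let the generic sun4v IRQ code build the final vector.
 */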
static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm,
					struct pci_dev *pdev,
					unsigned int devino)
{
	u32 devhandle = pbm->devhandle;
	int pil;

	pil = 4;
	if (pdev) {
		switch ((pdev->class >> 16) & 0xff) {
		case PCI_BASE_CLASS_STORAGE:
			pil = 4;
			break;

		case PCI_BASE_CLASS_NETWORK:
			pil = 6;
			break;

		case PCI_BASE_CLASS_DISPLAY:
			pil = 9;
			break;

		case PCI_BASE_CLASS_MULTIMEDIA:
		case PCI_BASE_CLASS_MEMORY:
		case PCI_BASE_CLASS_BRIDGE:
		case PCI_BASE_CLASS_SERIAL:
			pil = 10;
			break;

		default:
			pil = 4;
			break;
		};
	}
	BUG_ON(PIL_RESERVED(pil));

	return sun4v_build_irq(devhandle, devino, pil, IBF_PCI);
}

static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm = pcp->pbm;
	struct resource *res, *root;
	u32 reg;
	int where, size, is_64bit;

	res = &pdev->resource[resource];
	if (resource < 6) {
		where = PCI_BASE_ADDRESS_0 + (resource * 4);
	} else if (resource == PCI_ROM_RESOURCE) {
		where = pdev->rom_base_reg;
	} else {
		/* Somebody might have asked allocation of a non-standard resource */
		return;
	}

	/* XXX 64-bit MEM handling is not %100 correct... XXX */
	is_64bit = 0;
	if (res->flags & IORESOURCE_IO)
		root = &pbm->io_space;
	else {
		root = &pbm->mem_space;
		if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
		    == PCI_BASE_ADDRESS_MEM_TYPE_64)
			is_64bit = 1;
	}

	size = res->end - res->start;
	pci_read_config_dword(pdev, where, &reg);
	reg = ((reg & size) |
	       (((u32)(res->start - root->start)) & ~size));
	if (resource == PCI_ROM_RESOURCE) {
		reg |= PCI_ROM_ADDRESS_ENABLE;
		res->flags |= IORESOURCE_ROM_ENABLE;
	}
	pci_write_config_dword(pdev, where, reg);

	/* This knows that the upper 32-bits of the address
	 * must be zero.  Our PCI common layer enforces this.
	 */
	if (is_64bit)
		pci_write_config_dword(pdev, where + 4, 0);
}

static void pci_sun4v_resource_adjust(struct pci_dev *pdev,
				      struct resource *res,
				      struct resource *root)
{
	res->start += root->start;
	res->end += root->start;
}

/* Use ranges property to determine where PCI MEM, I/O, and Config
 * space are for this PCI bus module.
 */
static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm)
{
	int i, saw_mem, saw_io;

	saw_mem = saw_io = 0;
	for (i = 0; i < pbm->num_pbm_ranges; i++) {
		struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
		unsigned long a;
		int type;

		type = (pr->child_phys_hi >> 24) & 0x3;
		a = (((unsigned long)pr->parent_phys_hi << 32UL) |
		     ((unsigned long)pr->parent_phys_lo << 0UL));

		switch (type) {
		case 1:
			/* 16-bit IO space, 16MB */
			pbm->io_space.start = a;
			pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
			pbm->io_space.flags = IORESOURCE_IO;
			saw_io = 1;
			break;

		case 2:
			/* 32-bit MEM space, 2GB */
			pbm->mem_space.start = a;
			pbm->mem_space.end = a + (0x80000000UL - 1UL);
			pbm->mem_space.flags = IORESOURCE_MEM;
			saw_mem = 1;
			break;

		case 3:
			/* XXX 64-bit MEM handling XXX */

		default:
			break;
		};
	}

	if (!saw_io || !saw_mem) {
		prom_printf("%s: Fatal error, missing %s PBM range.\n",
			    pbm->name,
			    (!saw_io ? "IO" : "MEM"));
		prom_halt();
	}

	printk("%s: PCI IO[%lx] MEM[%lx]\n",
	       pbm->name,
	       pbm->io_space.start,
	       pbm->mem_space.start);
}

static void pbm_register_toplevel_resources(struct pci_controller_info *p,
					    struct pci_pbm_info *pbm)
{
	pbm->io_space.name = pbm->mem_space.name = pbm->name;

	request_resource(&ioport_resource, &pbm->io_space);
	request_resource(&iomem_resource, &pbm->mem_space);
	pci_register_legacy_regions(&pbm->io_space,
				    &pbm->mem_space);
}

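/* Mappings may already exist in the IOTSB when we take it over; mark
 * those entries as in-use in the arena so they are never handed out.
 */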
static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
					    struct pci_iommu *iommu)
{
	struct pci_iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			cnt++;
			__set_bit(i, arena->map);
		}
	}

	return cnt;
}

static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	struct pci_iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz;
	u32 vdma[2], dma_mask, dma_offset;
	int err, tsbsize;

	err = prom_getproperty(pbm->prom_node, "virtual-dma",
			       (char *)&vdma[0], sizeof(vdma));
	if (err == 0 || err == -1) {
		/* No property, use default values. */
		vdma[0] = 0x80000000;
		vdma[1] = 0x80000000;
	}

	dma_mask = vdma[0];
	switch (vdma[1]) {
	case 0x20000000:
		dma_mask |= 0x1fffffff;
		tsbsize = 64;
		break;

	case 0x40000000:
		dma_mask |= 0x3fffffff;
		tsbsize = 128;
		break;

	case 0x80000000:
		dma_mask |= 0x7fffffff;
		tsbsize = 256;
		break;

	default:
		prom_printf("PCI-SUN4V: strange virtual-dma size.\n");
		prom_halt();
	};

	tsbsize *= (8 * 1024);

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kmalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
		prom_halt();
	}
	memset(iommu->arena.map, 0, sz);
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);

	printk("%s: TSB entries [%lu], existing mappings [%lu]\n",
	       pbm->name, num_tsb_entries, sz);
}

static void pci_sun4v_get_bus_range(struct pci_pbm_info *pbm)
{
	unsigned int busrange[2];
	int prom_node = pbm->prom_node;
	int err;

	err = prom_getproperty(prom_node, "bus-range",
			       (char *)&busrange[0],
			       sizeof(busrange));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no bus-range.\n", pbm->name);
		prom_halt();
	}

	pbm->pci_first_busno = busrange[0];
	pbm->pci_last_busno = busrange[1];

}

static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, u32 devhandle)
{
	struct pci_pbm_info *pbm;
	int err, i;

	if (devhandle & 0x40)
		pbm = &p->pbm_B;
	else
		pbm = &p->pbm_A;

	pbm->parent = p;
	pbm->prom_node = prom_node;
	pbm->pci_first_slot = 1;

	pbm->devhandle = devhandle;

	sprintf(pbm->name, "SUN4V-PCI%d PBM%c",
		p->index, (pbm == &p->pbm_A ? 'A' : 'B'));

	printk("%s: devhandle[%x] prom_node[%x:%x]\n",
	       pbm->name, pbm->devhandle,
	       pbm->prom_node, prom_getchild(pbm->prom_node));

	prom_getstring(prom_node, "name",
		       pbm->prom_name, sizeof(pbm->prom_name));

	err = prom_getproperty(prom_node, "ranges",
			       (char *) pbm->pbm_ranges,
			       sizeof(pbm->pbm_ranges));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no ranges property.\n",
			    pbm->name);
		prom_halt();
	}

	pbm->num_pbm_ranges =
		(err / sizeof(struct linux_prom_pci_ranges));

	/* Mask out the top 8 bits of the ranges, leaving the real
	 * physical address.
	 */
	for (i = 0; i < pbm->num_pbm_ranges; i++)
		pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff;

	pci_sun4v_determine_mem_io_space(pbm);
	pbm_register_toplevel_resources(p, pbm);

	err = prom_getproperty(prom_node, "interrupt-map",
			       (char *)pbm->pbm_intmap,
			       sizeof(pbm->pbm_intmap));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no interrupt-map property.\n",
			    pbm->name);
		prom_halt();
	}

	pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
	err = prom_getproperty(prom_node, "interrupt-map-mask",
			       (char *)&pbm->pbm_intmask,
			       sizeof(pbm->pbm_intmask));
	if (err == 0 || err == -1) {
		prom_printf("%s: Fatal error, no interrupt-map-mask.\n",
			    pbm->name);
		prom_halt();
	}

	pci_sun4v_get_bus_range(pbm);
	pci_sun4v_iommu_init(pbm);
}

void sun4v_pci_init(int node, char *model_name)
{
	struct pci_controller_info *p;
	struct pci_iommu *iommu;
	struct linux_prom64_registers regs;
	u32 devhandle;
	int i;

	prom_getproperty(node, "reg", (char *)&regs, sizeof(regs));
	devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff;

	for (p = pci_controller_root; p; p = p->next) {
		struct pci_pbm_info *pbm;

		if (p->pbm_A.prom_node && p->pbm_B.prom_node)
			continue;

		pbm = (p->pbm_A.prom_node ?
		       &p->pbm_A :
		       &p->pbm_B);

		if (pbm->devhandle == (devhandle ^ 0x40)) {
			pci_sun4v_pbm_init(p, node, devhandle);
			return;
		}
	}

	for_each_cpu(i) {
		unsigned long page = get_zeroed_page(GFP_ATOMIC);

		if (!page)
			goto fatal_memory_error;

		per_cpu(pci_iommu_batch, i).pglist = (u64 *) page;
	}

	p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p)
		goto fatal_memory_error;

	memset(p, 0, sizeof(*p));

	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	memset(iommu, 0, sizeof(*iommu));
	p->pbm_A.iommu = iommu;

	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	memset(iommu, 0, sizeof(*iommu));
	p->pbm_B.iommu = iommu;

	p->next = pci_controller_root;
	pci_controller_root = p;

	p->index = pci_num_controllers++;
	p->pbms_same_domain = 0;

	p->scan_bus = pci_sun4v_scan_bus;
	p->irq_build = pci_sun4v_irq_build;
	p->base_address_update = pci_sun4v_base_address_update;
	p->resource_adjust = pci_sun4v_resource_adjust;
	p->pci_ops = &pci_sun4v_ops;

	/* Like PSYCHO and SCHIZO we have a 2GB aligned area
	 * for memory space.
	 */
	pci_memspace_mask = 0x7fffffffUL;

	pci_sun4v_pbm_init(p, node, devhandle);
	return;

fatal_memory_error:
	prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
	prom_halt();
}