[PATCH] powerpc: IOMMU support for honoring dma_mask

Some devices don't support the full 32-bit DMA address space that we
currently assume everywhere. Pass the device's DMA mask down to the
IOMMU allocators and limit the bitmap search so allocated bus addresses
stay below the mask; the allocation hint is masked on the first pass to
avoid an O(n) search, and the search restarts from 0 on the second pass.

Signed-off-by: Olof Johansson <olof@lixom.net>
Signed-off-by: Paul Mackerras <paulus@samba.org>
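---
Not part of the patch, for reviewers: a sketch of how the pci_dma_ops
glue could derive the mask it now has to pass down. device_to_mask()
and the 32-bit fallback are illustrative assumptions here, and
devnode_table() stands in for however the caller looks up the device's
iommu_table:

	static unsigned long device_to_mask(struct device *hwdev)
	{
		struct pci_dev *pdev = to_pci_dev(hwdev);

		/* Assume a device with no explicit mask can address
		 * the full 32-bit space.
		 */
		if (!pdev->dma_mask)
			return 0xfffffffful;

		return pdev->dma_mask;
	}

	static dma_addr_t pci_iommu_map_single(struct device *hwdev,
			void *vaddr, size_t size,
			enum dma_data_direction direction)
	{
		return iommu_map_single(devnode_table(hwdev), vaddr, size,
					device_to_mask(hwdev), direction);
	}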
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index d9a7fde..4eba60a 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -61,6 +61,7 @@
 static unsigned long iommu_range_alloc(struct iommu_table *tbl,
                                        unsigned long npages,
                                        unsigned long *handle,
+                                       unsigned long mask,
                                        unsigned int align_order)
 { 
 	unsigned long n, end, i, start;
@@ -97,9 +98,21 @@
 	 */
 	if (start >= limit)
 		start = largealloc ? tbl->it_largehint : tbl->it_hint;
-	
+
  again:
 
+	if (limit + tbl->it_offset > mask) {
+		limit = mask - tbl->it_offset + 1;
+		/* If we're constrained on address range, first try
+		 * at the masked hint to avoid O(n) search complexity,
+		 * but on second pass, start at 0.
+		 */
+		if ((start & mask) >= limit || pass > 0)
+			start = 0;
+		else
+			start &= mask;
+	}
+
 	n = find_next_zero_bit(tbl->it_map, limit, start);
 
 	/* Align allocation */
@@ -150,14 +163,14 @@
 
 static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
 		       unsigned int npages, enum dma_data_direction direction,
-		       unsigned int align_order)
+		       unsigned long mask, unsigned int align_order)
 {
 	unsigned long entry, flags;
 	dma_addr_t ret = DMA_ERROR_CODE;
-	
+
 	spin_lock_irqsave(&(tbl->it_lock), flags);
 
-	entry = iommu_range_alloc(tbl, npages, NULL, align_order);
+	entry = iommu_range_alloc(tbl, npages, NULL, mask, align_order);
 
 	if (unlikely(entry == DMA_ERROR_CODE)) {
 		spin_unlock_irqrestore(&(tbl->it_lock), flags);
@@ -236,7 +249,7 @@
 
 int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 		struct scatterlist *sglist, int nelems,
-		enum dma_data_direction direction)
+		unsigned long mask, enum dma_data_direction direction)
 {
 	dma_addr_t dma_next = 0, dma_addr;
 	unsigned long flags;
@@ -274,7 +287,7 @@
 		vaddr = (unsigned long)page_address(s->page) + s->offset;
 		npages = PAGE_ALIGN(vaddr + slen) - (vaddr & PAGE_MASK);
 		npages >>= PAGE_SHIFT;
-		entry = iommu_range_alloc(tbl, npages, &handle, 0);
+		entry = iommu_range_alloc(tbl, npages, &handle, mask >> PAGE_SHIFT, 0);
 
 		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);
 
@@ -479,7 +492,8 @@
  * byte within the page as vaddr.
  */
 dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
-		size_t size, enum dma_data_direction direction)
+		size_t size, unsigned long mask,
+		enum dma_data_direction direction)
 {
 	dma_addr_t dma_handle = DMA_ERROR_CODE;
 	unsigned long uaddr;
@@ -492,7 +506,8 @@
 	npages >>= PAGE_SHIFT;
 
 	if (tbl) {
-		dma_handle = iommu_alloc(tbl, vaddr, npages, direction, 0);
+		dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
+					 mask >> PAGE_SHIFT, 0);
 		if (dma_handle == DMA_ERROR_CODE) {
 			if (printk_ratelimit())  {
 				printk(KERN_INFO "iommu_alloc failed, "
@@ -521,7 +536,7 @@
  * to the dma address (mapping) of the first page.
  */
 void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
-		dma_addr_t *dma_handle, gfp_t flag)
+		dma_addr_t *dma_handle, unsigned long mask, gfp_t flag)
 {
 	void *ret = NULL;
 	dma_addr_t mapping;
@@ -551,7 +566,8 @@
 	memset(ret, 0, size);
 
 	/* Set up tces to cover the allocated range */
-	mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL, order);
+	mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL,
+			      mask >> PAGE_SHIFT, order);
 	if (mapping == DMA_ERROR_CODE) {
 		free_pages((unsigned long)ret, order);
 		ret = NULL;
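
On the range-limiting arithmetic in iommu_range_alloc() above: entry e
in the table maps bus page (tbl->it_offset + e), so requiring
it_offset + e <= mask gives the exclusive search limit
(mask - it_offset + 1). A standalone userspace demonstration of that
clamp, with made-up numbers (30-bit device mask, 4K pages):

	#include <stdio.h>

	int main(void)
	{
		unsigned long mask = ((1UL << 30) - 1) >> 12; /* in pages */
		unsigned long it_offset = 0x10000; /* table start, in pages */
		unsigned long it_size = 0x80000;   /* table entries */
		unsigned long limit = it_size;

		/* Same clamp as iommu_range_alloc(): keep entry + it_offset
		 * at or below the page-shifted mask.
		 */
		if (limit + it_offset > mask)
			limit = mask - it_offset + 1;

		/* Prints 0x30000 of 0x80000: entries 0..0x2ffff map bus
		 * pages 0x10000..0x3ffff, all within the 30-bit mask.
		 */
		printf("search limited to %#lx of %#lx entries\n",
		       limit, it_size);
		return 0;
	}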