powerpc: Use the newly added get_required_mask dma_map_ops hook

Now that the generic code has dma_map_ops set, push the computation
into the dma ops themselves instead of keeping a messy ifdef and
if-else chain in the base dma_get_required_mask hook.
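
The computation itself does not change: take the highest DMA'able
address and round it up to the next power of two, minus one.  A worked
example, using a made-up end-of-DRAM value purely for illustration:

	u64 end = 0x120000000ULL;		/* end of DRAM + dma offset */
	u64 mask = 1ULL << (fls64(end) - 1);	/* fls64(end) = 33 -> 0x100000000 */
	mask += mask - 1;			/* -> 0x1ffffffff, a 33-bit mask */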

If an ops structure does not set the get_required_mask hook, default
to the width of dma_addr_t.
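
(With a 32-bit dma_addr_t that fallback works out to
DMA_BIT_MASK(8 * 4) == DMA_BIT_MASK(32) == 0xffffffff; with a 64-bit
dma_addr_t it is the all-ones 64-bit mask.)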

This also corrects ibmebus's ibmebus_dma_supported to require a
64-bit mask.  I doubt anything is checking or setting the dma mask on
that bus.
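
The ibmebus hunk is not included in the diff below; the corrected
check amounts to something like this sketch:

	static int ibmebus_dma_supported(struct device *dev, u64 mask)
	{
		return mask == DMA_BIT_MASK(64);
	}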

Signed-off-by: Milton Miller <miltonm@bga.com>
Signed-off-by: Nishanth Aravamudan <nacc@us.ibm.com>
Cc: linuxppc-dev@lists.ozlabs.org
Cc: linux-kernel@vger.kernel.org
Cc: benh@kernel.crashing.org
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 503093e..10b136a 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -96,6 +96,18 @@
 #endif
 }
 
+static u64 dma_direct_get_required_mask(struct device *dev)
+{
+	u64 end, mask;
+
+	end = memblock_end_of_DRAM() + get_dma_offset(dev);
+
+	mask = 1ULL << (fls64(end) - 1);
+	mask += mask - 1;
+
+	return mask;
+}
+
 static inline dma_addr_t dma_direct_map_page(struct device *dev,
 					     struct page *page,
 					     unsigned long offset,
@@ -144,6 +156,7 @@
 	.dma_supported	= dma_direct_dma_supported,
 	.map_page	= dma_direct_map_page,
 	.unmap_page	= dma_direct_unmap_page,
+	.get_required_mask	= dma_direct_get_required_mask,
 #ifdef CONFIG_NOT_COHERENT_CACHE
 	.sync_single_for_cpu 		= dma_direct_sync_single,
 	.sync_single_for_device 	= dma_direct_sync_single,
@@ -173,7 +186,6 @@
 u64 dma_get_required_mask(struct device *dev)
 {
 	struct dma_map_ops *dma_ops = get_dma_ops(dev);
-	u64 mask, end = 0;
 
 	if (ppc_md.dma_get_required_mask)
 		return ppc_md.dma_get_required_mask(dev);
@@ -181,31 +193,10 @@
 	if (unlikely(dma_ops == NULL))
 		return 0;
 
-#ifdef CONFIG_PPC64
-	else if (dma_ops == &dma_iommu_ops)
-		return dma_iommu_get_required_mask(dev);
-#endif
-#ifdef CONFIG_SWIOTLB
-	else if (dma_ops == &swiotlb_dma_ops) {
-		u64 max_direct_dma_addr = dev->archdata.max_direct_dma_addr;
+	if (dma_ops->get_required_mask)
+		return dma_ops->get_required_mask(dev);
 
-		end = memblock_end_of_DRAM();
-		if (max_direct_dma_addr && end > max_direct_dma_addr)
-			end = max_direct_dma_addr;
-		end += get_dma_offset(dev);
-	}
-#endif
-	else if (dma_ops == &dma_direct_ops)
-		end = memblock_end_of_DRAM() + get_dma_offset(dev);
-	else {
-		WARN_ONCE(1, "%s: unknown ops %p\n", __func__, dma_ops);
-		end = memblock_end_of_DRAM();
-	}
-
-	mask = 1ULL << (fls64(end) - 1);
-	mask += mask - 1;
-
-	return mask;
+	return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
 }
 EXPORT_SYMBOL_GPL(dma_get_required_mask);