ARM: dma-mapping: allow CMA pages to not have a kernel mapping
Some use cases require that CMA memory have no kernel mapping at all:
when a CMA region is used as a pool that can hand out both cached and
uncached buffers, the kernel's existing (cached) linear mapping must be
removed, because the ARM architecture does not permit aliased mappings
of the same physical memory with conflicting attributes. Extend the
DMA APIs to honor DMA_ATTR_NO_KERNEL_MAPPING for CMA allocations. This
does not reclaim any virtual address space, since the pages' lowmem
virtual addresses remain reserved, but the kernel mapping itself is
removed.
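
For illustration, a driver-side sketch of the intended usage, assuming
this kernel's struct dma_attrs interface and dma_alloc_attrs()/
dma_free_attrs() entry points; the device pointer and the SZ_1M size
are placeholders:

	struct dma_attrs attrs;
	dma_addr_t dma_handle;
	void *cookie;

	init_dma_attrs(&attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	/*
	 * The returned pointer is only a cookie for the matching
	 * dma_free_attrs() call; it must not be dereferenced, since
	 * no kernel mapping is guaranteed to exist for the buffer.
	 */
	cookie = dma_alloc_attrs(dev, SZ_1M, &dma_handle, GFP_KERNEL,
				 &attrs);
	if (!cookie)
		return -ENOMEM;

	/* the device accesses the buffer via dma_handle only */

	dma_free_attrs(dev, SZ_1M, cookie, dma_handle, &attrs);
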
Change-Id: I64d21250abbe615c43e2b5b1272ee2b6d106705a
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
Signed-off-by: Mitchel Humpherys <mitchelh@codeaurora.org>
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ab1bd68..afaa39d 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -314,7 +314,8 @@
 core_initcall(consistent_init);
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
-				     pgprot_t prot, struct page **ret_page);
+				     pgprot_t prot, struct page **ret_page,
+				     bool no_kernel_mapping);
 
 static struct arm_vmregion_head coherent_head = {
 	.vm_lock = __SPIN_LOCK_UNLOCKED(&coherent_head.vm_lock),
@@ -343,7 +344,7 @@
 	if (!IS_ENABLED(CONFIG_CMA))
 		return 0;
 
-	ptr = __alloc_from_contiguous(NULL, size, prot, &page);
+	ptr = __alloc_from_contiguous(NULL, size, prot, &page, false);
 	if (ptr) {
 		coherent_head.vm_start = (unsigned long) ptr;
 		coherent_head.vm_end = (unsigned long) ptr + size;
@@ -522,12 +523,27 @@
 	return 0;
 }
 
-static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
+static int __dma_clear_pte(pte_t *pte, pgtable_t token, unsigned long addr,
+			   void *data)
+{
+	pte_clear(&init_mm, addr, pte);
+	return 0;
+}
+
+static void __dma_remap(struct page *page, size_t size, pgprot_t prot,
+			bool no_kernel_map)
 {
 	unsigned long start = (unsigned long) page_address(page);
 	unsigned end = start + size;
+	int (*func)(pte_t *pte, pgtable_t token, unsigned long addr,
+		    void *data);
 
-	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
+	if (no_kernel_map)
+		func = __dma_clear_pte;
+	else
+		func = __dma_update_pte;
+
+	apply_to_page_range(&init_mm, start, size, func, &prot);
 	dsb();
 	flush_tlb_kernel_range(start, end);
 }
@@ -604,7 +620,8 @@
 }
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
-				     pgprot_t prot, struct page **ret_page)
+				     pgprot_t prot, struct page **ret_page,
+				     bool no_kernel_mapping)
 {
 	unsigned long order = get_order(size);
 	size_t count = size >> PAGE_SHIFT;
@@ -615,7 +632,7 @@
 		return NULL;
 
 	__dma_clear_buffer(page, size);
-	__dma_remap(page, size, prot);
+	__dma_remap(page, size, prot, no_kernel_mapping);
 
 	*ret_page = page;
 	return page_address(page);
@@ -624,7 +641,7 @@
 static void __free_from_contiguous(struct device *dev, struct page *page,
 				   size_t size)
 {
-	__dma_remap(page, size, pgprot_kernel);
+	__dma_remap(page, size, pgprot_kernel, false);
 	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
 }
 
@@ -649,7 +666,7 @@
 
 #define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)	NULL
 #define __alloc_from_pool(dev, size, ret_page, c)		NULL
-#define __alloc_from_contiguous(dev, size, prot, ret)		NULL
+#define __alloc_from_contiguous(dev, size, prot, ret, w)	NULL
 #define __free_from_pool(cpu_addr, size)			0
 #define __free_from_contiguous(dev, page, size)		do { } while (0)
 #define __dma_free_remap(cpu_addr, size)			do { } while (0)
@@ -672,7 +689,8 @@
 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
-			 gfp_t gfp, pgprot_t prot, const void *caller)
+			 gfp_t gfp, pgprot_t prot, const void *caller,
+			 bool no_kernel_mapping)
 {
 	u64 mask = get_coherent_dma_mask(dev);
 	struct page *page;
@@ -712,7 +730,8 @@
 	else if (gfp & GFP_ATOMIC)
 		addr = __alloc_from_pool(dev, size, &page, caller);
 	else
-		addr = __alloc_from_contiguous(dev, size, prot, &page);
+		addr = __alloc_from_contiguous(dev, size, prot, &page,
+					       no_kernel_mapping);
 
 	if (addr)
 		*handle = pfn_to_dma(dev, page_to_pfn(page));
@@ -729,12 +748,14 @@
 {
 	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
 	void *memory;
+	bool no_kernel_mapping = dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING,
+					      attrs);
 
 	if (dma_alloc_from_coherent(dev, size, handle, &memory))
 		return memory;
 
 	return __dma_alloc(dev, size, handle, gfp, prot,
-			   __builtin_return_address(0));
+			   __builtin_return_address(0), no_kernel_mapping);
 }
 
 /*