ARC: Add support for ioremap_prot API

Implement ioremap_prot() to allow mapping IO memory with variable
protection via TLB.
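
As a reference for callers, here is a minimal sketch of the new entry
point, assuming hypothetical dev_paddr/dev_size values (the
PAGE_KERNEL_NO_CACHE case is what plain ioremap() now delegates to):

	void __iomem *regs;

	/* dev_paddr/dev_size: made-up, platform specific values */
	regs = ioremap_prot(dev_paddr, dev_size, PAGE_KERNEL_NO_CACHE);
	if (!regs)
		return -ENOMEM;
	/* ... MMIO accesses through regs ... */
	iounmap(regs);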

Implementing this allows the /dev/mem driver to use its generic access()
VMA callback, which in turn allows ptrace to examine data in memory
regions mapped via /dev/mem, such as the ARC DCCM.
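
For reference, the generic access() helper (generic_access_phys() in
mm/memory.c) ends up using the new hook roughly as below; this is a
simplified sketch with declarations and error handling omitted, and the
details vary across kernel versions:

	/* prot is looked up from the vma/pte by the real code (follow_phys()) */
	prot = pgprot_val(vma->vm_page_prot);
	maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
	if (write)
		memcpy_toio(maddr + offset, buf, len);
	else
		memcpy_fromio(buf, maddr + offset, len);
	iounmap(maddr);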

The end result is that it is possible to examine the values of variables
placed in DCCM by user space programs via GDB.

CC: Alexey Brodkin <Alexey.Brodkin@synopsys.com>
CC: Noam Camus <noamc@ezchip.com>
Acked-by: Vineet Gupta <vgupta@synopsys.com>
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
index 52518b6..3e5c92c 100644
--- a/arch/arc/mm/ioremap.c
+++ b/arch/arc/mm/ioremap.c
@@ -16,25 +16,49 @@
 
 void __iomem *ioremap(unsigned long paddr, unsigned long size)
 {
-	unsigned long vaddr;
-	struct vm_struct *area;
-	unsigned long off, end;
-	const pgprot_t prot = PAGE_KERNEL_NO_CACHE;
+	unsigned long end;
 
 	/* Don't allow wraparound or zero size */
 	end = paddr + size - 1;
 	if (!size || (end < paddr))
 		return NULL;
 
-	/* If the region is h/w uncached, nothing special needed */
+	/* If the region is h/w uncached, avoid MMU mappings */
 	if (paddr >= ARC_UNCACHED_ADDR_SPACE)
 		return (void __iomem *)paddr;
 
+	return ioremap_prot(paddr, size, PAGE_KERNEL_NO_CACHE);
+}
+EXPORT_SYMBOL(ioremap);
+
+/*
+ * ioremap with access flags
+ * Cache semantics wise it is the same as ioremap - "forced" uncached.
+ * However, unlike vanilla ioremap which bypasses the ARC MMU for addresses
+ * in the ARC hardware uncached region, this one still goes through the MMU
+ * as the caller might need finer access control (R/W/X)
+ */
+void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
+			   unsigned long flags)
+{
+	void __iomem *vaddr;
+	struct vm_struct *area;
+	unsigned long off, end;
+	pgprot_t prot = __pgprot(flags);
+
+	/* Don't allow wraparound, zero size */
+	end = paddr + size - 1;
+	if ((!size) || (end < paddr))
+		return NULL;
+
 	/* An early platform driver might end up here */
 	if (!slab_is_available())
 		return NULL;
 
-	/* Mappings have to be page-aligned, page-sized */
+	/* force uncached */
+	prot = pgprot_noncached(prot);
+
+	/* Mappings have to be page-aligned */
 	off = paddr & ~PAGE_MASK;
 	paddr &= PAGE_MASK;
 	size = PAGE_ALIGN(end + 1) - paddr;
@@ -45,17 +69,17 @@
 	area = get_vm_area(size, VM_IOREMAP);
 	if (!area)
 		return NULL;
-
 	area->phys_addr = paddr;
-	vaddr = (unsigned long)area->addr;
-	if (ioremap_page_range(vaddr, vaddr + size, paddr, prot)) {
-		vfree(area->addr);
+	vaddr = (void __iomem *)area->addr;
+	if (ioremap_page_range((unsigned long)vaddr,
+			       (unsigned long)vaddr + size, paddr, prot)) {
+		vunmap((void __force *)vaddr);
 		return NULL;
 	}
-
 	return (void __iomem *)(off + (char __iomem *)vaddr);
 }
-EXPORT_SYMBOL(ioremap);
+EXPORT_SYMBOL(ioremap_prot);
+
 
 void iounmap(const void __iomem *addr)
 {