/*
 *  linux/arch/arm/mm/consistent.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/tlbflush.h>

#define CONSISTENT_BASE	(0xffc00000)
#define CONSISTENT_END	(0xffe00000)
#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)

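/*
 * Example (assuming the usual 4K pages, i.e. PAGE_SHIFT == 12): the
 * address 0xffc01000 lies one page into the region, so
 * CONSISTENT_OFFSET(0xffc01000) == 1 and indexes the second consistent PTE.
 */
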
/*
 * This is the page table (2MB) covering uncached, DMA-consistent allocations.
 */
static pte_t *consistent_pte;
static DEFINE_SPINLOCK(consistent_lock);

/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage the vm_struct supporting vmalloc() becoming:
 *
 *  struct vm_struct {
 *    struct vm_region	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (e.g.):
 *
 *  struct vm_region vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it depends on
 * the amount of RAM found at boot time), so get_vm_area() would have to
 * initialise it each time prior to calling vm_region_alloc().
 */
struct vm_region {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
	struct page		*vm_pages;
};

static struct vm_region consistent_head = {
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};

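/*
 * Carve a 'size'-byte hole out of the virtual range described by 'head',
 * using a first-fit walk of the existing regions.  The bookkeeping entry
 * is kmalloc'd with 'gfp'; consistent_lock is taken internally, so the
 * caller must not hold it.  Returns NULL when the range is exhausted.
 */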
static struct vm_region *
vm_region_alloc(struct vm_region *head, size_t size, unsigned int gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct vm_region *c, *new;

	new = kmalloc(sizeof(struct vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
 out:
	return NULL;
}

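/*
 * Find the region whose allocation starts exactly at 'addr'.  The caller
 * is expected to hold consistent_lock.
 */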
static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
{
	struct vm_region *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}

#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

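/*
 * Back-end for the dma_alloc_* helpers below: allocate the pages, wipe
 * and flush them out of the kernel's cached linear mapping, then remap
 * them into the consistent region with the requested page protection.
 * On success, *handle holds the bus address for the device.
 */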
static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
	    unsigned int gfp, pgprot_t prot)
{
	struct page *page;
	struct vm_region *c;
	unsigned long order;
	u64 mask = ISA_DMA_THRESHOLD, limit;

	if (!consistent_pte) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			goto no_page;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			goto no_page;
		}
	}

	/*
	 * Sanity check the allocation size.
	 */
	size = PAGE_ALIGN(size);
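	/*
	 * limit is the size of the address range the mask can reach:
	 * mask + 1 in the usual case of a contiguous low-bits mask, or
	 * zero (meaning "no limit") when mask covers all 64 bits and
	 * mask + 1 wraps to zero.
	 */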
	limit = (mask + 1) & ~mask;
	if ((limit && size >= limit) ||
	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
		printk(KERN_WARNING "coherent allocation too big "
		       "(requested %#zx mask %#llx)\n", size, mask);
		goto no_page;
	}

	order = get_order(size);

	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		unsigned long kaddr = (unsigned long)page_address(page);
		memset(page_address(page), 0, size);
		dmac_flush_range(kaddr, kaddr + size);
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 * The vm_region bookkeeping is an ordinary kmalloc(), so strip
	 * the DMA/highmem hints from the gfp flags for it.
	 */
	c = vm_region_alloc(&consistent_head, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		pte_t *pte = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
		struct page *end = page + (1 << order);

		c->vm_pages = page;

		/*
		 * Set the "dma handle"
		 */
		*handle = page_to_dma(dev, page);

		do {
			BUG_ON(!pte_none(*pte));

			set_page_count(page, 1);
			/*
			 * x86 does not mark the pages reserved...
			 */
			SetPageReserved(page);
			set_pte(pte, mk_pte(page, prot));
			page++;
			pte++;
		} while (size -= PAGE_SIZE);

		/*
		 * Free the otherwise unused pages.
		 */
		while (page < end) {
			set_page_count(page, 1);
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	if (page)
		__free_pages(page, order);
 no_page:
	*handle = ~0;
	return NULL;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, int gfp)
{
	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_noncached(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_coherent);
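
/*
 * A minimal usage sketch (the device and size below are illustrative):
 *
 *	dma_addr_t bus;
 *	void *cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &bus,
 *				       GFP_KERNEL);
 *	if (cpu) {
 *		... hand 'bus' to the device, access the buffer via 'cpu' ...
 *		dma_free_coherent(&pdev->dev, PAGE_SIZE, cpu, bus);
 *	}
 */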

/*
 * Allocate a writecombining region, in much the same way as
 * dma_alloc_coherent above.
 */
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, int gfp)
{
	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_writecombine(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_writecombine);

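/*
 * Common helper for the mmap variants below: look up the coherent region
 * backing 'cpu_addr' and remap its pages into the user vma, provided the
 * requested window (vm_pgoff plus the vma length) fits within the
 * original allocation.
 */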
static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	unsigned long flags, user_size, kern_size;
	struct vm_region *c;
	int ret = -ENXIO;

	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	spin_lock_irqsave(&consistent_lock, flags);
	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
	spin_unlock_irqrestore(&consistent_lock, flags);

	if (c) {
		unsigned long off = vma->vm_pgoff;

		kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

		if (off < kern_size &&
		    user_size <= (kern_size - off)) {
			vma->vm_flags |= VM_RESERVED;
			ret = remap_pfn_range(vma, vma->vm_start,
					      page_to_pfn(c->vm_pages) + off,
					      user_size << PAGE_SHIFT,
					      vma->vm_page_prot);
		}
	}

	return ret;
}

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_coherent);

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_writecombine);
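
/*
 * Sketch of a driver mmap() hook built on the above (the names are
 * illustrative; 'drv->cpu'/'drv->dma' would come from
 * dma_alloc_writecombine()):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo *drv = file->private_data;
 *
 *		return dma_mmap_writecombine(drv->dev, vma, drv->cpu,
 *					     drv->dma, drv->size);
 *	}
 */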

/*
 * Free a buffer allocated by one of the dma_alloc_* functions above:
 * tear down its consistent mapping and release the pages.
 */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
	struct vm_region *c;
	unsigned long flags, addr;
	pte_t *ptep;

	size = PAGE_ALIGN(size);

	spin_lock_irqsave(&consistent_lock, flags);

	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
	if (!c)
		goto no_area;

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%lu != %zu)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
		unsigned long pfn;

		ptep++;
		addr += PAGE_SIZE;

		if (!pte_none(pte) && pte_present(pte)) {
			pfn = pte_pfn(pte);

			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				/*
				 * x86 does not mark the pages reserved...
				 */
				ClearPageReserved(page);

				__free_page(page);
				continue;
			}
		}

		printk(KERN_CRIT "%s: bad page in kernel page table\n",
		       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	list_del(&c->vm_list);

	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
	return;

 no_area:
	spin_unlock_irqrestore(&consistent_lock, flags);
	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
	       __func__, cpu_addr);
	dump_stack();
}
EXPORT_SYMBOL(dma_free_coherent);

/*
 * Initialise the consistent memory allocation.  This runs as a
 * core_initcall(), well before any driver initcalls, so the page table
 * is ready before the first dma_alloc_coherent() call.
 */
static int __init consistent_init(void)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int ret = 0;

	spin_lock(&init_mm.page_table_lock);

	do {
		pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
		pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(&init_mm, pmd, CONSISTENT_BASE);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte = pte;
	} while (0);

	spin_unlock(&init_mm.page_table_lock);

	return ret;
}

core_initcall(consistent_init);

/*
 * Make an area consistent for devices.
 */
void consistent_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end   = start + size;

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		dmac_inv_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		dmac_clean_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		dmac_flush_range(start, end);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(consistent_sync);
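
/*
 * Note: consistent_sync() is normally reached via the streaming DMA API
 * rather than called directly - e.g. dma_map_single(dev, buf, len,
 * DMA_TO_DEVICE) cleans the buffer out of the CPU caches through this
 * function before the device reads it.
 */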