/*
 *  PowerPC version derived from arch/arm/mm/consistent.c
 *    Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 *  Copyright (C) 2000 Russell King
 *
 * Consistent memory allocators.  Used for DMA devices that want to
 * share uncached memory with the processor core.  The function return
 * is the virtual address and 'dma_handle' is the physical address.
 * Mostly stolen from the ARM port, with some changes for PowerPC.
 *						-- Dan
 *
 * Reorganized to get rid of the arch-specific consistent_* functions
 * and provide non-coherent implementations for the DMA API. -Matt
 *
 * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
 * implementation. This is pulled straight from ARM and barely
 * modified. -Matt
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>

#include <asm/tlbflush.h>

/*
 * This address range defaults to a value that is safe for all
 * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
 * can be further configured for specific applications under
 * the "Advanced Setup" menu. -Matt
 */
#define CONSISTENT_BASE	(CONFIG_CONSISTENT_START)
#define CONSISTENT_END	(CONFIG_CONSISTENT_START + CONFIG_CONSISTENT_SIZE)
#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
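
/*
 * Worked example (illustrative only; the real values are Kconfig
 * dependent): with CONFIG_CONSISTENT_START = 0xff100000 and 4K pages,
 * CONSISTENT_OFFSET(0xff103000) = (0xff103000 - 0xff100000) >> 12 = 3,
 * i.e. the fourth entry in the consistent_pte table below.
 */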

/*
 * This is the page table (2MB) covering uncached, DMA consistent allocations
 */
static pte_t *consistent_pte;
static DEFINE_SPINLOCK(consistent_lock);

/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (eg):
 *
 *  struct vm_region vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
struct ppc_vm_region {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
};

static struct ppc_vm_region consistent_head = {
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};

static struct ppc_vm_region *
ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct ppc_vm_region *c, *new;

	new = kmalloc(sizeof(struct ppc_vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

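	/*
	 * First-fit scan: the list is kept sorted by address.  Stop at
	 * the first gap large enough for 'size'; falling off the end of
	 * the loop appends the new region at the tail via 'found'.
	 */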
	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
 out:
	return NULL;
}

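/*
 * Note: the lookup below matches on the exact region start address, so
 * callers must pass the same virtual address that __dma_alloc_coherent()
 * returned; a pointer into the middle of an allocation is not found.
 */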
static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr)
{
	struct ppc_vm_region *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	struct ppc_vm_region *c;
	unsigned long order;
	u64 mask = 0x00ffffff, limit; /* ISA default */

	if (!consistent_pte) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	size = PAGE_ALIGN(size);
	limit = (mask + 1) & ~mask;
	if ((limit && size >= limit) || size >= (CONSISTENT_END - CONSISTENT_BASE)) {
		printk(KERN_WARNING "coherent allocation too big (requested %#zx mask %#llx)\n",
		       size, mask);
		return NULL;
	}

	order = get_order(size);

	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		unsigned long kaddr = (unsigned long)page_address(page);
		memset(page_address(page), 0, size);
		flush_dcache_range(kaddr, kaddr + size);
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = ppc_vm_region_alloc(&consistent_head, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		unsigned long vaddr = c->vm_start;
		pte_t *pte = consistent_pte + CONSISTENT_OFFSET(vaddr);
		struct page *end = page + (1 << order);

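		/*
		 * alloc_pages() gave us a single high-order block;
		 * split_page() turns it into independent order-0 pages
		 * so that the unused tail pages beyond 'size' can be
		 * returned to the page allocator individually below.
		 */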
		split_page(page, order);

		/*
		 * Set the "dma handle"
		 */
		*handle = page_to_phys(page);

		do {
			BUG_ON(!pte_none(*pte));

			SetPageReserved(page);
			set_pte_at(&init_mm, vaddr,
				   pte, mk_pte(page, pgprot_noncached(PAGE_KERNEL)));
			page++;
			pte++;
			vaddr += PAGE_SIZE;
		} while (size -= PAGE_SIZE);

		/*
		 * Free the otherwise unused pages.
		 */
		while (page < end) {
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	if (page)
		__free_pages(page, order);
 no_page:
	return NULL;
}
EXPORT_SYMBOL(__dma_alloc_coherent);
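
/*
 * Hypothetical usage sketch (values are made up for illustration; on
 * platforms with CONFIG_NOT_COHERENT_CACHE this pair normally backs the
 * generic dma_alloc_coherent()/dma_free_coherent() API rather than being
 * called directly):
 *
 *	dma_addr_t dma_handle;
 *	void *cpu_addr = __dma_alloc_coherent(PAGE_SIZE, &dma_handle,
 *					      GFP_KERNEL);
 *	if (cpu_addr) {
 *		...program the device with dma_handle, touch cpu_addr...
 *		__dma_free_coherent(PAGE_SIZE, cpu_addr);
 *	}
 */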

/*
 * free a page as defined by the above mapping.
 */
void __dma_free_coherent(size_t size, void *vaddr)
{
	struct ppc_vm_region *c;
	unsigned long flags, addr;
	pte_t *ptep;

	size = PAGE_ALIGN(size);

	spin_lock_irqsave(&consistent_lock, flags);

	c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr);
	if (!c)
		goto no_area;

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%lu != %zu)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
		unsigned long pfn;

		ptep++;
		addr += PAGE_SIZE;

		if (!pte_none(pte) && pte_present(pte)) {
			pfn = pte_pfn(pte);

			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);
				ClearPageReserved(page);

				__free_page(page);
				continue;
			}
		}

		printk(KERN_CRIT "%s: bad page in kernel page table\n",
		       __func__);
	} while (size -= PAGE_SIZE);

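	/*
	 * The PTEs are cleared, but stale translations for the old
	 * uncached mapping may still sit in the TLB; flush them before
	 * this virtual range can be handed out again.
	 */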
	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	list_del(&c->vm_list);

	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
	return;

 no_area:
	spin_unlock_irqrestore(&consistent_lock, flags);
	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
	       __func__, vaddr);
	dump_stack();
}
EXPORT_SYMBOL(__dma_free_coherent);

/*
 * Initialise the consistent memory allocation.
 */
static int __init dma_alloc_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = 0;

	do {
		pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
		pud = pud_alloc(&init_mm, pgd, CONSISTENT_BASE);
		pmd = pmd_alloc(&init_mm, pud, CONSISTENT_BASE);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte = pte;
	} while (0);

	return ret;
}

core_initcall(dma_alloc_init);
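
/*
 * Note (an assumption about init ordering, not stated in this file):
 * core_initcall() runs before device drivers probe, so consistent_pte
 * should already be set up when the first __dma_alloc_coherent() call
 * arrives; the !consistent_pte check above is a defensive guard rather
 * than a supported path.
 */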

/*
 * make an area consistent.
 */
void __dma_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end   = start + size;

	switch (direction) {
	case DMA_NONE:
		BUG();
	case DMA_FROM_DEVICE:
		/*
		 * invalidate only when cache-line aligned otherwise there is
		 * the potential for discarding uncommitted data from the cache
		 */
		if ((start & (L1_CACHE_BYTES - 1)) || (size & (L1_CACHE_BYTES - 1)))
			flush_dcache_range(start, end);
		else
			invalidate_dcache_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
EXPORT_SYMBOL(__dma_sync);
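
/*
 * Hypothetical usage sketch (the buffer and 'len' are made up for
 * illustration; real callers normally reach this via the generic
 * streaming-DMA mapping paths rather than calling it directly):
 *
 *	char *buf = kmalloc(len, GFP_KERNEL);
 *	__dma_sync(buf, len, DMA_TO_DEVICE);	// device may now read buf
 *	...device writes into buf...
 *	__dma_sync(buf, len, DMA_FROM_DEVICE);	// CPU now sees fresh data
 */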

#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
 */
static inline void __dma_sync_page_highmem(struct page *page,
		unsigned long offset, size_t size, int direction)
{
	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
	size_t cur_size = seg_size;
	unsigned long flags, start, seg_offset = offset;
	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
	int seg_nr = 0;

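	/*
	 * Worked example (illustrative only): offset = 0x100 and
	 * size = 2 * PAGE_SIZE give a first segment of PAGE_SIZE - 0x100
	 * bytes in page 0, then a full page 1, then a final 0x100 bytes
	 * in page 2; the ceiling division above yields nr_segs = 3.
	 */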
	local_irq_save(flags);

	do {
		start = (unsigned long)kmap_atomic(page + seg_nr,
				KM_PPC_SYNC_PAGE) + seg_offset;

		/* Sync this buffer segment */
		__dma_sync((void *)start, seg_size, direction);
		kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
		seg_nr++;

		/* Calculate next buffer segment size */
		seg_size = min((size_t)PAGE_SIZE, size - cur_size);

		/* Add the segment size to our running total */
		cur_size += seg_size;
		seg_offset = 0;
	} while (seg_nr < nr_segs);

	local_irq_restore(flags);
}
#endif /* CONFIG_HIGHMEM */

/*
 * __dma_sync_page() makes memory consistent.  Identical to __dma_sync(),
 * but takes a struct page and offset instead of a virtual address.
 */
void __dma_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction)
{
#ifdef CONFIG_HIGHMEM
	__dma_sync_page_highmem(page, offset, size, direction);
#else
	unsigned long start = (unsigned long)page_address(page) + offset;
	__dma_sync((void *)start, size, direction);
#endif
}
EXPORT_SYMBOL(__dma_sync_page);