/*
 * arch/sh/mm/consistent.c
 *
 * Copyright (C) 2004 - 2007  Paul Mundt
 *
 * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

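/*
 * Seed dma-debug with a preallocated pool of tracking entries before
 * the bulk of driver initialization runs.
 */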
static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_init);

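/*
 * Allocate a coherent buffer: grab zeroed pages from the page
 * allocator, flush any cached lines covering them, and hand back an
 * uncached (ioremap_nocache) view of the buffer while reporting its
 * physical address through *dma_handle.
 */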
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret, *ret_nocache;
	int order = get_order(size);

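	/* hand back zeroed memory, as dma_alloc_coherent() callers expect */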
	gfp |= __GFP_ZERO;

	ret = (void *)__get_free_pages(gfp, order);
	if (!ret)
		return NULL;

	/*
	 * Pages from the page allocator may have data present in
	 * cache. So flush the cache before using uncached memory.
	 */
	dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);

	ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
	if (!ret_nocache) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}

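	/*
	 * Split the high-order allocation into order-0 pages so that
	 * dma_generic_free_coherent() can release them individually.
	 */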
	split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);

	*dma_handle = virt_to_phys(ret);

	return ret_nocache;
}

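/*
 * Undo dma_generic_alloc_coherent(): free each constituent page of
 * the buffer, then tear down the uncached mapping.
 */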
void dma_generic_free_coherent(struct device *dev, size_t size,
			       void *vaddr, dma_addr_t dma_handle)
{
	int order = get_order(size);
	unsigned long pfn = dma_handle >> PAGE_SHIFT;
	int k;

	for (k = 0; k < (1 << order); k++)
		__free_pages(pfn_to_page(pfn + k), 0);

	iounmap(vaddr);
}

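/*
 * Write back and/or invalidate the cache lines covering the buffer.
 * In 29-bit mode the virtual address may be an uncached (P2) alias,
 * so the cache operation is applied to the cached (P1) alias of the
 * same physical region instead.
 */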
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
	void *addr;

	addr = __in_29bit_mode() ?
	       (void *)CAC_ADDR((unsigned long)vaddr) : vaddr;

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		__flush_invalidate_region(addr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		__flush_wback_region(addr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		__flush_purge_region(addr, size);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(dma_cache_sync);

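/*
 * "memchunk.NAME=SIZE" on the kernel command line overrides the
 * memory chunk size handed to platform_resource_setup_memory() for
 * the device named NAME.  The __setup() handler below merely claims
 * the "memchunk." prefix; the actual parsing is done by
 * memchunk_cmdline_override().
 */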
static int __init memchunk_setup(char *str)
{
	return 1; /* accept anything that begins with "memchunk." */
}
__setup("memchunk.", memchunk_setup);

static void __init memchunk_cmdline_override(char *name, unsigned long *sizep)
{
	char *p = boot_command_line;
	int k = strlen(name);

	while ((p = strstr(p, "memchunk."))) {
		p += 9; /* strlen("memchunk.") */
		if (!strncmp(name, p, k) && p[k] == '=') {
			p += k + 1;
			*sizep = memparse(p, NULL);
			pr_info("%s: forcing memory chunk size to 0x%08lx\n",
				name, *sizep);
			break;
		}
	}
}

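/*
 * Allocate a coherent memory chunk of memsize bytes (possibly
 * overridden from the command line) and publish it through the last,
 * still-empty resource slot of the platform device.
 */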
int __init platform_resource_setup_memory(struct platform_device *pdev,
					  char *name, unsigned long memsize)
{
	struct resource *r;
	dma_addr_t dma_handle;
	void *buf;

	r = pdev->resource + pdev->num_resources - 1;
	if (r->flags) {
		pr_warning("%s: unable to find empty space for resource\n",
			name);
		return -EINVAL;
	}

	memchunk_cmdline_override(name, &memsize);
	if (!memsize)
		return 0;

	buf = dma_alloc_coherent(NULL, memsize, &dma_handle, GFP_KERNEL);
	if (!buf) {
		pr_warning("%s: unable to allocate memory\n", name);
		return -ENOMEM;
	}

	memset(buf, 0, memsize);

	r->flags = IORESOURCE_MEM;
	r->start = dma_handle;
	r->end = r->start + memsize - 1;
	r->name = name;
	return 0;
}