/*
 * arch/sh/mm/consistent.c
 *
 * Copyright (C) 2004 - 2007  Paul Mundt
 *
 * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>
#include <asm/io.h>

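/*
 * Coherent memory here is built out of ordinary cached pages: the
 * pages are allocated, flushed out of the cache, and then remapped
 * uncached with ioremap_nocache() before being handed back to the
 * caller.
 */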
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret, *ret_nocache;
	int order = get_order(size);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	ret = (void *)__get_free_pages(gfp, order);
	if (!ret)
		return NULL;

	memset(ret, 0, size);
	/*
	 * Pages from the page allocator may have data present in
	 * cache. So flush the cache before using uncached memory.
	 */
	dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);

	ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
	if (!ret_nocache) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}

	/*
	 * Split the high-order allocation into order-0 pages so that
	 * dma_free_coherent() can release them individually.
	 */
	split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);

	*dma_handle = virt_to_phys(ret);

	debug_dma_alloc_coherent(dev, size, *dma_handle, ret_nocache);

	return ret_nocache;
}
EXPORT_SYMBOL(dma_alloc_coherent);
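
/*
 * A sketch of typical driver usage (illustrative only; "dev" and the
 * buffer size are placeholders):
 *
 *	dma_addr_t handle;
 *	void *buf;
 *
 *	buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
 */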

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	int order = get_order(size);
	unsigned long pfn = dma_handle >> PAGE_SHIFT;
	int k;

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	debug_dma_free_coherent(dev, size, vaddr, dma_handle);

	/* Free the order-0 pages created by split_page() at alloc time. */
	for (k = 0; k < (1 << order); k++)
		__free_pages(pfn_to_page(pfn + k), 0);

	iounmap(vaddr);
}
EXPORT_SYMBOL(dma_free_coherent);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
#ifdef CONFIG_CPU_SH5
	void *p1addr = vaddr;
#else
	/* Use the cached P1 segment alias so the flush primitives work. */
	void *p1addr = (void *)P1SEGADDR((unsigned long)vaddr);
#endif

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		__flush_invalidate_region(p1addr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		__flush_wback_region(p1addr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		__flush_purge_region(p1addr, size);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(dma_cache_sync);

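/*
 * Boot-time override of per-device memory chunk sizes.  On the kernel
 * command line, "memchunk.<name>=<size>" (e.g. "memchunk.vpu=2m")
 * forces the chunk size used by platform_resource_setup_memory() for
 * the device of that name.  The __setup() handler below merely claims
 * the "memchunk." prefix so the option is not reported as unknown;
 * the real parsing happens in memchunk_cmdline_override().
 */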
static int __init memchunk_setup(char *str)
{
	return 1;	/* accept anything that begins with "memchunk." */
}
__setup("memchunk.", memchunk_setup);

static void __init memchunk_cmdline_override(char *name, unsigned long *sizep)
{
	char *p = boot_command_line;
	int k = strlen(name);

	while ((p = strstr(p, "memchunk."))) {
		p += 9;	/* strlen("memchunk.") */
		if (!strncmp(name, p, k) && p[k] == '=') {
			p += k + 1;
			*sizep = memparse(p, NULL);
			pr_info("%s: forcing memory chunk size to 0x%08lx\n",
				name, *sizep);
			break;
		}
	}
}
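
/*
 * Allocate a DMA-coherent chunk for a platform device and publish it
 * as the device's last resource.  The caller is expected to leave that
 * final resource slot empty (flags == 0) so it can be filled in here.
 */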
int __init platform_resource_setup_memory(struct platform_device *pdev,
					   char *name, unsigned long memsize)
{
	struct resource *r;
	dma_addr_t dma_handle;
	void *buf;

	r = pdev->resource + pdev->num_resources - 1;
	if (r->flags) {
		pr_warning("%s: unable to find empty space for resource\n",
			   name);
		return -EINVAL;
	}

	memchunk_cmdline_override(name, &memsize);
	if (!memsize)
		return 0;

	buf = dma_alloc_coherent(NULL, memsize, &dma_handle, GFP_KERNEL);
	if (!buf) {
		pr_warning("%s: unable to allocate memory\n", name);
		return -ENOMEM;
	}

	memset(buf, 0, memsize);

	r->flags = IORESOURCE_MEM;
	r->start = dma_handle;
	r->end = r->start + memsize - 1;
	r->name = name;
	return 0;
}
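
/*
 * Usage sketch (hypothetical board setup code, not part of this file):
 * the last resource slot of the device is left empty so that this
 * helper can fill it with, say, a 1 MiB coherent buffer.  The device
 * and resource names below are illustrative only.
 *
 *	static struct resource vpu_resources[] = {
 *		[0] = { ... },		// registers, IRQ, etc.
 *		[1] = { .flags = 0 },	// empty slot, filled at boot
 *	};
 *
 *	platform_resource_setup_memory(&vpu_device, "vpu", 1 << 20);
 */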