blob: 3095d958147564a27fdc198652c0dcd512e73466 [file] [log] [blame]
/*
 * arch/sh/mm/consistent.c
 *
 * Copyright (C) 2004 - 2007 Paul Mundt
 *
 * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
12#include <linux/mm.h>
13#include <linux/dma-mapping.h>
Paul Mundt26ff6c12006-09-27 15:13:36 +090014#include <asm/cacheflush.h>
15#include <asm/addrspace.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070016#include <asm/io.h>
17
/*
 * Per-device coherent memory pool bookkeeping.
 *
 * NOTE(review): this appears to mirror a structure owned by the generic
 * coherent-DMA code (dev->dma_mem is cast to it below) — confirm it stays
 * in sync with the canonical definition.  Only ->flags is read in this
 * file, to test DMA_MEMORY_EXCLUSIVE in dma_free_coherent().
 */
struct dma_coherent_mem {
	void		*virt_base;	/* kernel virtual base of the pool (unused here) */
	u32		device_base;	/* device-side base address (unused here) */
	int		size;		/* pool size (unused here) */
	int		flags;		/* DMA_MEMORY_* flags; checked on free */
	unsigned long	*bitmap;	/* allocation bitmap (unused here) */
};
25
26void *dma_alloc_coherent(struct device *dev, size_t size,
27 dma_addr_t *dma_handle, gfp_t gfp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070028{
Magnus Damm2a3eeba2008-01-25 12:42:48 +090029 void *ret, *ret_nocache;
Magnus Dammf93e97e2008-01-24 18:35:10 +090030 int order = get_order(size);
Linus Torvalds1da177e2005-04-16 15:20:36 -070031
Dmitry Baryshkov9de90ac2008-07-18 13:30:31 +040032 if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
33 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
Magnus Dammf93e97e2008-01-24 18:35:10 +090035 ret = (void *)__get_free_pages(gfp, order);
Magnus Damm2a3eeba2008-01-25 12:42:48 +090036 if (!ret)
37 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -070038
Magnus Damm2a3eeba2008-01-25 12:42:48 +090039 memset(ret, 0, size);
40 /*
41 * Pages from the page allocator may have data present in
42 * cache. So flush the cache before using uncached memory.
43 */
44 dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);
45
46 ret_nocache = ioremap_nocache(virt_to_phys(ret), size);
47 if (!ret_nocache) {
48 free_pages((unsigned long)ret, order);
49 return NULL;
Magnus Dammf93e97e2008-01-24 18:35:10 +090050 }
Magnus Damm2a3eeba2008-01-25 12:42:48 +090051
52 *dma_handle = virt_to_phys(ret);
53 return ret_nocache;
Magnus Dammf93e97e2008-01-24 18:35:10 +090054}
55EXPORT_SYMBOL(dma_alloc_coherent);
56
57void dma_free_coherent(struct device *dev, size_t size,
58 void *vaddr, dma_addr_t dma_handle)
Linus Torvalds1da177e2005-04-16 15:20:36 -070059{
Magnus Dammf93e97e2008-01-24 18:35:10 +090060 struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
61 int order = get_order(size);
Linus Torvalds1da177e2005-04-16 15:20:36 -070062
Dmitry Baryshkov9de90ac2008-07-18 13:30:31 +040063 if (!dma_release_from_coherent(dev, order, vaddr)) {
Magnus Dammf93e97e2008-01-24 18:35:10 +090064 WARN_ON(irqs_disabled()); /* for portability */
65 BUG_ON(mem && mem->flags & DMA_MEMORY_EXCLUSIVE);
Magnus Damm2a3eeba2008-01-25 12:42:48 +090066 free_pages((unsigned long)phys_to_virt(dma_handle), order);
67 iounmap(vaddr);
Magnus Dammf93e97e2008-01-24 18:35:10 +090068 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070069}
Magnus Dammf93e97e2008-01-24 18:35:10 +090070EXPORT_SYMBOL(dma_free_coherent);
Linus Torvalds1da177e2005-04-16 15:20:36 -070071
Magnus Dammf93e97e2008-01-24 18:35:10 +090072void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
73 enum dma_data_direction direction)
Linus Torvalds1da177e2005-04-16 15:20:36 -070074{
Paul Mundt8a7bcf02007-11-11 17:07:06 +090075#ifdef CONFIG_CPU_SH5
76 void *p1addr = vaddr;
77#else
78 void *p1addr = (void*) P1SEGADDR((unsigned long)vaddr);
79#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -070080
81 switch (direction) {
82 case DMA_FROM_DEVICE: /* invalidate only */
Ralf Baechle622a9ed2007-10-16 23:29:42 -070083 __flush_invalidate_region(p1addr, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -070084 break;
85 case DMA_TO_DEVICE: /* writeback only */
Ralf Baechle622a9ed2007-10-16 23:29:42 -070086 __flush_wback_region(p1addr, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -070087 break;
88 case DMA_BIDIRECTIONAL: /* writeback and invalidate */
Ralf Baechle622a9ed2007-10-16 23:29:42 -070089 __flush_purge_region(p1addr, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -070090 break;
91 default:
92 BUG();
93 }
94}
Magnus Dammf93e97e2008-01-24 18:35:10 +090095EXPORT_SYMBOL(dma_cache_sync);