/*
 *  arch/arm/include/asm/cache.h
 */
#ifndef __ASMARM_CACHE_H
#define __ASMARM_CACHE_H

#define L1_CACHE_SHIFT		CONFIG_ARM_L1_CACHE_SHIFT
#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
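/*
 * For example, with CONFIG_ARM_L1_CACHE_SHIFT set to 6 (typical for
 * ARMv7 cores), L1_CACHE_BYTES works out to 1 << 6 == 64 bytes.
 */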

/*
 * Memory returned by kmalloc() may be used for DMA, so we must make
 * sure that all such allocations are cache aligned. Otherwise,
 * unrelated code may cause parts of the buffer to be read into the
 * cache before the transfer is done, causing old data to be seen by
 * the CPU.
 */
#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
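/*
 * A minimal sketch of a driver pattern that relies on this guarantee
 * (dev, len and the transfer direction are placeholders):
 *
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	... device writes into buf ...
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 *
 * Because buf starts on an L1_CACHE_BYTES boundary and kmalloc pads
 * the object to the same alignment, no unrelated data shares the
 * buffer's cache lines, so a stray cacheline fill elsewhere cannot
 * leave stale data covering the freshly DMA'd bytes.
 */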

/*
 * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
 */
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define ARCH_SLAB_MINALIGN 8
#endif
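/*
 * Sketch of the rationale: EABI gives u64/long long 8-byte alignment,
 * and on ARMv5TE and later the compiler may use ldrd/strd for such
 * fields. With ARCH_SLAB_MINALIGN at 8, an object such as
 *
 *	struct foo { u64 counter; };
 *
 * allocated from a slab cache is 8-byte aligned, so those doubleword
 * accesses are safe.
 */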

#endif