sh: Use L1_CACHE_BYTES for .data.cacheline_aligned.

Previously this was using a hardcoded 32; use L1_CACHE_BYTES for
cacheline alignment instead. As vmlinux.lds.S now pulls in
<asm/cache.h>, the C-only struct cache_info definition is wrapped
in #ifndef __ASSEMBLY__ so the header remains safe to include from
the linker script.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
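---
Note: aligning __per_cpu_start on a cacheline boundary keeps the
per-cpu data from sharing a line with adjacent sections. As a
hedged illustration of what the existing L1_CACHE_ALIGN() macro in
this header computes (L1_CACHE_BYTES == 32 is assumed purely for
the example; the real value is per-CPU configuration):

	/* Round x up to the next L1 cacheline boundary: */
	#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))

	/* With L1_CACHE_BYTES == 32 (hypothetical here):
	 *   L1_CACHE_ALIGN(33) == (33 + 31) & ~31 == 64
	 *   L1_CACHE_ALIGN(32) == (32 + 31) & ~31 == 32
	 */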
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index 75de165..78a6c09 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -3,6 +3,7 @@
  * Written by Niibe Yutaka
  */
 #include <asm/thread_info.h>
+#include <asm/cache.h>
 #include <asm-generic/vmlinux.lds.h>
 
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
@@ -53,7 +54,7 @@
   . = ALIGN(PAGE_SIZE);
   .data.page_aligned : { *(.data.page_aligned) }
 
-  . = ALIGN(32);
+  . = ALIGN(L1_CACHE_BYTES);
   __per_cpu_start = .;
   .data.percpu : { *(.data.percpu) }
   __per_cpu_end = .;
diff --git a/include/asm-sh/cache.h b/include/asm-sh/cache.h
index e3a180c..9a3cb6b 100644
--- a/include/asm-sh/cache.h
+++ b/include/asm-sh/cache.h
@@ -21,6 +21,7 @@
 
 #define L1_CACHE_ALIGN(x)	(((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
 
+#ifndef __ASSEMBLY__
 struct cache_info {
 	unsigned int ways;		/* Number of cache ways */
 	unsigned int sets;		/* Number of cache sets */
@@ -47,6 +48,6 @@
 
 	unsigned long flags;
 };
-
+#endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_CACHE_H */