/*
 * arch/sh/kernel/setup.c
 *
 * This file handles the architecture-dependent parts of initialization
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2002 - 2010  Paul Mundt
 */
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/utsname.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/pfn.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/err.h>
#include <linux/debugfs.h>
#include <linux/crash_dump.h>
#include <linux/mmzone.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/lmb.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/elf.h>
#include <asm/sections.h>
#include <asm/irq.h>
#include <asm/setup.h>
#include <asm/clock.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>

/*
 * Initialize loops_per_jiffy to 10000000 (1000 MIPS).
 * This value is used at the very early stages of serial setup;
 * a larger value causes no problems.
 */
struct sh_cpuinfo cpu_data[NR_CPUS] __read_mostly = {
	[0] = {
		.type			= CPU_SH_NONE,
		.family			= CPU_FAMILY_UNKNOWN,
		.loops_per_jiffy	= 10000000,
	},
};
EXPORT_SYMBOL(cpu_data);

/*
 * The machine vector. First entry in .machvec.init, or clobbered by
 * sh_mv= on the command line, prior to .machvec.init teardown.
 */
struct sh_machine_vector sh_mv = { .mv_name = "generic", };
EXPORT_SYMBOL(sh_mv);

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif

extern int root_mountflags;

#define RAMDISK_IMAGE_START_MASK	0x07FF
#define RAMDISK_PROMPT_FLAG		0x8000
#define RAMDISK_LOAD_FLAG		0x4000

static char __initdata command_line[COMMAND_LINE_SIZE] = { 0, };

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

unsigned long memory_start;
EXPORT_SYMBOL(memory_start);
unsigned long memory_end = 0;
EXPORT_SYMBOL(memory_end);
unsigned long memory_limit = 0;

static struct resource mem_resources[MAX_NUMNODES];

int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;

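/*
 * "mem=" early parameter: cap the amount of memory used by the kernel.
 * The value is page-aligned and recorded in memory_limit, which is later
 * enforced against LMB in setup_arch().
 */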
static int __init early_parse_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = PAGE_ALIGN(memparse(p, &p));

	pr_notice("Memory limited to %ldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_parse_mem);

/*
 * Register fully available low RAM pages with the bootmem allocator.
 */
static void __init register_bootmem_low_pages(void)
{
	unsigned long curr_pfn, last_pfn, pages;

	/*
	 * We are rounding up the start address of usable memory:
	 */
	curr_pfn = PFN_UP(__MEMORY_START);

	/*
	 * ... and at the end of the usable range downwards:
	 */
	last_pfn = PFN_DOWN(__pa(memory_end));

	if (last_pfn > max_low_pfn)
		last_pfn = max_low_pfn;

	pages = last_pfn - curr_pfn;
	free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(pages));
}

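/*
 * Validate any initrd handed over by the boot loader, fix up the generic
 * initrd_start/initrd_end pointers, and reserve the region in LMB so it
 * is not recycled by the early allocators.
 */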
static void __init check_for_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long start, end;

	/*
	 * Check for the rare cases where boot loaders adhere to the boot
	 * ABI.
	 */
	if (!LOADER_TYPE || !INITRD_START || !INITRD_SIZE)
		goto disable;

	start = INITRD_START + __MEMORY_START;
	end = start + INITRD_SIZE;

	if (unlikely(end <= start))
		goto disable;
	if (unlikely(start & ~PAGE_MASK)) {
		pr_err("initrd must be page aligned\n");
		goto disable;
	}

	if (unlikely(start < PAGE_OFFSET)) {
		pr_err("initrd start < PAGE_OFFSET\n");
		goto disable;
	}

	if (unlikely(end > lmb_end_of_DRAM())) {
		pr_err("initrd extends beyond end of memory "
		       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
		       end, (unsigned long)lmb_end_of_DRAM());
		goto disable;
	}

	/*
	 * If we got this far in spite of the boot loader's best efforts
	 * to the contrary, assume we actually have a valid initrd and
	 * fix up the root dev.
	 */
	ROOT_DEV = Root_RAM0;

	/*
	 * Address sanitization
	 */
	initrd_start = (unsigned long)__va(__pa(start));
	initrd_end = initrd_start + INITRD_SIZE;

	lmb_reserve(__pa(initrd_start), INITRD_SIZE);

	return;

disable:
	pr_info("initrd disabled\n");
	initrd_start = initrd_end = 0;
#endif
}

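/*
 * No timed calibration here: loops_per_jiffy is derived directly from
 * the CPU clock rate, so the BogoMIPS value is reported as a preset.
 */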
void __cpuinit calibrate_delay(void)
{
	struct clk *clk = clk_get(NULL, "cpu_clk");

	if (IS_ERR(clk))
		panic("Need a sane CPU clock definition!");

	loops_per_jiffy = (clk_get_rate(clk) >> 1) / HZ;

	printk(KERN_INFO "Calibrating delay loop (skipped)... "
			 "%lu.%02lu BogoMIPS PRESET (lpj=%lu)\n",
			 loops_per_jiffy/(500000/HZ),
			 (loops_per_jiffy/(5000/HZ)) % 100,
			 loops_per_jiffy);
}

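/*
 * Register one contiguous PFN range for a node: claim it as "System RAM"
 * in the iomem resource tree, nest the kernel code/data/bss resources
 * inside it, and hand the range to the core mm as an active range.
 */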
void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
			       unsigned long end_pfn)
{
	struct resource *res = &mem_resources[nid];

	WARN_ON(res->name); /* max one active range per node for now */

	res->name = "System RAM";
	res->start = start_pfn << PAGE_SHIFT;
	res->end = (end_pfn << PAGE_SHIFT) - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	if (request_resource(&iomem_resource, res)) {
		pr_err("unable to request memory_resource 0x%lx 0x%lx\n",
		       start_pfn, end_pfn);
		return;
	}

	/*
	 * We don't know which RAM region contains kernel data,
	 * so we try it repeatedly and let the resource manager
	 * test it.
	 */
	request_resource(res, &code_resource);
	request_resource(res, &data_resource);
	request_resource(res, &bss_resource);

	add_active_range(nid, start_pfn, end_pfn);
}

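/*
 * Bring up the bootmem allocator from the LMB view of memory: place the
 * bootmem bitmap, register each LMB region as an active range, release
 * the usable low pages, and re-reserve everything LMB already holds.
 */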
void __init do_init_bootmem(void)
{
	unsigned long bootmap_size;
	unsigned long bootmap_pages, bootmem_paddr;
	u64 total_pages = lmb_phys_mem_size() >> PAGE_SHIFT;
	int i;

	bootmap_pages = bootmem_bootmap_pages(total_pages);

	bootmem_paddr = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

	/*
	 * Find a proper area for the bootmem bitmap. After this
	 * bootstrap step all allocations (until the page allocator
	 * is intact) must be done via bootmem_alloc().
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0),
					 bootmem_paddr >> PAGE_SHIFT,
					 min_low_pfn, max_low_pfn);

	/* Add active regions with valid PFNs. */
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long start_pfn, end_pfn;
		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
		__add_active_range(0, start_pfn, end_pfn);
	}

	/*
	 * Add all physical memory to the bootmem map and mark each
	 * area as present.
	 */
	register_bootmem_low_pages();

	/* Reserve the sections we're already using. */
	for (i = 0; i < lmb.reserved.cnt; i++)
		reserve_bootmem(lmb.reserved.region[i].base,
				lmb_size_bytes(&lmb.reserved, i),
				BOOTMEM_DEFAULT);

	node_set_online(0);

	sparse_memory_present_with_active_regions(0);
}

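/*
 * Reserve the ranges that must never be handed out by the allocators:
 * the kernel image (from the zero page offset up to _end), anything
 * below CONFIG_ZERO_PAGE_OFFSET, plus the initrd and crash kernel.
 */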
static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel text and the bootmem bitmap. We do this in
	 * two steps (the first step was init_bootmem()), because this
	 * catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
		    (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
		    (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}

/*
 * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
 * is_kdump_kernel() to determine if we are booting after a panic. Hence
 * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
 */
#ifdef CONFIG_CRASH_DUMP
/*
 * elfcorehdr= specifies the location of the elf core header
 * stored by the crashed kernel.
 */
static int __init parse_elfcorehdr(char *arg)
{
	if (!arg)
		return -EINVAL;
	elfcorehdr_addr = memparse(arg, &arg);
	return 0;
}
early_param("elfcorehdr", parse_elfcorehdr);
#endif

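/*
 * Default no-op hooks. Boards and platforms override these (they are
 * declared __weak) when they need to register early platform devices or
 * perform their own memory setup.
 */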
void __init __weak plat_early_device_setup(void)
{
}

void __init __weak plat_mem_setup(void)
{
}

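/*
 * Architecture-specific boot-time setup: decode the boot parameters,
 * build the kernel command line, size and reserve memory through LMB,
 * bring up bootmem, sparsemem and paging, and call out to the machine
 * vector and platform setup hooks.
 */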
void __init setup_arch(char **cmdline_p)
{
	enable_mmu();

	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);

	printk(KERN_NOTICE "Boot params:\n"
	       "... MOUNT_ROOT_RDONLY - %08lx\n"
	       "... RAMDISK_FLAGS     - %08lx\n"
	       "... ORIG_ROOT_DEV     - %08lx\n"
	       "... LOADER_TYPE       - %08lx\n"
	       "... INITRD_START      - %08lx\n"
	       "... INITRD_SIZE       - %08lx\n",
	       MOUNT_ROOT_RDONLY, RAMDISK_FLAGS,
	       ORIG_ROOT_DEV, LOADER_TYPE,
	       INITRD_START, INITRD_SIZE);

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(_etext)-1;
	data_resource.start = virt_to_phys(_etext);
	data_resource.end = virt_to_phys(_edata)-1;
	bss_resource.start = virt_to_phys(__bss_start);
	bss_resource.end = virt_to_phys(_ebss)-1;

#ifdef CONFIG_CMDLINE_OVERWRITE
	strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line));
#else
	strlcpy(command_line, COMMAND_LINE, sizeof(command_line));
#ifdef CONFIG_CMDLINE_EXTEND
	strlcat(command_line, " ", sizeof(command_line));
	strlcat(command_line, CONFIG_CMDLINE, sizeof(command_line));
#endif
#endif

	/* Save unparsed command line copy for /proc/cmdline */
	memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	parse_early_param();

	plat_early_device_setup();

	/* Let earlyprintk output early console messages */
	early_platform_driver_probe("earlyprintk", 1, 1);

	lmb_init();

	sh_mv_setup();
	sh_mv.mv_mem_init();

	early_reserve_mem();

	lmb_enforce_memory_limit(memory_limit);
	lmb_analyze();

	lmb_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: lmb_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	plat_mem_setup();
	sparse_init();

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif
	paging_init();

	ioremap_fixed_init();

	/* Perform the machine specific initialisation */
	if (likely(sh_mv.mv_setup))
		sh_mv.mv_setup(cmdline_p);

	plat_smp_setup();
}

/* processor boot mode configuration */
int generic_mode_pins(void)
{
	pr_warning("generic_mode_pins(): missing mode pin configuration\n");
	return 0;
}

int test_mode_pin(int pin)
{
	return sh_mv.mv_mode_pins() & pin;
}

static const char *cpu_name[] = {
	[CPU_SH7201]	= "SH7201",
	[CPU_SH7203]	= "SH7203",	[CPU_SH7263]	= "SH7263",
	[CPU_SH7206]	= "SH7206",	[CPU_SH7619]	= "SH7619",
	[CPU_SH7705]	= "SH7705",	[CPU_SH7706]	= "SH7706",
	[CPU_SH7707]	= "SH7707",	[CPU_SH7708]	= "SH7708",
	[CPU_SH7709]	= "SH7709",	[CPU_SH7710]	= "SH7710",
	[CPU_SH7712]	= "SH7712",	[CPU_SH7720]	= "SH7720",
	[CPU_SH7721]	= "SH7721",	[CPU_SH7729]	= "SH7729",
	[CPU_SH7750]	= "SH7750",	[CPU_SH7750S]	= "SH7750S",
	[CPU_SH7750R]	= "SH7750R",	[CPU_SH7751]	= "SH7751",
	[CPU_SH7751R]	= "SH7751R",	[CPU_SH7760]	= "SH7760",
	[CPU_SH4_202]	= "SH4-202",	[CPU_SH4_501]	= "SH4-501",
	[CPU_SH7763]	= "SH7763",	[CPU_SH7770]	= "SH7770",
	[CPU_SH7780]	= "SH7780",	[CPU_SH7781]	= "SH7781",
	[CPU_SH7343]	= "SH7343",	[CPU_SH7785]	= "SH7785",
	[CPU_SH7786]	= "SH7786",	[CPU_SH7757]	= "SH7757",
	[CPU_SH7722]	= "SH7722",	[CPU_SHX3]	= "SH-X3",
	[CPU_SH5_101]	= "SH5-101",	[CPU_SH5_103]	= "SH5-103",
	[CPU_MXG]	= "MX-G",	[CPU_SH7723]	= "SH7723",
	[CPU_SH7366]	= "SH7366",	[CPU_SH7724]	= "SH7724",
	[CPU_SH_NONE]	= "Unknown"
};

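/* Map the probed CPU type to its human-readable name. */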
const char *get_cpu_subtype(struct sh_cpuinfo *c)
{
	return cpu_name[c->type];
}
EXPORT_SYMBOL(get_cpu_subtype);

#ifdef CONFIG_PROC_FS
/* Symbolic CPU flags, keep in sync with asm/cpu-features.h */
static const char *cpu_flags[] = {
	"none", "fpu", "p2flush", "mmuassoc", "dsp", "perfctr",
	"ptea", "llsc", "l2", "op32", "pteaex", NULL
};

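/* Emit the "cpu flags" line of /proc/cpuinfo from the CPU feature bits. */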
static void show_cpuflags(struct seq_file *m, struct sh_cpuinfo *c)
{
	unsigned long i;

	seq_printf(m, "cpu flags\t:");

	if (!c->flags) {
		seq_printf(m, " %s\n", cpu_flags[0]);
		return;
	}

	for (i = 0; cpu_flags[i]; i++)
		if ((c->flags & (1 << i)))
			seq_printf(m, " %s", cpu_flags[i+1]);

	seq_printf(m, "\n");
}

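/* Print one cache's total size and associativity in /proc/cpuinfo format. */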
static void show_cacheinfo(struct seq_file *m, const char *type,
			   struct cache_info info)
{
	unsigned int cache_size;

	cache_size = info.ways * info.sets * info.linesz;

	seq_printf(m, "%s size\t: %2dKiB (%d-way)\n",
		   type, cache_size >> 10, info.ways);
}

/*
 * Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct sh_cpuinfo *c = v;
	unsigned int cpu = c - cpu_data;

	if (!cpu_online(cpu))
		return 0;

	if (cpu == 0)
		seq_printf(m, "machine\t\t: %s\n", get_system_type());
	else
		seq_printf(m, "\n");

	seq_printf(m, "processor\t: %d\n", cpu);
	seq_printf(m, "cpu family\t: %s\n", init_utsname()->machine);
	seq_printf(m, "cpu type\t: %s\n", get_cpu_subtype(c));
	if (c->cut_major == -1)
		seq_printf(m, "cut\t\t: unknown\n");
	else if (c->cut_minor == -1)
		seq_printf(m, "cut\t\t: %d.x\n", c->cut_major);
	else
		seq_printf(m, "cut\t\t: %d.%d\n", c->cut_major, c->cut_minor);

	show_cpuflags(m, c);

	seq_printf(m, "cache type\t: ");

	/*
	 * Check what type of cache we have: we support both the unified
	 * cache of the SH-2 and SH-3 and the Harvard-style split cache
	 * of the SH-4.
	 */
	if (c->icache.flags & SH_CACHE_COMBINED) {
		seq_printf(m, "unified\n");
		show_cacheinfo(m, "cache", c->icache);
	} else {
		seq_printf(m, "split (harvard)\n");
		show_cacheinfo(m, "icache", c->icache);
		show_cacheinfo(m, "dcache", c->dcache);
	}

	/* Optional secondary cache */
	if (c->flags & CPU_HAS_L2_CACHE)
		show_cacheinfo(m, "scache", c->scache);

	seq_printf(m, "bogomips\t: %lu.%02lu\n",
		   c->loops_per_jiffy/(500000/HZ),
		   (c->loops_per_jiffy/(5000/HZ)) % 100);

	return 0;
}

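/* seq_file iteration callbacks: walk cpu_data[] one entry per CPU slot. */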
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < NR_CPUS ? cpu_data + *pos : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};
#endif /* CONFIG_PROC_FS */

struct dentry *sh_debugfs_root;

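/*
 * Create the top-level "sh" debugfs directory that the rest of the
 * architecture code uses as the parent for its debugfs entries.
 */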
static int __init sh_debugfs_init(void)
{
	sh_debugfs_root = debugfs_create_dir("sh", NULL);
	if (!sh_debugfs_root)
		return -ENOMEM;
	if (IS_ERR(sh_debugfs_root))
		return PTR_ERR(sh_debugfs_root);

	return 0;
}
arch_initcall(sh_debugfs_init);