 1/*
2 * linux/arch/alpha/kernel/setup.c
3 *
4 * Copyright (C) 1995 Linus Torvalds
5 */
6
7/* 2.3.x bootmem, 1999 Andrea Arcangeli <andrea@suse.de> */
8
9/*
10 * Bootup setup stuff.
11 */
12
13#include <linux/sched.h>
14#include <linux/kernel.h>
15#include <linux/mm.h>
16#include <linux/stddef.h>
17#include <linux/unistd.h>
18#include <linux/ptrace.h>
19#include <linux/slab.h>
20#include <linux/user.h>
21#include <linux/a.out.h>
22#include <linux/tty.h>
23#include <linux/delay.h>
24#include <linux/config.h> /* CONFIG_ALPHA_LCA etc */
25#include <linux/mc146818rtc.h>
26#include <linux/console.h>
27#include <linux/errno.h>
28#include <linux/init.h>
29#include <linux/string.h>
30#include <linux/ioport.h>
 31#include <linux/platform_device.h>
 32#include <linux/bootmem.h>
33#include <linux/pci.h>
34#include <linux/seq_file.h>
35#include <linux/root_dev.h>
36#include <linux/initrd.h>
37#include <linux/eisa.h>
38#ifdef CONFIG_MAGIC_SYSRQ
39#include <linux/sysrq.h>
40#include <linux/reboot.h>
41#endif
42#include <linux/notifier.h>
43#include <asm/setup.h>
44#include <asm/io.h>
45
46extern struct notifier_block *panic_notifier_list;
47static int alpha_panic_event(struct notifier_block *, unsigned long, void *);
48static struct notifier_block alpha_panic_block = {
49 alpha_panic_event,
50 NULL,
51 INT_MAX /* try to do it first */
52};
53
54#include <asm/uaccess.h>
55#include <asm/pgtable.h>
56#include <asm/system.h>
57#include <asm/hwrpb.h>
58#include <asm/dma.h>
59#include <asm/io.h>
60#include <asm/mmu_context.h>
61#include <asm/console.h>
62
63#include "proto.h"
64#include "pci_impl.h"
65
66
67struct hwrpb_struct *hwrpb;
68unsigned long srm_hae;
69
70int alpha_l1i_cacheshape;
71int alpha_l1d_cacheshape;
72int alpha_l2_cacheshape;
73int alpha_l3_cacheshape;
74
75#ifdef CONFIG_VERBOSE_MCHECK
76/* 0=minimum, 1=verbose, 2=all */
 77/* These can be overridden via the command line, e.g. "verbose_mcheck=2". */
78unsigned long alpha_verbose_mcheck = CONFIG_VERBOSE_MCHECK_ON;
79#endif
80
81/* Which processor we booted from. */
82int boot_cpuid;
83
84/*
85 * Using SRM callbacks for initial console output. This works from
86 * setup_arch() time through the end of time_init(), as those places
87 * are under our (Alpha) control.
 88 *
89 * "srmcons" specified in the boot command arguments allows us to
90 * see kernel messages during the period of time before the true
91 * console device is "registered" during console_init().
92 * As of this version (2.5.59), console_init() will call
93 * disable_early_printk() as the last action before initializing
94 * the console drivers. That's the last possible time srmcons can be
95 * unregistered without interfering with console behavior.
96 *
 97 * By default, OFF; set it with a boot command arg of "srmcons" or
98 * "console=srm". The meaning of these two args is:
99 * "srmcons" - early callback prints
100 * "console=srm" - full callback based console, including early prints
101 */
102int srmcons_output = 0;
103
104/* Enforce a memory size limit; useful for testing. By default, none. */
105unsigned long mem_size_limit = 0;
106
107/* Set AGP GART window size (0 means disabled). */
108unsigned long alpha_agpgart_size = DEFAULT_AGP_APER_SIZE;
109
110#ifdef CONFIG_ALPHA_GENERIC
111struct alpha_machine_vector alpha_mv;
112int alpha_using_srm;
113#endif
114
115#define N(a) (sizeof(a)/sizeof(a[0]))
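/* Illustrative aside: N(eb164_names) below evaluates to 5; this is the same
   idiom the kernel later standardized as ARRAY_SIZE(). */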
116
117static struct alpha_machine_vector *get_sysvec(unsigned long, unsigned long,
118 unsigned long);
119static struct alpha_machine_vector *get_sysvec_byname(const char *);
120static void get_sysnames(unsigned long, unsigned long, unsigned long,
121 char **, char **);
122static void determine_cpu_caches (unsigned int);
123
124static char command_line[COMMAND_LINE_SIZE];
125
126/*
127 * The format of "screen_info" is strange, and due to early
128 * i386-setup code. This is just enough to make the console
129 * code think we're on a VGA color display.
130 */
131
132struct screen_info screen_info = {
133 .orig_x = 0,
134 .orig_y = 25,
135 .orig_video_cols = 80,
136 .orig_video_lines = 25,
137 .orig_video_isVGA = 1,
138 .orig_video_points = 16
139};
140
141/*
142 * The direct map I/O window, if any. This should be the same
143 * for all busses, since it's used by virt_to_bus.
144 */
145
146unsigned long __direct_map_base;
147unsigned long __direct_map_size;
148
149/*
150 * Declare all of the machine vectors.
151 */
152
153/* GCC 2.7.2 (on alpha at least) is lame. It does not support either
154 __attribute__((weak)) or #pragma weak. Bypass it and talk directly
155 to the assembler. */
156
157#define WEAK(X) \
158 extern struct alpha_machine_vector X; \
159 asm(".weak "#X)
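/*
 * Illustrative expansion (not part of the build): WEAK(alcor_mv) becomes
 *
 *	extern struct alpha_machine_vector alcor_mv;
 *	asm(".weak alcor_mv");
 *
 * so when the corresponding platform file is not compiled in, the weak
 * symbol resolves to address zero and the &alcor_mv table entries used by
 * get_sysvec() below are simply NULL.
 */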
160
161WEAK(alcor_mv);
162WEAK(alphabook1_mv);
163WEAK(avanti_mv);
164WEAK(cabriolet_mv);
165WEAK(clipper_mv);
166WEAK(dp264_mv);
167WEAK(eb164_mv);
168WEAK(eb64p_mv);
169WEAK(eb66_mv);
170WEAK(eb66p_mv);
171WEAK(eiger_mv);
172WEAK(jensen_mv);
173WEAK(lx164_mv);
174WEAK(lynx_mv);
175WEAK(marvel_ev7_mv);
176WEAK(miata_mv);
177WEAK(mikasa_mv);
178WEAK(mikasa_primo_mv);
179WEAK(monet_mv);
180WEAK(nautilus_mv);
181WEAK(noname_mv);
182WEAK(noritake_mv);
183WEAK(noritake_primo_mv);
184WEAK(p2k_mv);
185WEAK(pc164_mv);
186WEAK(privateer_mv);
187WEAK(rawhide_mv);
188WEAK(ruffian_mv);
189WEAK(rx164_mv);
190WEAK(sable_mv);
191WEAK(sable_gamma_mv);
192WEAK(shark_mv);
193WEAK(sx164_mv);
194WEAK(takara_mv);
195WEAK(titan_mv);
196WEAK(webbrick_mv);
197WEAK(wildfire_mv);
198WEAK(xl_mv);
199WEAK(xlt_mv);
200
201#undef WEAK
202
203/*
204 * I/O resources inherited from PeeCees. Except for perhaps the
205 * turbochannel alphas, everyone has these on some sort of SuperIO chip.
206 *
207 * ??? If this becomes less standard, move the struct out into the
208 * machine vector.
209 */
210
211static void __init
212reserve_std_resources(void)
213{
214 static struct resource standard_io_resources[] = {
215 { .name = "rtc", .start = -1, .end = -1 },
216 { .name = "dma1", .start = 0x00, .end = 0x1f },
217 { .name = "pic1", .start = 0x20, .end = 0x3f },
218 { .name = "timer", .start = 0x40, .end = 0x5f },
219 { .name = "keyboard", .start = 0x60, .end = 0x6f },
220 { .name = "dma page reg", .start = 0x80, .end = 0x8f },
221 { .name = "pic2", .start = 0xa0, .end = 0xbf },
222 { .name = "dma2", .start = 0xc0, .end = 0xdf },
223 };
224
225 struct resource *io = &ioport_resource;
226 size_t i;
227
228 if (hose_head) {
229 struct pci_controller *hose;
230 for (hose = hose_head; hose; hose = hose->next)
231 if (hose->index == 0) {
232 io = hose->io_space;
233 break;
234 }
235 }
236
237 /* Fix up for the Jensen's queer RTC placement. */
238 standard_io_resources[0].start = RTC_PORT(0);
239 standard_io_resources[0].end = RTC_PORT(0) + 0x10;
240
241 for (i = 0; i < N(standard_io_resources); ++i)
242 request_resource(io, standard_io_resources+i);
243}
244
245#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
246#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
247#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
248#define PFN_MAX PFN_DOWN(0x80000000)
249#define for_each_mem_cluster(memdesc, cluster, i) \
250 for ((cluster) = (memdesc)->cluster, (i) = 0; \
251 (i) < (memdesc)->numclusters; (i)++, (cluster)++)
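/*
 * Worked example of the helpers above (illustrative, assuming the Alpha's
 * 8K pages): PFN_UP(0x2001) == 2 and PFN_DOWN(0x2001) == 1, so
 * PFN_PHYS(PFN_UP(x)) rounds a byte address x up to the next page boundary.
 * The cluster walk they support looks like:
 *
 *	for_each_mem_cluster(memdesc, cluster, i)
 *		printk("cluster %lu: pfn %lu..%lu\n", i,
 *		       cluster->start_pfn,
 *		       cluster->start_pfn + cluster->numpages);
 */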
252
253static unsigned long __init
254get_mem_size_limit(char *s)
255{
256 unsigned long end = 0;
257 char *from = s;
258
259 end = simple_strtoul(from, &from, 0);
260 if ( *from == 'K' || *from == 'k' ) {
261 end = end << 10;
262 from++;
263 } else if ( *from == 'M' || *from == 'm' ) {
264 end = end << 20;
265 from++;
266 } else if ( *from == 'G' || *from == 'g' ) {
267 end = end << 30;
268 from++;
269 }
270 return end >> PAGE_SHIFT; /* Return the PFN of the limit. */
271}
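/*
 * Example (illustrative): "mem=512M" parses to end = 512 << 20 = 0x20000000,
 * and with the Alpha's 8K page size the function returns
 * 0x20000000 >> 13 == 65536 as the PFN limit.
 */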
272
273#ifdef CONFIG_BLK_DEV_INITRD
274void * __init
275move_initrd(unsigned long mem_limit)
276{
277 void *start;
278 unsigned long size;
279
280 size = initrd_end - initrd_start;
281 start = __alloc_bootmem(PAGE_ALIGN(size), PAGE_SIZE, 0);
282 if (!start || __pa(start) + size > mem_limit) {
283 initrd_start = initrd_end = 0;
284 return NULL;
285 }
286 memmove(start, (void *)initrd_start, size);
287 initrd_start = (unsigned long)start;
288 initrd_end = initrd_start + size;
289 printk("initrd moved to %p\n", start);
290 return start;
291}
292#endif
293
294#ifndef CONFIG_DISCONTIGMEM
295static void __init
296setup_memory(void *kernel_end)
297{
298 struct memclust_struct * cluster;
299 struct memdesc_struct * memdesc;
300 unsigned long start_kernel_pfn, end_kernel_pfn;
301 unsigned long bootmap_size, bootmap_pages, bootmap_start;
302 unsigned long start, end;
303 unsigned long i;
304
305 /* Find free clusters, and init and free the bootmem accordingly. */
306 memdesc = (struct memdesc_struct *)
307 (hwrpb->mddt_offset + (unsigned long) hwrpb);
308
309 for_each_mem_cluster(memdesc, cluster, i) {
310 printk("memcluster %lu, usage %01lx, start %8lu, end %8lu\n",
311 i, cluster->usage, cluster->start_pfn,
312 cluster->start_pfn + cluster->numpages);
313
314 /* Bit 0 is console/PALcode reserved. Bit 1 is
315 non-volatile memory -- we might want to mark
316 this for later. */
317 if (cluster->usage & 3)
318 continue;
319
320 end = cluster->start_pfn + cluster->numpages;
321 if (end > max_low_pfn)
322 max_low_pfn = end;
323 }
324
325 /*
326 * Except for the NUMA systems (wildfire, marvel) all of the
327 * Alpha systems we run on support 32GB of memory or less.
328 * Since the NUMA systems introduce large holes in memory addressing,
329 * we can get into a situation where there is not enough contiguous
330 * memory for the memory map.
331 *
 332 * Limit memory to the first 32GB to restrict the NUMA systems to
 333 * memory on their first node (wildfire) or first two nodes (marvel),
 334 * so that the memory map can still be produced. In order to access
335 * all of the memory on the NUMA systems, build with discontiguous
336 * memory support.
337 *
338 * If the user specified a memory limit, let that memory limit stand.
339 */
340 if (!mem_size_limit)
341 mem_size_limit = (32ul * 1024 * 1024 * 1024) >> PAGE_SHIFT;
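		/* With the Alpha's 8K pages this default works out to
		   32GB >> 13 == 4194304 PFNs. */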
342
343 if (mem_size_limit && max_low_pfn >= mem_size_limit)
344 {
345 printk("setup: forcing memory size to %ldK (from %ldK).\n",
346 mem_size_limit << (PAGE_SHIFT - 10),
347 max_low_pfn << (PAGE_SHIFT - 10));
348 max_low_pfn = mem_size_limit;
349 }
350
351 /* Find the bounds of kernel memory. */
352 start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS);
353 end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end));
354 bootmap_start = -1;
355
356 try_again:
357 if (max_low_pfn <= end_kernel_pfn)
358 panic("not enough memory to boot");
359
360 /* We need to know how many physically contiguous pages
361 we'll need for the bootmap. */
362 bootmap_pages = bootmem_bootmap_pages(max_low_pfn);
363
364 /* Now find a good region where to allocate the bootmap. */
365 for_each_mem_cluster(memdesc, cluster, i) {
366 if (cluster->usage & 3)
367 continue;
368
369 start = cluster->start_pfn;
370 end = start + cluster->numpages;
371 if (start >= max_low_pfn)
372 continue;
373 if (end > max_low_pfn)
374 end = max_low_pfn;
375 if (start < start_kernel_pfn) {
376 if (end > end_kernel_pfn
377 && end - end_kernel_pfn >= bootmap_pages) {
378 bootmap_start = end_kernel_pfn;
379 break;
380 } else if (end > start_kernel_pfn)
381 end = start_kernel_pfn;
382 } else if (start < end_kernel_pfn)
383 start = end_kernel_pfn;
384 if (end - start >= bootmap_pages) {
385 bootmap_start = start;
386 break;
387 }
388 }
389
390 if (bootmap_start == ~0UL) {
391 max_low_pfn >>= 1;
392 goto try_again;
393 }
394
395 /* Allocate the bootmap and mark the whole MM as reserved. */
396 bootmap_size = init_bootmem(bootmap_start, max_low_pfn);
397
398 /* Mark the free regions. */
399 for_each_mem_cluster(memdesc, cluster, i) {
400 if (cluster->usage & 3)
401 continue;
402
403 start = cluster->start_pfn;
404 end = cluster->start_pfn + cluster->numpages;
405 if (start >= max_low_pfn)
406 continue;
407 if (end > max_low_pfn)
408 end = max_low_pfn;
409 if (start < start_kernel_pfn) {
410 if (end > end_kernel_pfn) {
411 free_bootmem(PFN_PHYS(start),
412 (PFN_PHYS(start_kernel_pfn)
413 - PFN_PHYS(start)));
414 printk("freeing pages %ld:%ld\n",
415 start, start_kernel_pfn);
416 start = end_kernel_pfn;
417 } else if (end > start_kernel_pfn)
418 end = start_kernel_pfn;
419 } else if (start < end_kernel_pfn)
420 start = end_kernel_pfn;
421 if (start >= end)
422 continue;
423
424 free_bootmem(PFN_PHYS(start), PFN_PHYS(end) - PFN_PHYS(start));
425 printk("freeing pages %ld:%ld\n", start, end);
426 }
427
428 /* Reserve the bootmap memory. */
429 reserve_bootmem(PFN_PHYS(bootmap_start), bootmap_size);
430 printk("reserving pages %ld:%ld\n", bootmap_start, bootmap_start+PFN_UP(bootmap_size));
431
432#ifdef CONFIG_BLK_DEV_INITRD
433 initrd_start = INITRD_START;
434 if (initrd_start) {
435 initrd_end = initrd_start+INITRD_SIZE;
436 printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
437 (void *) initrd_start, INITRD_SIZE);
438
439 if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) {
440 if (!move_initrd(PFN_PHYS(max_low_pfn)))
441 printk("initrd extends beyond end of memory "
442 "(0x%08lx > 0x%p)\ndisabling initrd\n",
443 initrd_end,
444 phys_to_virt(PFN_PHYS(max_low_pfn)));
445 } else {
446 reserve_bootmem(virt_to_phys((void *)initrd_start),
447 INITRD_SIZE);
448 }
449 }
450#endif /* CONFIG_BLK_DEV_INITRD */
451}
452#else
453extern void setup_memory(void *);
454#endif /* !CONFIG_DISCONTIGMEM */
455
456int __init
457page_is_ram(unsigned long pfn)
458{
459 struct memclust_struct * cluster;
460 struct memdesc_struct * memdesc;
461 unsigned long i;
462
463 memdesc = (struct memdesc_struct *)
464 (hwrpb->mddt_offset + (unsigned long) hwrpb);
465 for_each_mem_cluster(memdesc, cluster, i)
466 {
467 if (pfn >= cluster->start_pfn &&
468 pfn < cluster->start_pfn + cluster->numpages) {
469 return (cluster->usage & 3) ? 0 : 1;
470 }
471 }
472
473 return 0;
474}
475
476#undef PFN_UP
477#undef PFN_DOWN
478#undef PFN_PHYS
479#undef PFN_MAX
480
481void __init
482setup_arch(char **cmdline_p)
483{
484 extern char _end[];
485
486 struct alpha_machine_vector *vec = NULL;
487 struct percpu_struct *cpu;
488 char *type_name, *var_name, *p;
489 void *kernel_end = _end; /* end of kernel */
490 char *args = command_line;
491
492 hwrpb = (struct hwrpb_struct*) __va(INIT_HWRPB->phys_addr);
493 boot_cpuid = hard_smp_processor_id();
494
495 /*
496 * Pre-process the system type to make sure it will be valid.
497 *
 498 * This may restore real CABRIO and EB66+ family names, i.e.
499 * EB64+ and EB66.
500 *
501 * Oh, and "white box" AS800 (aka DIGITAL Server 3000 series)
502 * and AS1200 (DIGITAL Server 5000 series) have the type as
503 * the negative of the real one.
504 */
505 if ((long)hwrpb->sys_type < 0) {
506 hwrpb->sys_type = -((long)hwrpb->sys_type);
507 hwrpb_update_checksum(hwrpb);
508 }
509
510 /* Register a call for panic conditions. */
511 notifier_chain_register(&panic_notifier_list, &alpha_panic_block);
512
513#ifdef CONFIG_ALPHA_GENERIC
514 /* Assume that we've booted from SRM if we haven't booted from MILO.
 515 Detect the latter by looking for "MILO" in the system serial nr. */
516 alpha_using_srm = strncmp((const char *)hwrpb->ssn, "MILO", 4) != 0;
517#endif
518
519 /* If we are using SRM, we want to allow callbacks
520 as early as possible, so do this NOW, and then
521 they should work immediately thereafter.
522 */
523 kernel_end = callback_init(kernel_end);
524
525 /*
526 * Locate the command line.
527 */
528 /* Hack for Jensen... since we're restricted to 8 or 16 chars for
529 boot flags depending on the boot mode, we need some shorthand.
530 This should do for installation. */
531 if (strcmp(COMMAND_LINE, "INSTALL") == 0) {
532 strlcpy(command_line, "root=/dev/fd0 load_ramdisk=1", sizeof command_line);
533 } else {
534 strlcpy(command_line, COMMAND_LINE, sizeof command_line);
535 }
536 strcpy(saved_command_line, command_line);
537 *cmdline_p = command_line;
538
539 /*
540 * Process command-line arguments.
541 */
542 while ((p = strsep(&args, " \t")) != NULL) {
543 if (!*p) continue;
544 if (strncmp(p, "alpha_mv=", 9) == 0) {
545 vec = get_sysvec_byname(p+9);
546 continue;
547 }
548 if (strncmp(p, "cycle=", 6) == 0) {
549 est_cycle_freq = simple_strtol(p+6, NULL, 0);
550 continue;
551 }
552 if (strncmp(p, "mem=", 4) == 0) {
553 mem_size_limit = get_mem_size_limit(p+4);
554 continue;
555 }
556 if (strncmp(p, "srmcons", 7) == 0) {
557 srmcons_output |= 1;
558 continue;
559 }
560 if (strncmp(p, "console=srm", 11) == 0) {
561 srmcons_output |= 2;
562 continue;
563 }
564 if (strncmp(p, "gartsize=", 9) == 0) {
565 alpha_agpgart_size =
566 get_mem_size_limit(p+9) << PAGE_SHIFT;
567 continue;
568 }
569#ifdef CONFIG_VERBOSE_MCHECK
570 if (strncmp(p, "verbose_mcheck=", 15) == 0) {
571 alpha_verbose_mcheck = simple_strtol(p+15, NULL, 0);
572 continue;
573 }
574#endif
575 }
576
577 /* Replace the command line, now that we've killed it with strsep. */
578 strcpy(command_line, saved_command_line);
579
580 /* If we want SRM console printk echoing early, do it now. */
581 if (alpha_using_srm && srmcons_output) {
582 register_srm_console();
583
584 /*
585 * If "console=srm" was specified, clear the srmcons_output
 586 * flag now so that time.c won't call unregister_srm_console().
587 */
588 if (srmcons_output & 2)
589 srmcons_output = 0;
590 }
591
592#ifdef CONFIG_MAGIC_SYSRQ
593 /* If we're using SRM, make sysrq-b halt back to the prom,
594 not auto-reboot. */
595 if (alpha_using_srm) {
596 struct sysrq_key_op *op = __sysrq_get_key_op('b');
597 op->handler = (void *) machine_halt;
598 }
599#endif
600
601 /*
602 * Identify and reconfigure for the current system.
603 */
604 cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);
605
606 get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
607 cpu->type, &type_name, &var_name);
608 if (*var_name == '0')
609 var_name = "";
610
611 if (!vec) {
612 vec = get_sysvec(hwrpb->sys_type, hwrpb->sys_variation,
613 cpu->type);
614 }
615
616 if (!vec) {
617 panic("Unsupported system type: %s%s%s (%ld %ld)\n",
618 type_name, (*var_name ? " variation " : ""), var_name,
619 hwrpb->sys_type, hwrpb->sys_variation);
620 }
621 if (vec != &alpha_mv) {
622 alpha_mv = *vec;
623 }
624
625 printk("Booting "
626#ifdef CONFIG_ALPHA_GENERIC
627 "GENERIC "
628#endif
629 "on %s%s%s using machine vector %s from %s\n",
630 type_name, (*var_name ? " variation " : ""),
631 var_name, alpha_mv.vector_name,
632 (alpha_using_srm ? "SRM" : "MILO"));
633
634 printk("Major Options: "
635#ifdef CONFIG_SMP
636 "SMP "
637#endif
638#ifdef CONFIG_ALPHA_EV56
639 "EV56 "
640#endif
641#ifdef CONFIG_ALPHA_EV67
642 "EV67 "
643#endif
644#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
645 "LEGACY_START "
646#endif
647#ifdef CONFIG_VERBOSE_MCHECK
648 "VERBOSE_MCHECK "
649#endif
650
651#ifdef CONFIG_DISCONTIGMEM
652 "DISCONTIGMEM "
653#ifdef CONFIG_NUMA
654 "NUMA "
655#endif
656#endif
657
658#ifdef CONFIG_DEBUG_SPINLOCK
659 "DEBUG_SPINLOCK "
660#endif
661#ifdef CONFIG_MAGIC_SYSRQ
662 "MAGIC_SYSRQ "
663#endif
664 "\n");
665
666 printk("Command line: %s\n", command_line);
667
668 /*
669 * Sync up the HAE.
670 * Save the SRM's current value for restoration.
671 */
672 srm_hae = *alpha_mv.hae_register;
673 __set_hae(alpha_mv.hae_cache);
674
675 /* Reset enable correctable error reports. */
676 wrmces(0x7);
677
678 /* Find our memory. */
679 setup_memory(kernel_end);
680
681 /* First guess at cpu cache sizes. Do this before init_arch. */
682 determine_cpu_caches(cpu->type);
683
684 /* Initialize the machine. Usually has to do with setting up
685 DMA windows and the like. */
686 if (alpha_mv.init_arch)
687 alpha_mv.init_arch();
688
689 /* Reserve standard resources. */
690 reserve_std_resources();
691
692 /*
693 * Give us a default console. TGA users will see nothing until
694 * chr_dev_init is called, rather late in the boot sequence.
695 */
696
697#ifdef CONFIG_VT
698#if defined(CONFIG_VGA_CONSOLE)
699 conswitchp = &vga_con;
700#elif defined(CONFIG_DUMMY_CONSOLE)
701 conswitchp = &dummy_con;
702#endif
703#endif
704
705 /* Default root filesystem to sda2. */
706 ROOT_DEV = Root_SDA2;
707
708#ifdef CONFIG_EISA
709 /* FIXME: only set this when we actually have EISA in this box? */
710 EISA_bus = 1;
711#endif
712
713 /*
714 * Check ASN in HWRPB for validity, report if bad.
715 * FIXME: how was this failing? Should we trust it instead,
716 * and copy the value into alpha_mv.max_asn?
717 */
718
719 if (hwrpb->max_asn != MAX_ASN) {
720 printk("Max ASN from HWRPB is bad (0x%lx)\n", hwrpb->max_asn);
721 }
722
723 /*
724 * Identify the flock of penguins.
725 */
726
727#ifdef CONFIG_SMP
728 setup_smp();
729#endif
730 paging_init();
731}
732
733void __init
734disable_early_printk(void)
735{
736 if (alpha_using_srm && srmcons_output) {
737 unregister_srm_console();
738 srmcons_output = 0;
739 }
740}
741
742static char sys_unknown[] = "Unknown";
743static char systype_names[][16] = {
744 "0",
745 "ADU", "Cobra", "Ruby", "Flamingo", "Mannequin", "Jensen",
746 "Pelican", "Morgan", "Sable", "Medulla", "Noname",
747 "Turbolaser", "Avanti", "Mustang", "Alcor", "Tradewind",
748 "Mikasa", "EB64", "EB66", "EB64+", "AlphaBook1",
749 "Rawhide", "K2", "Lynx", "XL", "EB164", "Noritake",
750 "Cortex", "29", "Miata", "XXM", "Takara", "Yukon",
751 "Tsunami", "Wildfire", "CUSCO", "Eiger", "Titan", "Marvel"
752};
753
754static char unofficial_names[][8] = {"100", "Ruffian"};
755
756static char api_names[][16] = {"200", "Nautilus"};
757
758static char eb164_names[][8] = {"EB164", "PC164", "LX164", "SX164", "RX164"};
759static int eb164_indices[] = {0,0,0,1,1,1,1,1,2,2,2,2,3,3,3,3,4};
760
761static char alcor_names[][16] = {"Alcor", "Maverick", "Bret"};
762static int alcor_indices[] = {0,0,0,1,1,1,0,0,0,0,0,0,2,2,2,2,2,2};
763
764static char eb64p_names[][16] = {"EB64+", "Cabriolet", "AlphaPCI64"};
765static int eb64p_indices[] = {0,0,1,2};
766
767static char eb66_names[][8] = {"EB66", "EB66+"};
768static int eb66_indices[] = {0,0,1};
769
770static char marvel_names[][16] = {
771 "Marvel/EV7"
772};
773static int marvel_indices[] = { 0 };
774
775static char rawhide_names[][16] = {
776 "Dodge", "Wrangler", "Durango", "Tincup", "DaVinci"
777};
778static int rawhide_indices[] = {0,0,0,1,1,2,2,3,3,4,4};
779
780static char titan_names[][16] = {
781 "DEFAULT", "Privateer", "Falcon", "Granite"
782};
783static int titan_indices[] = {0,1,2,2,3};
784
785static char tsunami_names[][16] = {
786 "0", "DP264", "Warhol", "Windjammer", "Monet", "Clipper",
787 "Goldrush", "Webbrick", "Catamaran", "Brisbane", "Melbourne",
788 "Flying Clipper", "Shark"
789};
790static int tsunami_indices[] = {0,1,2,3,4,5,6,7,8,9,10,11,12};
791
792static struct alpha_machine_vector * __init
793get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu)
794{
795 static struct alpha_machine_vector *systype_vecs[] __initdata =
796 {
797 NULL, /* 0 */
798 NULL, /* ADU */
799 NULL, /* Cobra */
800 NULL, /* Ruby */
801 NULL, /* Flamingo */
802 NULL, /* Mannequin */
803 &jensen_mv,
804 NULL, /* Pelican */
805 NULL, /* Morgan */
806 NULL, /* Sable -- see below. */
807 NULL, /* Medulla */
808 &noname_mv,
809 NULL, /* Turbolaser */
810 &avanti_mv,
811 NULL, /* Mustang */
812 NULL, /* Alcor, Bret, Maverick. HWRPB inaccurate? */
813 NULL, /* Tradewind */
814 NULL, /* Mikasa -- see below. */
815 NULL, /* EB64 */
816 NULL, /* EB66 -- see variation. */
817 NULL, /* EB64+ -- see variation. */
818 &alphabook1_mv,
819 &rawhide_mv,
820 NULL, /* K2 */
821 &lynx_mv, /* Lynx */
822 &xl_mv,
823 NULL, /* EB164 -- see variation. */
824 NULL, /* Noritake -- see below. */
825 NULL, /* Cortex */
826 NULL, /* 29 */
827 &miata_mv,
828 NULL, /* XXM */
829 &takara_mv,
830 NULL, /* Yukon */
831 NULL, /* Tsunami -- see variation. */
832 &wildfire_mv, /* Wildfire */
833 NULL, /* CUSCO */
834 &eiger_mv, /* Eiger */
835 NULL, /* Titan */
836 NULL, /* Marvel */
837 };
838
839 static struct alpha_machine_vector *unofficial_vecs[] __initdata =
840 {
841 NULL, /* 100 */
842 &ruffian_mv,
843 };
844
845 static struct alpha_machine_vector *api_vecs[] __initdata =
846 {
847 NULL, /* 200 */
848 &nautilus_mv,
849 };
850
851 static struct alpha_machine_vector *alcor_vecs[] __initdata =
852 {
853 &alcor_mv, &xlt_mv, &xlt_mv
854 };
855
856 static struct alpha_machine_vector *eb164_vecs[] __initdata =
857 {
858 &eb164_mv, &pc164_mv, &lx164_mv, &sx164_mv, &rx164_mv
859 };
860
861 static struct alpha_machine_vector *eb64p_vecs[] __initdata =
862 {
863 &eb64p_mv,
864 &cabriolet_mv,
865 &cabriolet_mv /* AlphaPCI64 */
866 };
867
868 static struct alpha_machine_vector *eb66_vecs[] __initdata =
869 {
870 &eb66_mv,
871 &eb66p_mv
872 };
873
874 static struct alpha_machine_vector *marvel_vecs[] __initdata =
875 {
876 &marvel_ev7_mv,
877 };
878
879 static struct alpha_machine_vector *titan_vecs[] __initdata =
880 {
881 &titan_mv, /* default */
882 &privateer_mv, /* privateer */
883 &titan_mv, /* falcon */
884 &privateer_mv, /* granite */
885 };
886
887 static struct alpha_machine_vector *tsunami_vecs[] __initdata =
888 {
889 NULL,
890 &dp264_mv, /* dp264 */
891 &dp264_mv, /* warhol */
892 &dp264_mv, /* windjammer */
893 &monet_mv, /* monet */
894 &clipper_mv, /* clipper */
895 &dp264_mv, /* goldrush */
896 &webbrick_mv, /* webbrick */
897 &dp264_mv, /* catamaran */
898 NULL, /* brisbane? */
899 NULL, /* melbourne? */
900 NULL, /* flying clipper? */
901 &shark_mv, /* shark */
902 };
903
904 /* ??? Do we need to distinguish between Rawhides? */
905
906 struct alpha_machine_vector *vec;
907
908 /* Search the system tables first... */
909 vec = NULL;
910 if (type < N(systype_vecs)) {
911 vec = systype_vecs[type];
912 } else if ((type > ST_API_BIAS) &&
913 (type - ST_API_BIAS) < N(api_vecs)) {
914 vec = api_vecs[type - ST_API_BIAS];
915 } else if ((type > ST_UNOFFICIAL_BIAS) &&
916 (type - ST_UNOFFICIAL_BIAS) < N(unofficial_vecs)) {
917 vec = unofficial_vecs[type - ST_UNOFFICIAL_BIAS];
918 }
919
920 /* If we've not found one, try for a variation. */
921
922 if (!vec) {
923 /* Member ID is a bit-field. */
924 unsigned long member = (variation >> 10) & 0x3f;
925
926 cpu &= 0xffffffff; /* make it usable */
927
928 switch (type) {
929 case ST_DEC_ALCOR:
930 if (member < N(alcor_indices))
931 vec = alcor_vecs[alcor_indices[member]];
932 break;
933 case ST_DEC_EB164:
934 if (member < N(eb164_indices))
935 vec = eb164_vecs[eb164_indices[member]];
936 /* PC164 may show as EB164 variation with EV56 CPU,
 937 but, since no true EB164 had anything but EV5... */
938 if (vec == &eb164_mv && cpu == EV56_CPU)
939 vec = &pc164_mv;
940 break;
941 case ST_DEC_EB64P:
942 if (member < N(eb64p_indices))
943 vec = eb64p_vecs[eb64p_indices[member]];
944 break;
945 case ST_DEC_EB66:
946 if (member < N(eb66_indices))
947 vec = eb66_vecs[eb66_indices[member]];
948 break;
949 case ST_DEC_MARVEL:
950 if (member < N(marvel_indices))
951 vec = marvel_vecs[marvel_indices[member]];
952 break;
953 case ST_DEC_TITAN:
954 vec = titan_vecs[0]; /* default */
955 if (member < N(titan_indices))
956 vec = titan_vecs[titan_indices[member]];
957 break;
958 case ST_DEC_TSUNAMI:
959 if (member < N(tsunami_indices))
960 vec = tsunami_vecs[tsunami_indices[member]];
961 break;
962 case ST_DEC_1000:
963 if (cpu == EV5_CPU || cpu == EV56_CPU)
964 vec = &mikasa_primo_mv;
965 else
966 vec = &mikasa_mv;
967 break;
968 case ST_DEC_NORITAKE:
969 if (cpu == EV5_CPU || cpu == EV56_CPU)
970 vec = &noritake_primo_mv;
971 else
972 vec = &noritake_mv;
973 break;
974 case ST_DEC_2100_A500:
975 if (cpu == EV5_CPU || cpu == EV56_CPU)
976 vec = &sable_gamma_mv;
977 else
978 vec = &sable_mv;
979 break;
980 }
981 }
982 return vec;
983}
984
985static struct alpha_machine_vector * __init
986get_sysvec_byname(const char *name)
987{
988 static struct alpha_machine_vector *all_vecs[] __initdata =
989 {
990 &alcor_mv,
991 &alphabook1_mv,
992 &avanti_mv,
993 &cabriolet_mv,
994 &clipper_mv,
995 &dp264_mv,
996 &eb164_mv,
997 &eb64p_mv,
998 &eb66_mv,
999 &eb66p_mv,
1000 &eiger_mv,
1001 &jensen_mv,
1002 &lx164_mv,
1003 &lynx_mv,
1004 &miata_mv,
1005 &mikasa_mv,
1006 &mikasa_primo_mv,
1007 &monet_mv,
1008 &nautilus_mv,
1009 &noname_mv,
1010 &noritake_mv,
1011 &noritake_primo_mv,
1012 &p2k_mv,
1013 &pc164_mv,
1014 &privateer_mv,
1015 &rawhide_mv,
1016 &ruffian_mv,
1017 &rx164_mv,
1018 &sable_mv,
1019 &sable_gamma_mv,
1020 &shark_mv,
1021 &sx164_mv,
1022 &takara_mv,
1023 &webbrick_mv,
1024 &wildfire_mv,
1025 &xl_mv,
1026 &xlt_mv
1027 };
1028
1029 size_t i;
1030
1031 for (i = 0; i < N(all_vecs); ++i) {
1032 struct alpha_machine_vector *mv = all_vecs[i];
1033 if (strcasecmp(mv->vector_name, name) == 0)
1034 return mv;
1035 }
1036 return NULL;
1037}
1038
1039static void
1040get_sysnames(unsigned long type, unsigned long variation, unsigned long cpu,
1041 char **type_name, char **variation_name)
1042{
1043 unsigned long member;
1044
1045 /* If not in the tables, make it UNKNOWN,
1046 else set type name to family */
1047 if (type < N(systype_names)) {
1048 *type_name = systype_names[type];
1049 } else if ((type > ST_API_BIAS) &&
1050 (type - ST_API_BIAS) < N(api_names)) {
1051 *type_name = api_names[type - ST_API_BIAS];
1052 } else if ((type > ST_UNOFFICIAL_BIAS) &&
1053 (type - ST_UNOFFICIAL_BIAS) < N(unofficial_names)) {
1054 *type_name = unofficial_names[type - ST_UNOFFICIAL_BIAS];
1055 } else {
1056 *type_name = sys_unknown;
1057 *variation_name = sys_unknown;
1058 return;
1059 }
1060
1061 /* Set variation to "0"; if variation is zero, done. */
1062 *variation_name = systype_names[0];
1063 if (variation == 0) {
1064 return;
1065 }
1066
1067 member = (variation >> 10) & 0x3f; /* member ID is a bit-field */
1068
1069 cpu &= 0xffffffff; /* make it usable */
1070
1071 switch (type) { /* select by family */
1072 default: /* default to variation "0" for now */
1073 break;
1074 case ST_DEC_EB164:
1075 if (member < N(eb164_indices))
1076 *variation_name = eb164_names[eb164_indices[member]];
1077 /* PC164 may show as EB164 variation, but with EV56 CPU,
 1078 so, since no true EB164 had anything but EV5... */
1079 if (eb164_indices[member] == 0 && cpu == EV56_CPU)
1080 *variation_name = eb164_names[1]; /* make it PC164 */
1081 break;
1082 case ST_DEC_ALCOR:
1083 if (member < N(alcor_indices))
1084 *variation_name = alcor_names[alcor_indices[member]];
1085 break;
1086 case ST_DEC_EB64P:
1087 if (member < N(eb64p_indices))
1088 *variation_name = eb64p_names[eb64p_indices[member]];
1089 break;
1090 case ST_DEC_EB66:
1091 if (member < N(eb66_indices))
1092 *variation_name = eb66_names[eb66_indices[member]];
1093 break;
1094 case ST_DEC_MARVEL:
1095 if (member < N(marvel_indices))
1096 *variation_name = marvel_names[marvel_indices[member]];
1097 break;
1098 case ST_DEC_RAWHIDE:
1099 if (member < N(rawhide_indices))
1100 *variation_name = rawhide_names[rawhide_indices[member]];
1101 break;
1102 case ST_DEC_TITAN:
1103 *variation_name = titan_names[0]; /* default */
1104 if (member < N(titan_indices))
1105 *variation_name = titan_names[titan_indices[member]];
1106 break;
1107 case ST_DEC_TSUNAMI:
1108 if (member < N(tsunami_indices))
1109 *variation_name = tsunami_names[tsunami_indices[member]];
1110 break;
1111 }
1112}
1113
1114/*
1115 * A change was made to the HWRPB via an ECO and the following code
1116 * tracks a part of the ECO. In HWRPB versions less than 5, the ECO
1117 * was not implemented in the console firmware. If it's revision 5 or
1118 * greater we can get the name of the platform as an ASCII string from
1119 * the HWRPB. That's what this function does. It checks the revision
1120 * level and if the string is in the HWRPB it returns the address of
1121 * the string--a pointer to the name of the platform.
1122 *
1123 * Returns:
 1124 * - Pointer to an ASCII string if it's in the HWRPB
1125 * - Pointer to a blank string if the data is not in the HWRPB.
1126 */
1127
1128static char *
1129platform_string(void)
1130{
1131 struct dsr_struct *dsr;
1132 static char unk_system_string[] = "N/A";
1133
1134 /* Go to the console for the string pointer.
1135 * If the rpb_vers is not 5 or greater the rpb
1136 * is old and does not have this data in it.
1137 */
1138 if (hwrpb->revision < 5)
1139 return (unk_system_string);
1140 else {
1141 /* The Dynamic System Recognition struct
1142 * has the system platform name starting
1143 * after the character count of the string.
1144 */
1145 dsr = ((struct dsr_struct *)
1146 ((char *)hwrpb + hwrpb->dsr_offset));
1147 return ((char *)dsr + (dsr->sysname_off +
1148 sizeof(long)));
1149 }
1150}
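/*
 * Layout sketch of the lookup above (as described in the comment):
 * dsr->sysname_off is an offset from the DSR itself to a quadword holding
 * the string's character count, and the NUL-terminated platform name
 * follows immediately after it -- hence the extra sizeof(long) added to
 * skip over the count.
 */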
1151
1152static int
1153get_nr_processors(struct percpu_struct *cpubase, unsigned long num)
1154{
1155 struct percpu_struct *cpu;
1156 unsigned long i;
1157 int count = 0;
1158
1159 for (i = 0; i < num; i++) {
1160 cpu = (struct percpu_struct *)
1161 ((char *)cpubase + i*hwrpb->processor_size);
1162 if ((cpu->flags & 0x1cc) == 0x1cc)
1163 count++;
1164 }
1165 return count;
1166}
1167
1168static void
1169show_cache_size (struct seq_file *f, const char *which, int shape)
1170{
1171 if (shape == -1)
1172 seq_printf (f, "%s\t\t: n/a\n", which);
1173 else if (shape == 0)
1174 seq_printf (f, "%s\t\t: unknown\n", which);
1175 else
1176 seq_printf (f, "%s\t\t: %dK, %d-way, %db line\n",
1177 which, shape >> 10, shape & 15,
1178 1 << ((shape >> 4) & 15));
1179}
1180
1181static int
1182show_cpuinfo(struct seq_file *f, void *slot)
1183{
1184 extern struct unaligned_stat {
1185 unsigned long count, va, pc;
1186 } unaligned[2];
1187
1188 static char cpu_names[][8] = {
1189 "EV3", "EV4", "Simulate", "LCA4", "EV5", "EV45", "EV56",
1190 "EV6", "PCA56", "PCA57", "EV67", "EV68CB", "EV68AL",
1191 "EV68CX", "EV7", "EV79", "EV69"
1192 };
1193
1194 struct percpu_struct *cpu = slot;
1195 unsigned int cpu_index;
1196 char *cpu_name;
1197 char *systype_name;
1198 char *sysvariation_name;
1199 int nr_processors;
1200
1201 cpu_index = (unsigned) (cpu->type - 1);
1202 cpu_name = "Unknown";
1203 if (cpu_index < N(cpu_names))
1204 cpu_name = cpu_names[cpu_index];
1205
1206 get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
1207 cpu->type, &systype_name, &sysvariation_name);
1208
1209 nr_processors = get_nr_processors(cpu, hwrpb->nr_processors);
1210
1211 seq_printf(f, "cpu\t\t\t: Alpha\n"
1212 "cpu model\t\t: %s\n"
1213 "cpu variation\t\t: %ld\n"
1214 "cpu revision\t\t: %ld\n"
1215 "cpu serial number\t: %s\n"
1216 "system type\t\t: %s\n"
1217 "system variation\t: %s\n"
1218 "system revision\t\t: %ld\n"
1219 "system serial number\t: %s\n"
1220 "cycle frequency [Hz]\t: %lu %s\n"
1221 "timer frequency [Hz]\t: %lu.%02lu\n"
1222 "page size [bytes]\t: %ld\n"
1223 "phys. address bits\t: %ld\n"
1224 "max. addr. space #\t: %ld\n"
1225 "BogoMIPS\t\t: %lu.%02lu\n"
1226 "kernel unaligned acc\t: %ld (pc=%lx,va=%lx)\n"
1227 "user unaligned acc\t: %ld (pc=%lx,va=%lx)\n"
1228 "platform string\t\t: %s\n"
1229 "cpus detected\t\t: %d\n",
1230 cpu_name, cpu->variation, cpu->revision,
1231 (char*)cpu->serial_no,
1232 systype_name, sysvariation_name, hwrpb->sys_revision,
1233 (char*)hwrpb->ssn,
1234 est_cycle_freq ? : hwrpb->cycle_freq,
1235 est_cycle_freq ? "est." : "",
1236 hwrpb->intr_freq / 4096,
1237 (100 * hwrpb->intr_freq / 4096) % 100,
1238 hwrpb->pagesize,
1239 hwrpb->pa_bits,
1240 hwrpb->max_asn,
1241 loops_per_jiffy / (500000/HZ),
1242 (loops_per_jiffy / (5000/HZ)) % 100,
1243 unaligned[0].count, unaligned[0].pc, unaligned[0].va,
1244 unaligned[1].count, unaligned[1].pc, unaligned[1].va,
1245 platform_string(), nr_processors);
1246
1247#ifdef CONFIG_SMP
1248 seq_printf(f, "cpus active\t\t: %d\n"
1249 "cpu active mask\t\t: %016lx\n",
1250 num_online_cpus(), cpus_addr(cpu_possible_map)[0]);
1251#endif
1252
1253 show_cache_size (f, "L1 Icache", alpha_l1i_cacheshape);
1254 show_cache_size (f, "L1 Dcache", alpha_l1d_cacheshape);
1255 show_cache_size (f, "L2 cache", alpha_l2_cacheshape);
1256 show_cache_size (f, "L3 cache", alpha_l3_cacheshape);
1257
1258 return 0;
1259}
1260
1261static int __init
1262read_mem_block(int *addr, int stride, int size)
1263{
1264 long nloads = size / stride, cnt, tmp;
1265
1266 __asm__ __volatile__(
1267 " rpcc %0\n"
1268 "1: ldl %3,0(%2)\n"
1269 " subq %1,1,%1\n"
1270 /* Next two XORs introduce an explicit data dependency between
1271 consecutive loads in the loop, which will give us true load
1272 latency. */
1273 " xor %3,%2,%2\n"
1274 " xor %3,%2,%2\n"
1275 " addq %2,%4,%2\n"
1276 " bne %1,1b\n"
1277 " rpcc %3\n"
1278 " subl %3,%0,%0\n"
1279 : "=&r" (cnt), "=&r" (nloads), "=&r" (addr), "=&r" (tmp)
1280 : "r" (stride), "1" (nloads), "2" (addr));
1281
1282 return cnt / (size / stride);
1283}
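/*
 * Roughly equivalent C sketch (illustrative only; the real loop must stay
 * in assembly so the dependency-forcing XORs and the cycle-counter reads
 * cannot be reordered or optimized away).  rpcc() here stands for reading
 * the processor cycle counter:
 *
 *	start = rpcc();
 *	for (n = size / stride; n > 0; n--, p += stride)
 *		(void) *(volatile int *) p;
 *	return (rpcc() - start) / (size / stride);
 */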
1284
1285#define CSHAPE(totalsize, linesize, assoc) \
1286 ((totalsize & ~0xff) | (linesize << 4) | assoc)
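/*
 * Worked example (illustrative): CSHAPE(96*1024, 6, 3) encodes the EV5's
 * 96K, 3-way Scache with 64-byte lines.  show_cache_size() above decodes
 * the same fields: KB = shape >> 10, associativity = shape & 15,
 * line size = 1 << ((shape >> 4) & 15).
 */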
1287
1288/* ??? EV5 supports up to 64M, but did the systems with more than
1289 16M of BCACHE ever exist? */
1290#define MAX_BCACHE_SIZE 16*1024*1024
1291
1292/* Note that the offchip caches are direct mapped on all Alphas. */
1293static int __init
1294external_cache_probe(int minsize, int width)
1295{
1296 int cycles, prev_cycles = 1000000;
1297 int stride = 1 << width;
1298 long size = minsize, maxsize = MAX_BCACHE_SIZE * 2;
1299
1300 if (maxsize > (max_low_pfn + 1) << PAGE_SHIFT)
1301 maxsize = 1 << (floor_log2(max_low_pfn + 1) + PAGE_SHIFT);
1302
1303 /* Get the first block cached. */
1304 read_mem_block(__va(0), stride, size);
1305
1306 while (size < maxsize) {
1307 /* Get an average load latency in cycles. */
1308 cycles = read_mem_block(__va(0), stride, size);
1309 if (cycles > prev_cycles * 2) {
1310 /* Fine, we exceed the cache. */
1311 printk("%ldK Bcache detected; load hit latency %d "
1312 "cycles, load miss latency %d cycles\n",
1313 size >> 11, prev_cycles, cycles);
1314 return CSHAPE(size >> 1, width, 1);
1315 }
1316 /* Try to get the next block cached. */
1317 read_mem_block(__va(size), stride, size);
1318 prev_cycles = cycles;
1319 size <<= 1;
1320 }
1321 return -1; /* No BCACHE found. */
1322}
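/*
 * Illustrative run (numbers assumed): probing from minsize = 1M on a box
 * with a 4M Bcache might read ~20 cycles/load at 1M, 2M and 4M, then jump
 * to ~80 cycles/load at 8M.  Since 80 > 2 * 20, the loop reports a 4M
 * Bcache and returns CSHAPE(4*1024*1024, width, 1) -- direct mapped, as
 * noted above.
 */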
1323
1324static void __init
1325determine_cpu_caches (unsigned int cpu_type)
1326{
1327 int L1I, L1D, L2, L3;
1328
1329 switch (cpu_type) {
1330 case EV4_CPU:
1331 case EV45_CPU:
1332 {
1333 if (cpu_type == EV4_CPU)
1334 L1I = CSHAPE(8*1024, 5, 1);
1335 else
1336 L1I = CSHAPE(16*1024, 5, 1);
1337 L1D = L1I;
1338 L3 = -1;
1339
1340 /* BIU_CTL is a write-only Abox register. PALcode has a
1341 shadow copy, and may be available from some versions
1342 of the CSERVE PALcall. If we can get it, then
1343
1344 unsigned long biu_ctl, size;
1345 size = 128*1024 * (1 << ((biu_ctl >> 28) & 7));
1346 L2 = CSHAPE (size, 5, 1);
1347
1348 Unfortunately, we can't rely on that.
1349 */
1350 L2 = external_cache_probe(128*1024, 5);
1351 break;
1352 }
1353
1354 case LCA4_CPU:
1355 {
1356 unsigned long car, size;
1357
1358 L1I = L1D = CSHAPE(8*1024, 5, 1);
1359 L3 = -1;
1360
1361 car = *(vuip) phys_to_virt (0x120000078UL);
1362 size = 64*1024 * (1 << ((car >> 5) & 7));
1363 /* No typo -- 8 byte cacheline size. Whodathunk. */
1364 L2 = (car & 1 ? CSHAPE (size, 3, 1) : -1);
1365 break;
1366 }
1367
1368 case EV5_CPU:
1369 case EV56_CPU:
1370 {
1371 unsigned long sc_ctl, width;
1372
1373 L1I = L1D = CSHAPE(8*1024, 5, 1);
1374
1375 /* Check the line size of the Scache. */
1376 sc_ctl = *(vulp) phys_to_virt (0xfffff000a8UL);
1377 width = sc_ctl & 0x1000 ? 6 : 5;
1378 L2 = CSHAPE (96*1024, width, 3);
1379
1380 /* BC_CONTROL and BC_CONFIG are write-only IPRs. PALcode
1381 has a shadow copy, and may be available from some versions
1382 of the CSERVE PALcall. If we can get it, then
1383
1384 unsigned long bc_control, bc_config, size;
1385 size = 1024*1024 * (1 << ((bc_config & 7) - 1));
1386 L3 = (bc_control & 1 ? CSHAPE (size, width, 1) : -1);
1387
1388 Unfortunately, we can't rely on that.
1389 */
1390 L3 = external_cache_probe(1024*1024, width);
1391 break;
1392 }
1393
1394 case PCA56_CPU:
1395 case PCA57_CPU:
1396 {
1397 unsigned long cbox_config, size;
1398
1399 if (cpu_type == PCA56_CPU) {
1400 L1I = CSHAPE(16*1024, 6, 1);
1401 L1D = CSHAPE(8*1024, 5, 1);
1402 } else {
1403 L1I = CSHAPE(32*1024, 6, 2);
1404 L1D = CSHAPE(16*1024, 5, 1);
1405 }
1406 L3 = -1;
1407
1408 cbox_config = *(vulp) phys_to_virt (0xfffff00008UL);
1409 size = 512*1024 * (1 << ((cbox_config >> 12) & 3));
1410
1411#if 0
1412 L2 = ((cbox_config >> 31) & 1 ? CSHAPE (size, 6, 1) : -1);
1413#else
1414 L2 = external_cache_probe(512*1024, 6);
1415#endif
1416 break;
1417 }
1418
1419 case EV6_CPU:
1420 case EV67_CPU:
1421 case EV68CB_CPU:
1422 case EV68AL_CPU:
1423 case EV68CX_CPU:
1424 case EV69_CPU:
1425 L1I = L1D = CSHAPE(64*1024, 6, 2);
1426 L2 = external_cache_probe(1024*1024, 6);
1427 L3 = -1;
1428 break;
1429
1430 case EV7_CPU:
1431 case EV79_CPU:
1432 L1I = L1D = CSHAPE(64*1024, 6, 2);
1433 L2 = CSHAPE(7*1024*1024/4, 6, 7);
1434 L3 = -1;
1435 break;
1436
1437 default:
1438 /* Nothing known about this cpu type. */
1439 L1I = L1D = L2 = L3 = 0;
1440 break;
1441 }
1442
1443 alpha_l1i_cacheshape = L1I;
1444 alpha_l1d_cacheshape = L1D;
1445 alpha_l2_cacheshape = L2;
1446 alpha_l3_cacheshape = L3;
1447}
1448
1449/*
1450 * We show only CPU #0 info.
1451 */
1452static void *
1453c_start(struct seq_file *f, loff_t *pos)
1454{
1455 return *pos ? NULL : (char *)hwrpb + hwrpb->processor_offset;
1456}
1457
1458static void *
1459c_next(struct seq_file *f, void *v, loff_t *pos)
1460{
1461 return NULL;
1462}
1463
1464static void
1465c_stop(struct seq_file *f, void *v)
1466{
1467}
1468
1469struct seq_operations cpuinfo_op = {
1470 .start = c_start,
1471 .next = c_next,
1472 .stop = c_stop,
1473 .show = show_cpuinfo,
1474};
1475
1476
1477static int
1478alpha_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
1479{
1480#if 1
1481 /* FIXME FIXME FIXME */
1482 /* If we are using SRM and serial console, just hard halt here. */
1483 if (alpha_using_srm && srmcons_output)
1484 __halt();
1485#endif
1486 return NOTIFY_DONE;
1487}
 1488
1489static __init int add_pcspkr(void)
1490{
1491 struct platform_device *pd;
1492 int ret;
1493
1494 pd = platform_device_alloc("pcspkr", -1);
1495 if (!pd)
1496 return -ENOMEM;
1497
1498 ret = platform_device_add(pd);
1499 if (ret)
1500 platform_device_put(pd);
1501
1502 return ret;
1503}
1504device_initcall(add_pcspkr);