/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/sched.h>
#include <linux/console.h>
#include <linux/ioport.h>
#include <linux/bootmem.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/kernel.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/sysreg.h>

#include <asm/arch/board.h>
#include <asm/arch/init.h>

extern int root_mountflags;

/*
 * Bootloader-provided information about physical memory
 */
struct tag_mem_range *mem_phys;
struct tag_mem_range *mem_reserved;
struct tag_mem_range *mem_ramdisk;

/*
 * Initialize loops_per_jiffy as 5000000 (500 MIPS).
 * Better make it too large than too small...
 */
struct avr32_cpuinfo boot_cpu_data = {
	.loops_per_jiffy = 5000000
};
EXPORT_SYMBOL(boot_cpu_data);

static char __initdata command_line[COMMAND_LINE_SIZE];

/*
 * Should be more than enough, but if you have a _really_ complex
 * setup, you might need to increase the size of this...
 */
static struct tag_mem_range __initdata mem_range_cache[32];
static unsigned mem_range_next_free;

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name	= "Kernel code",
		.start	= 0,
		.end	= 0,
		.flags	= IORESOURCE_MEM
	},
	{
		.name	= "Kernel data",
		.start	= 0,
		.end	= 0,
		.flags	= IORESOURCE_MEM,
	},
};

#define kernel_code	mem_res[0]
#define kernel_data	mem_res[1]

/*
 * Early framebuffer allocation. Works as follows:
 * - If fbmem_size is zero, nothing will be allocated or reserved.
 * - If fbmem_start is zero when setup_bootmem() is called,
 *   fbmem_size bytes will be allocated from the bootmem allocator.
 * - If fbmem_start is nonzero, an area of size fbmem_size will be
 *   reserved at the physical address fbmem_start if necessary. If
 *   the area isn't in a memory region known to the kernel, it will
 *   be left alone.
 *
 * Board-specific code may use these variables to set up platform data
 * for the framebuffer driver if fbmem_size is nonzero.
 */
static unsigned long __initdata fbmem_start;
static unsigned long __initdata fbmem_size;

/*
 * "fbmem=xxx[kKmM]" allocates the specified amount of boot memory for
 * use as framebuffer.
 *
 * "fbmem=xxx[kKmM]@yyy[kKmM]" defines a memory region of size xxx and
 * starting at yyy to be reserved for use as framebuffer.
 *
 * The kernel won't verify that the memory region starting at yyy
 * actually contains usable RAM.
 */
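/*
 * For example, "fbmem=600k" grabs 600 KiB from the bootmem allocator,
 * while "fbmem=600k@0x11e00000" reserves 600 KiB starting at physical
 * address 0x11e00000 (an arbitrary example address).
 */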
static int __init early_parse_fbmem(char *p)
{
	fbmem_size = memparse(p, &p);
	/* Skip the '@' separator so the start address gets parsed, too */
	if (*p == '@')
		fbmem_start = memparse(p + 1, &p);
	return 0;
}
early_param("fbmem", early_parse_fbmem);

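/*
 * Register the kernel image and all known RAM banks with the resource
 * tree so they show up in /proc/iomem.
 */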
static inline void __init resource_init(void)
{
	struct tag_mem_range *region;

	kernel_code.start = __pa(init_mm.start_code);
	kernel_code.end = __pa(init_mm.end_code - 1);
	kernel_data.start = __pa(init_mm.end_code);
	kernel_data.end = __pa(init_mm.brk - 1);

	for (region = mem_phys; region; region = region->next) {
		struct resource *res;
		unsigned long phys_start, phys_end;

		if (region->size == 0)
			continue;

		phys_start = region->addr;
		phys_end = phys_start + region->size - 1;

		res = alloc_bootmem_low(sizeof(*res));
		res->name = "System RAM";
		res->start = phys_start;
		res->end = phys_end;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}
}

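/*
 * The ATAG_CORE tag carries the root device number and the read-only
 * flag for the root filesystem.
 */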
static int __init parse_tag_core(struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = new_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}
__tagtable(ATAG_CORE, parse_tag_core);

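/*
 * Append the memory range described by @tag to the singly linked list
 * rooted at @root. Used by the ATAG_MEM, ATAG_RDIMG and ATAG_RSVD_MEM
 * handlers below.
 */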
static int __init parse_tag_mem_range(struct tag *tag,
				      struct tag_mem_range **root)
{
	struct tag_mem_range *cur, **pprev;
	struct tag_mem_range *new;

	/*
	 * Ignore zero-sized entries. If we're running standalone, the
	 * SDRAM code may emit such entries if something goes
	 * wrong...
	 */
	if (tag->u.mem_range.size == 0)
		return 0;

	/*
	 * Copy the data so the bootmem init code doesn't need to care
	 * about it.
	 */
	if (mem_range_next_free >= ARRAY_SIZE(mem_range_cache))
		panic("Physical memory map too complex!\n");

	new = &mem_range_cache[mem_range_next_free++];
	*new = tag->u.mem_range;

	pprev = root;
	cur = *root;
	while (cur) {
		pprev = &cur->next;
		cur = cur->next;
	}

	*pprev = new;
	new->next = NULL;

	return 0;
}

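/* ATAG_MEM describes a bank of physical SDRAM. */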
static int __init parse_tag_mem(struct tag *tag)
{
	return parse_tag_mem_range(tag, &mem_phys);
}
__tagtable(ATAG_MEM, parse_tag_mem);

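/* ATAG_CMDLINE carries the kernel command line from the boot loader. */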
static int __init parse_tag_cmdline(struct tag *tag)
{
	strlcpy(boot_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
	return 0;
}
__tagtable(ATAG_CMDLINE, parse_tag_cmdline);

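/* ATAG_RDIMG describes an initial ramdisk image in memory. */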
static int __init parse_tag_rdimg(struct tag *tag)
{
	return parse_tag_mem_range(tag, &mem_ramdisk);
}
__tagtable(ATAG_RDIMG, parse_tag_rdimg);

static int __init parse_tag_clock(struct tag *tag)
{
	/*
	 * We'll figure out the clocks by peeking at the system
	 * manager regs directly.
	 */
	return 0;
}
__tagtable(ATAG_CLOCK, parse_tag_clock);

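/* ATAG_RSVD_MEM describes a memory region reserved by the boot loader. */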
static int __init parse_tag_rsvd_mem(struct tag *tag)
{
	return parse_tag_mem_range(tag, &mem_reserved);
}
__tagtable(ATAG_RSVD_MEM, parse_tag_rsvd_mem);

/*
 * Scan the tag table for this tag, and call its parse function. The
 * tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}

/*
 * Parse all tags in the list we got from the boot loader
 */
static void __init parse_tags(struct tag *t)
{
	for (; t->hdr.tag != ATAG_NONE; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
			       "Ignoring unrecognised tag 0x%08x\n",
			       t->hdr.tag);
}

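/* Dump a list of memory ranges to the console. */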
static void __init print_memory_map(const char *what,
				    struct tag_mem_range *mem)
{
	printk("%s:\n", what);
	for (; mem; mem = mem->next) {
		printk("  %08lx - %08lx\n",
		       (unsigned long)mem->addr,
		       (unsigned long)(mem->addr + mem->size));
	}
}

#define MAX_LOWMEM	HIGHMEM_START
#define MAX_LOWMEM_PFN	PFN_DOWN(MAX_LOWMEM)

/*
 * Sort a list of memory regions in-place by ascending address.
 *
 * We're using bubble sort because we only have singly linked lists
 * with few elements.
 */
static void __init sort_mem_list(struct tag_mem_range **pmem)
{
	int done;
	struct tag_mem_range **a, **b;

	if (!*pmem)
		return;

	do {
		done = 1;
		a = pmem, b = &(*pmem)->next;
		while (*b) {
			if ((*a)->addr > (*b)->addr) {
				struct tag_mem_range *tmp;
				tmp = (*b)->next;
				(*b)->next = *a;
				*a = *b;
				*b = tmp;
				done = 0;
			}
			a = &(*a)->next;
			b = &(*a)->next;
		}
	} while (!done);
}

/*
 * Find a free memory region large enough for storing the
 * bootmem bitmap.
 */
static unsigned long __init
find_bootmap_pfn(const struct tag_mem_range *mem)
{
	unsigned long bootmap_pages, bootmap_len;
	unsigned long node_pages = PFN_UP(mem->size);
	unsigned long bootmap_addr = mem->addr;
	struct tag_mem_range *reserved = mem_reserved;
	struct tag_mem_range *ramdisk = mem_ramdisk;
	unsigned long kern_start = __pa(_stext);
	unsigned long kern_end = __pa(_end);

	bootmap_pages = bootmem_bootmap_pages(node_pages);
	bootmap_len = bootmap_pages << PAGE_SHIFT;

	/*
	 * Find a large enough region without reserved pages for
	 * storing the bootmem bitmap. We can take advantage of the
	 * fact that all lists have been sorted.
	 *
	 * We have to check explicitly reserved regions as well as the
	 * kernel image and any RAMDISK images...
	 *
	 * Oh, and we have to make sure we don't overwrite the taglist
	 * since we're going to use it until the bootmem allocator is
	 * fully up and running.
	 */
	while (1) {
		if ((bootmap_addr < kern_end) &&
		    ((bootmap_addr + bootmap_len) > kern_start))
			bootmap_addr = kern_end;

		while (reserved &&
		       (bootmap_addr >= (reserved->addr + reserved->size)))
			reserved = reserved->next;

		if (reserved &&
		    ((bootmap_addr + bootmap_len) >= reserved->addr)) {
			bootmap_addr = reserved->addr + reserved->size;
			continue;
		}

		while (ramdisk &&
		       (bootmap_addr >= (ramdisk->addr + ramdisk->size)))
			ramdisk = ramdisk->next;

		if (!ramdisk ||
		    ((bootmap_addr + bootmap_len) < ramdisk->addr))
			break;

		bootmap_addr = ramdisk->addr + ramdisk->size;
	}

	if ((PFN_UP(bootmap_addr) + bootmap_len) >= (mem->addr + mem->size))
		return ~0UL;

	return PFN_UP(bootmap_addr);
}

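/*
 * Set up the boot-time allocator: sort and print the memory maps,
 * initialize bootmem for the first (and only) memory bank, register
 * the free pages, and reserve the kernel image, the bootmem bitmap,
 * any RAMDISK images and any bootloader-reserved regions.
 */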
static void __init setup_bootmem(void)
{
	unsigned bootmap_size;
	unsigned long first_pfn, bootmap_pfn, pages;
	unsigned long max_pfn, max_low_pfn;
	unsigned long kern_start = __pa(_stext);
	unsigned long kern_end = __pa(_end);
	unsigned node = 0;
	struct tag_mem_range *bank, *res;

	sort_mem_list(&mem_phys);
	sort_mem_list(&mem_reserved);

	print_memory_map("Physical memory", mem_phys);
	print_memory_map("Reserved memory", mem_reserved);

	nodes_clear(node_online_map);

	if (mem_ramdisk) {
#ifdef CONFIG_BLK_DEV_INITRD
		initrd_start = (unsigned long)__va(mem_ramdisk->addr);
		initrd_end = initrd_start + mem_ramdisk->size;

		print_memory_map("RAMDISK images", mem_ramdisk);
		if (mem_ramdisk->next)
			printk(KERN_WARNING
			       "Warning: Only the first RAMDISK image "
			       "will be used\n");
		sort_mem_list(&mem_ramdisk);
#else
		printk(KERN_WARNING "RAM disk image present, but "
		       "no initrd support in kernel!\n");
#endif
	}

	if (mem_phys->next)
		printk(KERN_WARNING "Only using first memory bank\n");

	for (bank = mem_phys; bank; bank = NULL) {
		first_pfn = PFN_UP(bank->addr);
		max_low_pfn = max_pfn = PFN_DOWN(bank->addr + bank->size);
		bootmap_pfn = find_bootmap_pfn(bank);
		if (bootmap_pfn > max_pfn)
			panic("No space for bootmem bitmap!\n");

		if (max_low_pfn > MAX_LOWMEM_PFN) {
			max_low_pfn = MAX_LOWMEM_PFN;
#ifndef CONFIG_HIGHMEM
			/*
			 * Lowmem is memory that can be addressed
			 * directly through P1/P2
			 */
			printk(KERN_WARNING
			       "Node %u: Only %ld MiB of memory will be used.\n",
			       node, MAX_LOWMEM >> 20);
			printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
#else
#error HIGHMEM is not supported by AVR32 yet
#endif
		}

		/* Initialize the boot-time allocator with low memory only. */
		bootmap_size = init_bootmem_node(NODE_DATA(node), bootmap_pfn,
						 first_pfn, max_low_pfn);

		printk("Node %u: bdata = %p, bdata->node_bootmem_map = %p\n",
		       node, NODE_DATA(node)->bdata,
		       NODE_DATA(node)->bdata->node_bootmem_map);

		/*
		 * Register fully available RAM pages with the bootmem
		 * allocator.
		 */
		pages = max_low_pfn - first_pfn;
		free_bootmem_node(NODE_DATA(node), PFN_PHYS(first_pfn),
				  PFN_PHYS(pages));

		/*
		 * Reserve space for the kernel image (if present in
		 * this node)...
		 */
		if ((kern_start >= PFN_PHYS(first_pfn)) &&
		    (kern_start < PFN_PHYS(max_pfn))) {
			printk("Node %u: Kernel image %08lx - %08lx\n",
			       node, kern_start, kern_end);
			reserve_bootmem_node(NODE_DATA(node), kern_start,
					     kern_end - kern_start);
		}

		/* ...the bootmem bitmap... */
		reserve_bootmem_node(NODE_DATA(node),
				     PFN_PHYS(bootmap_pfn),
				     bootmap_size);

		/* ...any RAMDISK images... */
		for (res = mem_ramdisk; res; res = res->next) {
			if (res->addr > PFN_PHYS(max_pfn))
				break;

			if (res->addr >= PFN_PHYS(first_pfn)) {
				printk("Node %u: RAMDISK %08lx - %08lx\n",
				       node,
				       (unsigned long)res->addr,
				       (unsigned long)(res->addr + res->size));
				reserve_bootmem_node(NODE_DATA(node),
						     res->addr, res->size);
			}
		}

		/* ...and any other reserved regions. */
		for (res = mem_reserved; res; res = res->next) {
			if (res->addr > PFN_PHYS(max_pfn))
				break;

			if (res->addr >= PFN_PHYS(first_pfn)) {
				printk("Node %u: Reserved %08lx - %08lx\n",
				       node,
				       (unsigned long)res->addr,
				       (unsigned long)(res->addr + res->size));
				reserve_bootmem_node(NODE_DATA(node),
						     res->addr, res->size);
			}
		}

		node_set_online(node);
	}
}

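/*
 * Top-level architecture setup: parse the bootloader tag list, probe
 * the CPU and board, determine the CPU clock, set up the memory map,
 * and kick off paging and resource registration.
 */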
void __init setup_arch(char **cmdline_p)
{
	struct clk *cpu_clk;

	parse_tags(bootloader_tags);

	setup_processor();
	setup_platform();
	setup_board();

	cpu_clk = clk_get(NULL, "cpu");
	if (IS_ERR(cpu_clk)) {
		printk(KERN_WARNING "Warning: Unable to get CPU clock\n");
	} else {
		unsigned long cpu_hz = clk_get_rate(cpu_clk);

		/*
		 * Well, duh, but it's probably a good idea to
		 * increment the use count.
		 */
		clk_enable(cpu_clk);

		boot_cpu_data.clk = cpu_clk;
		boot_cpu_data.loops_per_jiffy = cpu_hz * 4;
		printk("CPU: Running at %lu.%03lu MHz\n",
		       ((cpu_hz + 500) / 1000) / 1000,
		       ((cpu_hz + 500) / 1000) % 1000);
	}

	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;
	parse_early_param();

	setup_bootmem();

	board_setup_fbmem(fbmem_start, fbmem_size);

#ifdef CONFIG_VT
	conswitchp = &dummy_con;
#endif

	paging_init();

	resource_init();
}