/*
 * Procedures for creating, accessing and interpreting the device tree.
 *
 * Paul Mackerras	August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *   {engebret|bergner}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <stdarg.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/stringify.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/debugfs.h>
#include <linux/irq.h>
#include <linux/lmb.h>

#include <asm/prom.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <linux/io.h>
#include <asm/system.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/pci-bridge.h>

/* Exported to the outside world */
struct device_node *of_chosen;

#define early_init_dt_scan_drconf_memory(node) 0

static int __init early_init_dt_scan_cpus(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	static int logical_cpuid;
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const u32 *intserv;
	int i, nthreads;
	int found = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	/* Get physical cpuid */
	intserv = of_get_flat_dt_prop(node, "reg", NULL);
	nthreads = 1;
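	/*
	 * The Microblaze port does not parse a thread count from the device
	 * tree; nthreads is fixed at 1, so the loop below runs exactly once.
	 */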

	/*
	 * Now see if any of these threads match our boot cpu.
	 * NOTE: This must match the parsing done in smp_setup_cpu_maps.
	 */
	for (i = 0; i < nthreads; i++) {
		/*
		 * version 2 of the kexec param format adds the phys cpuid of
		 * booted proc.
		 */
		if (initial_boot_params && initial_boot_params->version >= 2) {
			if (intserv[i] ==
					initial_boot_params->boot_cpuid_phys) {
				found = 1;
				break;
			}
		} else {
			/*
			 * Check if it's the boot-cpu and set its hw index
			 * now; unfortunately this format did not support
			 * booting off secondary threads.
			 */
			if (of_get_flat_dt_prop(node,
					"linux,boot-cpu", NULL) != NULL) {
				found = 1;
				break;
			}
		}

#ifdef CONFIG_SMP
		/* logical cpu id is always 0 on UP kernels */
		logical_cpuid++;
#endif
	}

	if (found) {
		pr_debug("boot cpu: logical %d physical %d\n", logical_cpuid,
			 intserv[i]);
		boot_cpuid = logical_cpuid;
	}

	return 0;
}

static int __init early_init_dt_scan_chosen(unsigned long node,
				const char *uname, int depth, void *data)
{
	unsigned long l;
	char *p;
#ifdef CONFIG_KEXEC
	u64 *lprop;
#endif

	pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);

	if (depth != 1 ||
		(strcmp(uname, "chosen") != 0 &&
				strcmp(uname, "chosen@0") != 0))
		return 0;

#ifdef CONFIG_KEXEC
	lprop = (u64 *)of_get_flat_dt_prop(node,
				"linux,crashkernel-base", NULL);
	if (lprop)
		crashk_res.start = *lprop;

	lprop = (u64 *)of_get_flat_dt_prop(node,
				"linux,crashkernel-size", NULL);
	if (lprop)
		crashk_res.end = crashk_res.start + *lprop - 1;
#endif

	early_init_dt_check_for_initrd(node);

	/* Retrieve command line */
	p = of_get_flat_dt_prop(node, "bootargs", &l);
	if (p != NULL && l > 0)
		strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));

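	/*
	 * A kernel built-in command line (CONFIG_CMDLINE) is used when the
	 * device tree supplies no bootargs, or unconditionally when
	 * CONFIG_CMDLINE_FORCE is set.
	 */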
#ifdef CONFIG_CMDLINE
#ifndef CONFIG_CMDLINE_FORCE
	if (p == NULL || l == 0 || (l == 1 && (*p) == 0))
#endif
		strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif /* CONFIG_CMDLINE */

	pr_debug("Command line is: %s\n", cmd_line);

	/* break now */
	return 1;
}
155
Michal Simek12e84142009-03-27 14:25:12 +0100156static int __init early_init_dt_scan_memory(unsigned long node,
157 const char *uname, int depth, void *data)
158{
159 char *type = of_get_flat_dt_prop(node, "device_type", NULL);
Grant Likely0f0b56c2009-12-10 23:42:17 -0700160 __be32 *reg, *endp;
Michal Simek12e84142009-03-27 14:25:12 +0100161 unsigned long l;
162
163 /* Look for the ibm,dynamic-reconfiguration-memory node */
164/* if (depth == 1 &&
165 strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0)
166 return early_init_dt_scan_drconf_memory(node);
167*/
168 /* We are scanning "memory" nodes only */
169 if (type == NULL) {
170 /*
171 * The longtrail doesn't have a device_type on the
172 * /memory node, so look for the node called /memory@0.
173 */
174 if (depth != 1 || strcmp(uname, "memory@0") != 0)
175 return 0;
176 } else if (strcmp(type, "memory") != 0)
177 return 0;
178
Grant Likely0f0b56c2009-12-10 23:42:17 -0700179 reg = (__be32 *)of_get_flat_dt_prop(node, "linux,usable-memory", &l);
Michal Simek12e84142009-03-27 14:25:12 +0100180 if (reg == NULL)
Grant Likely0f0b56c2009-12-10 23:42:17 -0700181 reg = (__be32 *)of_get_flat_dt_prop(node, "reg", &l);
Michal Simek12e84142009-03-27 14:25:12 +0100182 if (reg == NULL)
183 return 0;
184
Grant Likely0f0b56c2009-12-10 23:42:17 -0700185 endp = reg + (l / sizeof(__be32));
Michal Simek12e84142009-03-27 14:25:12 +0100186
187 pr_debug("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
188 uname, l, reg[0], reg[1], reg[2], reg[3]);
189
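	/*
	 * The property is a list of (address, size) pairs; each pair spans
	 * dt_root_addr_cells + dt_root_size_cells 32-bit cells. Walk the data
	 * in those increments and register every non-empty region with LMB.
	 */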
	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
		u64 base, size;

		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
		size = dt_mem_next_cell(dt_root_size_cells, &reg);

		if (size == 0)
			continue;
		pr_debug(" - %llx , %llx\n", (unsigned long long)base,
			 (unsigned long long)size);

		lmb_add(base, size);
	}
	return 0;
}

#ifdef CONFIG_PHYP_DUMP
/**
 * phyp_dump_calculate_reserve_size() - reserve a variable boot area: 5% of RAM or a boot argument
 *
 * Function to find the largest size we need to reserve
 * during the early boot process.
 *
 * It either looks for a boot parameter and returns that, OR
 * returns the larger of 256MB or 5% of RAM, rounded down to a multiple of 256MB.
 *
 */
static inline unsigned long phyp_dump_calculate_reserve_size(void)
{
	unsigned long tmp;

	if (phyp_dump_info->reserve_bootvar)
		return phyp_dump_info->reserve_bootvar;

	/* divide by 20 to get 5% of the value */
	tmp = lmb_end_of_DRAM();
	do_div(tmp, 20);

	/* round it down to a multiple of 256MB */
	tmp = tmp & ~0x0FFFFFFFUL;

	return (tmp > PHYP_DUMP_RMR_END ? tmp : PHYP_DUMP_RMR_END);
}

/**
 * phyp_dump_reserve_mem() - reserve all not-yet-dumped memory
 *
 * This routine may reserve memory regions in the kernel only
 * if the system is supported and a dump was taken in the last
 * boot instance, or if the hardware is supported and the
 * scratch area needs to be set up. In other instances it returns
 * without reserving anything. The memory in case of dump being
 * active is freed when the dump is collected (by userland tools).
 */
static void __init phyp_dump_reserve_mem(void)
{
	unsigned long base, size;
	unsigned long variable_reserve_size;

	if (!phyp_dump_info->phyp_dump_configured) {
		printk(KERN_ERR "Phyp-dump not supported on this hardware\n");
		return;
	}

	if (!phyp_dump_info->phyp_dump_at_boot) {
		printk(KERN_INFO "Phyp-dump disabled at boot time\n");
		return;
	}

	variable_reserve_size = phyp_dump_calculate_reserve_size();

	if (phyp_dump_info->phyp_dump_is_active) {
		/* Reserve *everything* above the RMR. Area freed by userland tools. */
		base = variable_reserve_size;
		size = lmb_end_of_DRAM() - base;

		/* XXX crashed_ram_end is wrong, since it may be beyond
		 * the memory_limit, it will need to be adjusted. */
		lmb_reserve(base, size);

		phyp_dump_info->init_reserve_start = base;
		phyp_dump_info->init_reserve_size = size;
	} else {
		size = phyp_dump_info->cpu_state_size +
			phyp_dump_info->hpte_region_size +
			variable_reserve_size;
		base = lmb_end_of_DRAM() - size;
		lmb_reserve(base, size);
		phyp_dump_info->init_reserve_start = base;
		phyp_dump_info->init_reserve_size = size;
	}
}
#else
static inline void __init phyp_dump_reserve_mem(void) {}
#endif /* CONFIG_PHYP_DUMP */

#ifdef CONFIG_EARLY_PRINTK
/* MS: this is a Microblaze-specific function */
static int __init early_init_dt_scan_serial(unsigned long node,
				const char *uname, int depth, void *data)
{
	unsigned long l;
	char *p;
	int *addr;

	pr_debug("search \"serial\", depth: %d, uname: %s\n", depth, uname);

	/* find all serial nodes */
	if (strncmp(uname, "serial", 6) != 0)
		return 0;

	early_init_dt_check_for_initrd(node);

	/* find a node compatible with uartlite */
	p = of_get_flat_dt_prop(node, "compatible", &l);
	if (p == NULL ||
	    ((strncmp(p, "xlnx,xps-uartlite", 17) != 0) &&
	     (strncmp(p, "xlnx,opb-uartlite", 17) != 0)))
		return 0;
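	/*
	 * Return the uartlite base address. A non-zero return value stops
	 * of_scan_flat_dt(), which passes it back as its result, so
	 * early_uartlite_console() below returns this address (or 0 when no
	 * matching node was found).
	 */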
	addr = of_get_flat_dt_prop(node, "reg", &l);
	if (addr == NULL)
		return 0;
	return *addr; /* return the base address */
}

/* This function looks for an early uartlite console - Microblaze specific */
int __init early_uartlite_console(void)
{
	return of_scan_flat_dt(early_init_dt_scan_serial, NULL);
}
#endif

void __init early_init_devtree(void *params)
{
	pr_debug(" -> early_init_devtree(%p)\n", params);

	/* Setup flat device-tree pointer */
	initial_boot_params = params;

#ifdef CONFIG_PHYP_DUMP
	/* scan the tree to see if a dump occurred during the last boot */
	of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL);
#endif

	/* Retrieve various information from the /chosen node of the
	 * device-tree, including the platform type, initrd location and
	 * size, TCE reserve, and more ...
	 */
	of_scan_flat_dt(early_init_dt_scan_chosen, NULL);

	/* Scan memory nodes and rebuild LMBs */
	lmb_init();
	of_scan_flat_dt(early_init_dt_scan_root, NULL);
	of_scan_flat_dt(early_init_dt_scan_memory, NULL);

	/* Save command line for /proc/cmdline and then parse parameters */
	strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
	parse_early_param();

	lmb_analyze();

	pr_debug("Phys. mem: %lx\n", (unsigned long) lmb_phys_mem_size());

	pr_debug("Scanning CPUs ...\n");

	/* Retrieve CPU-related information from the flat tree
	 * (altivec support, boot CPU ID, ...)
	 */
	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);

	pr_debug(" <- early_init_devtree()\n");
}

/**
 * Indicates whether the root node has a given value in its
 * compatible property.
 */
int machine_is_compatible(const char *compat)
{
	struct device_node *root;
	int rc = 0;

	root = of_find_node_by_path("/");
	if (root) {
		rc = of_device_is_compatible(root, compat);
		of_node_put(root);
	}
	return rc;
}
EXPORT_SYMBOL(machine_is_compatible);

/*******
 *
 * New implementation of the OF "find" APIs, return a refcounted
 * object, call of_node_put() when done. The device tree and list
 * are protected by a rw_lock.
 *
 * Note that property management will need some locking as well,
 * this isn't dealt with yet.
 *
 *******/

/**
 * of_find_node_by_phandle - Find a node given a phandle
 * @handle:	phandle of the node to find
 *
 * Returns a node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_by_phandle(phandle handle)
{
	struct device_node *np;

	read_lock(&devtree_lock);
	for (np = allnodes; np != NULL; np = np->allnext)
		if (np->linux_phandle == handle)
			break;
	of_node_get(np);
	read_unlock(&devtree_lock);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);

/**
 * of_node_get - Increment refcount of a node
 * @node:	Node to inc refcount, NULL is supported to
 *		simplify writing of callers
 *
 * Returns node.
 */
struct device_node *of_node_get(struct device_node *node)
{
	if (node)
		kref_get(&node->kref);
	return node;
}
EXPORT_SYMBOL(of_node_get);

static inline struct device_node *kref_to_device_node(struct kref *kref)
{
	return container_of(kref, struct device_node, kref);
}

/**
 * of_node_release - release a dynamically allocated node
 * @kref:	kref element of the node to be released
 *
 * In of_node_put() this function is passed to kref_put()
 * as the destructor.
 */
static void of_node_release(struct kref *kref)
{
	struct device_node *node = kref_to_device_node(kref);
	struct property *prop = node->properties;

	/* We should never be releasing nodes that haven't been detached. */
	if (!of_node_check_flag(node, OF_DETACHED)) {
		printk(KERN_INFO "WARNING: Bad of_node_put() on %s\n",
			node->full_name);
		dump_stack();
		kref_init(&node->kref);
		return;
	}

	if (!of_node_check_flag(node, OF_DYNAMIC))
		return;

	while (prop) {
		struct property *next = prop->next;
		kfree(prop->name);
		kfree(prop->value);
		kfree(prop);
		prop = next;

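		/*
		 * Once the live property list is exhausted, continue with the
		 * properties that were removed earlier and parked on the
		 * deadprops list, so they are freed as well.
		 */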
		if (!prop) {
			prop = node->deadprops;
			node->deadprops = NULL;
		}
	}
	kfree(node->full_name);
	kfree(node->data);
	kfree(node);
}

/**
 * of_node_put - Decrement refcount of a node
 * @node:	Node to dec refcount, NULL is supported to
 *		simplify writing of callers
 *
 */
void of_node_put(struct device_node *node)
{
	if (node)
		kref_put(&node->kref, of_node_release);
}
EXPORT_SYMBOL(of_node_put);

/*
 * Plug a device node into the tree and global list.
 */
void of_attach_node(struct device_node *np)
{
	unsigned long flags;

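	/*
	 * Prepend the node to its parent's child list and to the global
	 * allnodes list, under the devtree writer lock.
	 */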
	write_lock_irqsave(&devtree_lock, flags);
	np->sibling = np->parent->child;
	np->allnext = allnodes;
	np->parent->child = np;
	allnodes = np;
	write_unlock_irqrestore(&devtree_lock, flags);
}

/*
 * "Unplug" a node from the device tree. The caller must hold
 * a reference to the node. The memory associated with the node
 * is not freed until its refcount goes to zero.
 */
void of_detach_node(struct device_node *np)
{
	struct device_node *parent;
	unsigned long flags;

	write_lock_irqsave(&devtree_lock, flags);

	parent = np->parent;
	if (!parent)
		goto out_unlock;

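	/*
	 * Unlink the node from the global allnodes list, then from its
	 * parent's list of children.
	 */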
	if (allnodes == np)
		allnodes = np->allnext;
	else {
		struct device_node *prev;
		for (prev = allnodes;
		     prev->allnext != np;
		     prev = prev->allnext)
			;
		prev->allnext = np->allnext;
	}

	if (parent->child == np)
		parent->child = np->sibling;
	else {
		struct device_node *prevsib;
		for (prevsib = np->parent->child;
		     prevsib->sibling != np;
		     prevsib = prevsib->sibling)
			;
		prevsib->sibling = np->sibling;
	}

	of_node_set_flag(np, OF_DETACHED);

out_unlock:
	write_unlock_irqrestore(&devtree_lock, flags);
}

#if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
static struct debugfs_blob_wrapper flat_dt_blob;

static int __init export_flat_device_tree(void)
{
	struct dentry *d;

	flat_dt_blob.data = initial_boot_params;
	flat_dt_blob.size = initial_boot_params->totalsize;

	d = debugfs_create_blob("flat-device-tree", S_IFREG | S_IRUSR,
				of_debugfs_root, &flat_dt_blob);
	if (!d)
		return 1;

	return 0;
}
device_initcall(export_flat_device_tree);
#endif