/*
 * arch/sh/kernel/topology.c
 *
 *  Copyright (C) 2007  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>

static DEFINE_PER_CPU(struct cpu, cpu_devices);

cpumask_t cpu_core_map[NR_CPUS];
EXPORT_SYMBOL(cpu_core_map);

static cpumask_t cpu_coregroup_map(unsigned int cpu)
{
	/*
	 * Presently all SH-X3 SMP cores are multi-cores, so just keep it
	 * simple until we have a method for determining topology..
	 */
	return cpu_possible_map;
}

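/*
 * Return the cached core group mask for @cpu, as built by
 * arch_update_cpu_topology(); with CONFIG_SCHED_MC the scheduler uses
 * this when constructing the multi-core (MC) domain level.
 */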
const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
{
	return &cpu_core_map[cpu];
}

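/*
 * Arch hook invoked by the scheduler before (re)building scheduling
 * domains; refresh each CPU's core group map here.
 */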
int arch_update_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		cpu_core_map[cpu] = cpu_coregroup_map(cpu);

	return 0;
}

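/*
 * Register memory nodes (when multiple nodes are configured) and all
 * present CPUs with the driver core so they appear under
 * /sys/devices/system/ at boot.
 */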
static int __init topology_init(void)
{
	int i, ret;

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(i)
		register_one_node(i);
#endif

	for_each_present_cpu(i) {
		struct cpu *c = &per_cpu(cpu_devices, i);

		c->hotpluggable = 1;

		ret = register_cpu(c, i);
		if (unlikely(ret))
			printk(KERN_WARNING "%s: register_cpu %d failed (%d)\n",
			       __func__, i, ret);
	}

#if defined(CONFIG_NUMA) && !defined(CONFIG_SMP)
	/*
	 * In the UP case, make sure the CPU association is still
	 * registered under each node. Without this, sysfs fails
	 * to make the connection between nodes other than node0
	 * and cpu0.
	 */
	for_each_online_node(i)
		if (i != numa_node_id())
			register_cpu_under_node(raw_smp_processor_id(), i);
#endif

	return 0;
}
subsys_initcall(topology_init);