/*
 * Copyright IBM Corp. 2007, 2011
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/bootmem.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <asm/sysinfo.h>

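/*
 * Function codes for the Perform Topology Function (PTF)
 * instruction: request horizontal or vertical polarization,
 * or check whether the topology has changed.
 */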
#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)

struct mask_info {
	struct mask_info *next;
	unsigned char id;
	cpumask_t mask;
};

static int topology_enabled = 1;
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;
static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn);
/* topology_lock protects the core and book linked lists */
static DEFINE_SPINLOCK(topology_lock);

static struct mask_info core_info;
cpumask_t cpu_core_map[NR_CPUS];
unsigned char cpu_core_id[NR_CPUS];

static struct mask_info book_info;
cpumask_t cpu_book_map[NR_CPUS];
unsigned char cpu_book_id[NR_CPUS];

/* smp_cpu_state_mutex must be held when accessing this array */
int cpu_polarization[NR_CPUS];

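/*
 * Return the mask of CPUs that share a topology level with @cpu,
 * found by searching the mask_info list starting at @info. Falls
 * back to a mask containing only @cpu itself if topology is
 * disabled, unsupported, or @cpu is in none of the masks.
 */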
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
	cpumask_t mask;

	cpumask_clear(&mask);
	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) {
		cpumask_copy(&mask, cpumask_of(cpu));
		return mask;
	}
	while (info) {
		if (cpumask_test_cpu(cpu, &info->mask)) {
			mask = info->mask;
			break;
		}
		info = info->next;
	}
	if (cpumask_empty(&mask))
		cpumask_copy(&mask, cpumask_of(cpu));
	return mask;
}

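/*
 * Add all CPUs described by one topology-list CPU entry to the given
 * book and core masks. Each set bit, combined with the entry's origin
 * field, yields a physical CPU address, which is translated to a
 * logical CPU number. With @one_core_per_cpu set, each CPU gets its
 * own core mask_info and the core id is the physical CPU address.
 */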
static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
					  struct mask_info *book,
					  struct mask_info *core,
					  int one_core_per_cpu)
{
	unsigned int cpu;

	for (cpu = find_first_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS);
	     cpu < TOPOLOGY_CPU_BITS;
	     cpu = find_next_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS, cpu + 1))
	{
		unsigned int rcpu;
		int lcpu;

		rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
		lcpu = smp_find_processor_id(rcpu);
		if (lcpu >= 0) {
			cpumask_set_cpu(lcpu, &book->mask);
			cpu_book_id[lcpu] = book->id;
			cpumask_set_cpu(lcpu, &core->mask);
			if (one_core_per_cpu) {
				cpu_core_id[lcpu] = rcpu;
				core = core->next;
			} else {
				cpu_core_id[lcpu] = core->id;
			}
			cpu_set_polarization(lcpu, tl_cpu->pp);
		}
	}
	return core;
}

static void clear_masks(void)
{
	struct mask_info *info;

	info = &core_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
	info = &book_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
}

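/*
 * Topology-list entries have different sizes depending on whether
 * they describe a CPU (nesting level 0) or a container; advance
 * to the next entry accordingly.
 */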
static union topology_entry *next_tle(union topology_entry *tle)
{
	if (!tle->nl)
		return (union topology_entry *)((struct topology_cpu *)tle + 1);
	return (union topology_entry *)((struct topology_container *)tle + 1);
}

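/*
 * Walk the topology list and fill in the book (nesting level 2) and
 * core (nesting level 1) masks; level 0 entries describe the CPUs
 * themselves. Called with topology_lock held.
 */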
static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
{
	struct mask_info *core = &core_info;
	struct mask_info *book = &book_info;
	union topology_entry *tle, *end;

	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 2:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 1:
			core = core->next;
			core->id = tle->container.id;
			break;
		case 0:
			add_cpus_to_mask(&tle->cpu, book, core, 0);
			break;
		default:
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}

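/*
 * Variant for z10 machines, where books are reported at nesting
 * level 1 and each topology CPU entry describes a single core.
 */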
static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
{
	struct mask_info *core = &core_info;
	struct mask_info *book = &book_info;
	union topology_entry *tle, *end;

	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 1:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 0:
			core = add_cpus_to_mask(&tle->cpu, book, core, 1);
			break;
		default:
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}

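/*
 * Rebuild all topology masks from a stored topology list. z10
 * machines (types 0x2097 and 0x2098) need the special one-core-
 * per-cpu handling.
 */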
static void tl_to_cores(struct sysinfo_15_1_x *info)
{
	struct cpuid cpu_id;

	get_cpu_id(&cpu_id);
	spin_lock_irq(&topology_lock);
	clear_masks();
	switch (cpu_id.machine) {
	case 0x2097:
	case 0x2098:
		__tl_to_cores_z10(info);
		break;
	default:
		__tl_to_cores_generic(info);
	}
	spin_unlock_irq(&topology_lock);
}

static void topology_update_polarization_simple(void)
{
	int cpu;

	mutex_lock(&smp_cpu_state_mutex);
	for_each_possible_cpu(cpu)
		cpu_set_polarization(cpu, POLARIZATION_HRZ);
	mutex_unlock(&smp_cpu_state_mutex);
}

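/*
 * Issue the Perform Topology Function instruction with function
 * code @fc and return its condition code.
 */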
static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (rc)
		: "d" (fc) : "cc");
	return rc;
}

int topology_set_cpu_management(int fc)
{
	int cpu, rc;

	if (!MACHINE_HAS_TOPOLOGY)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_possible_cpu(cpu)
		cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
	return rc;
}

static void update_cpu_core_map(void)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&topology_lock, flags);
	for_each_possible_cpu(cpu) {
		cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
		cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
	}
	spin_unlock_irqrestore(&topology_lock, flags);
}

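/*
 * Fetch the topology information via STSI 15.1.3. If three nesting
 * levels are not supported (rc == -ENOSYS), fall back to STSI 15.1.2.
 */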
void store_topology(struct sysinfo_15_1_x *info)
{
	int rc;

	rc = stsi(info, 15, 1, 3);
	if (rc != -ENOSYS)
		return;
	stsi(info, 15, 1, 2);
}

int arch_update_cpu_topology(void)
{
	struct sysinfo_15_1_x *info = tl_info;
	struct device *dev;
	int cpu;

	if (!MACHINE_HAS_TOPOLOGY) {
		update_cpu_core_map();
		topology_update_polarization_simple();
		return 0;
	}
	store_topology(info);
	tl_to_cores(info);
	update_cpu_core_map();
	for_each_online_cpu(cpu) {
		dev = get_cpu_device(cpu);
		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
	}
	return 1;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}

static struct timer_list topology_timer =
	TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0);

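/*
 * topology_poll counts the remaining fast polls: while it is
 * non-zero the timer fires every 100ms, afterwards only once
 * per minute.
 */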
static atomic_t topology_poll = ATOMIC_INIT(0);

static void set_topology_timer(void)
{
	if (atomic_add_unless(&topology_poll, -1, 0))
		mod_timer(&topology_timer, jiffies + HZ / 10);
	else
		mod_timer(&topology_timer, jiffies + HZ * 60);
}

void topology_expect_change(void)
{
	if (!MACHINE_HAS_TOPOLOGY)
		return;
	/* This is racy, but it doesn't matter since it is just a heuristic.
	 * Worst case is that we poll in a higher frequency for a bit longer.
	 */
	if (atomic_read(&topology_poll) > 60)
		return;
	atomic_add(60, &topology_poll);
	set_topology_timer();
}

static int __init early_parse_topology(char *p)
{
	if (strncmp(p, "off", 3))
		return 0;
	topology_enabled = 0;
	return 0;
}
early_param("topology", early_parse_topology);

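/*
 * Allocate one mask_info per possible container at the given nesting
 * level; the count is the product of the relevant magnitudes from
 * the STSI info block's mag[] array.
 */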
static void __init alloc_masks(struct sysinfo_15_1_x *info,
			       struct mask_info *mask, int offset)
{
	int i, nr_masks;

	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
	for (i = 0; i < info->mnest - offset; i++)
		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
	nr_masks = max(nr_masks, 1);
	for (i = 0; i < nr_masks; i++) {
		mask->next = alloc_bootmem(sizeof(struct mask_info));
		mask = mask->next;
	}
}

void __init s390_init_cpu_topology(void)
{
	struct sysinfo_15_1_x *info;
	int i;

	if (!MACHINE_HAS_TOPOLOGY)
		return;
	tl_info = alloc_bootmem_pages(PAGE_SIZE);
	info = tl_info;
	store_topology(info);
	pr_info("The CPU configuration topology of the machine is:");
	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
		printk(KERN_CONT " %d", info->mag[i]);
	printk(KERN_CONT " / %d\n", info->mnest);
	alloc_masks(info, &core_info, 1);
	alloc_masks(info, &book_info, 2);
}

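/*
 * Sysfs interface for the global dispatching mode, exposed as
 * /sys/devices/system/cpu/dispatching: 0 selects horizontal,
 * 1 vertical CPU polarization.
 */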
static int cpu_management;

static ssize_t dispatching_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t dispatching_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = 0;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	if (cpu_management == val)
		goto out;
	rc = topology_set_cpu_management(val);
	if (rc)
		goto out;
	cpu_management = val;
	topology_expect_change();
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(dispatching, 0644, dispatching_show,
		   dispatching_store);

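/*
 * Per-CPU "polarization" sysfs attribute showing the current
 * polarization of the CPU.
 */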
static ssize_t cpu_polarization_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (cpu_read_polarization(cpu)) {
	case POLARIZATION_HRZ:
		count = sprintf(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sprintf(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sprintf(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sprintf(buf, "vertical:high\n");
		break;
	default:
		count = sprintf(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);

static struct attribute *topology_cpu_attrs[] = {
	&dev_attr_polarization.attr,
	NULL,
};

static struct attribute_group topology_cpu_attr_group = {
	.attrs = topology_cpu_attrs,
};

int topology_cpu_init(struct cpu *cpu)
{
	return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
}

static int __init topology_init(void)
{
	if (!MACHINE_HAS_TOPOLOGY) {
		topology_update_polarization_simple();
		goto out;
	}
	set_topology_timer();
out:
	update_cpu_core_map();
	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);