/*
 * Copyright IBM Corp. 2007, 2011
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/bootmem.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <asm/sysinfo.h>

#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)

struct mask_info {
	struct mask_info *next;
	unsigned char id;
	cpumask_t mask;
};

static int topology_enabled = 1;
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;
static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn);
/* topology_lock protects the core linked list */
static DEFINE_SPINLOCK(topology_lock);

static struct mask_info core_info;
cpumask_t cpu_core_map[NR_CPUS];
unsigned char cpu_core_id[NR_CPUS];

static struct mask_info book_info;
cpumask_t cpu_book_map[NR_CPUS];
unsigned char cpu_book_id[NR_CPUS];

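/*
 * Return the cpumask of the topology group (core or book list) that
 * contains the given logical cpu. Falls back to a mask containing just
 * the cpu itself if topology is disabled, the machine has no topology
 * support, or the cpu shows up in none of the masks.
 */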
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
	cpumask_t mask;

	cpumask_clear(&mask);
	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) {
		cpumask_copy(&mask, cpumask_of(cpu));
		return mask;
	}
	while (info) {
		if (cpumask_test_cpu(cpu, &info->mask)) {
			mask = info->mask;
			break;
		}
		info = info->next;
	}
	if (cpumask_empty(&mask))
		cpumask_copy(&mask, cpumask_of(cpu));
	return mask;
}

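/*
 * Wire all cpus of a type 0 topology-list entry into the current book
 * and core masks. The entry's bitmask is indexed from the leftmost bit,
 * so the physical cpu address is TOPOLOGY_CPU_BITS - 1 - bit, offset by
 * the entry's origin. On machines reporting one core per cpu the core
 * pointer is advanced for every cpu found, hence the updated pointer is
 * returned to the caller.
 */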
static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
					  struct mask_info *book,
					  struct mask_info *core,
					  int one_core_per_cpu)
{
	unsigned int cpu;

	for_each_set_bit(cpu, &tl_cpu->mask[0], TOPOLOGY_CPU_BITS) {
		unsigned int rcpu;
		int lcpu;

		rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
		lcpu = smp_find_processor_id(rcpu);
		if (lcpu >= 0) {
			cpumask_set_cpu(lcpu, &book->mask);
			cpu_book_id[lcpu] = book->id;
			cpumask_set_cpu(lcpu, &core->mask);
			if (one_core_per_cpu) {
				cpu_core_id[lcpu] = rcpu;
				core = core->next;
			} else {
				cpu_core_id[lcpu] = core->id;
			}
			smp_cpu_set_polarization(lcpu, tl_cpu->pp);
		}
	}
	return core;
}

static void clear_masks(void)
{
	struct mask_info *info;

	info = &core_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
	info = &book_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
}

static union topology_entry *next_tle(union topology_entry *tle)
{
	if (!tle->nl)
		return (union topology_entry *)((struct topology_cpu *)tle + 1);
	return (union topology_entry *)((struct topology_container *)tle + 1);
}

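/*
 * Parse the topology list returned by stsi 15.1.x: container entries
 * (nl > 0) open the next book or core, cpu entries (nl == 0) populate
 * the currently open ones. An unexpected nesting level aborts the parse
 * and clears all masks.
 */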
static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
{
	struct mask_info *core = &core_info;
	struct mask_info *book = &book_info;
	union topology_entry *tle, *end;

	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 2:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 1:
			core = core->next;
			core->id = tle->container.id;
			break;
		case 0:
			add_cpus_to_mask(&tle->cpu, book, core, 0);
			break;
		default:
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}

static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
{
	struct mask_info *core = &core_info;
	struct mask_info *book = &book_info;
	union topology_entry *tle, *end;

	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 1:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 0:
			core = add_cpus_to_mask(&tle->cpu, book, core, 1);
			break;
		default:
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}

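/*
 * z10 machines (types 0x2097 and 0x2098) report only two topology
 * nesting levels and a single core per cpu, so they get their own
 * parser; everything else takes the generic path.
 */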
static void tl_to_cores(struct sysinfo_15_1_x *info)
{
	struct cpuid cpu_id;

	get_cpu_id(&cpu_id);
	spin_lock_irq(&topology_lock);
	clear_masks();
	switch (cpu_id.machine) {
	case 0x2097:
	case 0x2098:
		__tl_to_cores_z10(info);
		break;
	default:
		__tl_to_cores_generic(info);
	}
	spin_unlock_irq(&topology_lock);
}

static void topology_update_polarization_simple(void)
{
	int cpu;

	mutex_lock(&smp_cpu_state_mutex);
	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
	mutex_unlock(&smp_cpu_state_mutex);
}

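/*
 * Perform Topology Function: request a polarization change or check for
 * a pending topology change report. The instruction is emitted via
 * .insn; the returned condition code is zero if the request was
 * accepted (or, for PTF_CHECK, if no change report is pending).
 */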
static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (rc)
		: "d" (fc) : "cc");
	return rc;
}

int topology_set_cpu_management(int fc)
{
	int cpu, rc;

	if (!MACHINE_HAS_TOPOLOGY)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
	return rc;
}

static void update_cpu_core_map(void)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&topology_lock, flags);
	for_each_possible_cpu(cpu) {
		cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
		cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
	}
	spin_unlock_irqrestore(&topology_lock, flags);
}

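/*
 * Fetch the machine topology via stsi 15.1.x. Level 3 information is
 * only requested if the machine supports at least three nesting levels,
 * otherwise the two-level variant is used.
 */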
void store_topology(struct sysinfo_15_1_x *info)
{
	if (topology_max_mnest >= 3)
		stsi(info, 15, 1, 3);
	else
		stsi(info, 15, 1, 2);
}

int arch_update_cpu_topology(void)
{
	struct sysinfo_15_1_x *info = tl_info;
	struct device *dev;
	int cpu;

	if (!MACHINE_HAS_TOPOLOGY) {
		update_cpu_core_map();
		topology_update_polarization_simple();
		return 0;
	}
	store_topology(info);
	tl_to_cores(info);
	update_cpu_core_map();
	for_each_online_cpu(cpu) {
		dev = get_cpu_device(cpu);
		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
	}
	return 1;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}

static struct timer_list topology_timer =
	TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0);

static atomic_t topology_poll = ATOMIC_INIT(0);

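/*
 * Rearm the polling timer: poll every 100ms while topology changes are
 * expected (topology_poll is counted down by one per expiry), then fall
 * back to a slow once-a-minute poll.
 */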
static void set_topology_timer(void)
{
	if (atomic_add_unless(&topology_poll, -1, 0))
		mod_timer(&topology_timer, jiffies + HZ / 10);
	else
		mod_timer(&topology_timer, jiffies + HZ * 60);
}

void topology_expect_change(void)
{
	if (!MACHINE_HAS_TOPOLOGY)
		return;
	/* This is racy, but it doesn't matter since it is just a heuristic.
	 * Worst case is that we poll in a higher frequency for a bit longer.
	 */
	if (atomic_read(&topology_poll) > 60)
		return;
	atomic_add(60, &topology_poll);
	set_topology_timer();
}

static int __init early_parse_topology(char *p)
{
	if (strncmp(p, "off", 3))
		return 0;
	topology_enabled = 0;
	return 0;
}
early_param("topology", early_parse_topology);

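/*
 * Allocate the linked list of mask_info structures for one nesting
 * level. The worst-case number of containers on that level is the
 * product of the magnitudes of all levels above it, as reported in
 * info->mag[].
 */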
static void __init alloc_masks(struct sysinfo_15_1_x *info,
			       struct mask_info *mask, int offset)
{
	int i, nr_masks;

	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
	for (i = 0; i < info->mnest - offset; i++)
		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
	nr_masks = max(nr_masks, 1);
	for (i = 0; i < nr_masks; i++) {
		mask->next = alloc_bootmem(sizeof(struct mask_info));
		mask = mask->next;
	}
}

void __init s390_init_cpu_topology(void)
{
	struct sysinfo_15_1_x *info;
	int i;

	if (!MACHINE_HAS_TOPOLOGY)
		return;
	tl_info = alloc_bootmem_pages(PAGE_SIZE);
	info = tl_info;
	store_topology(info);
	pr_info("The CPU configuration topology of the machine is:");
	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
		printk(KERN_CONT " %d", info->mag[i]);
	printk(KERN_CONT " / %d\n", info->mnest);
	alloc_masks(info, &core_info, 1);
	alloc_masks(info, &book_info, 2);
}

static int cpu_management;

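/*
 * /sys/devices/system/cpu/dispatching: selects horizontal (0) or
 * vertical (1) cpu polarization. Example usage, assuming sysfs is
 * mounted in the usual place:
 *
 *	echo 1 > /sys/devices/system/cpu/dispatching
 */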
static ssize_t dispatching_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t dispatching_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = 0;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	if (cpu_management == val)
		goto out;
	rc = topology_set_cpu_management(val);
	if (rc)
		goto out;
	cpu_management = val;
	topology_expect_change();
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(dispatching, 0644, dispatching_show,
		   dispatching_store);

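/*
 * Per-cpu "polarization" attribute: shows horizontal, one of the
 * vertical entitlement levels, or "unknown" while a polarization
 * change is still in progress.
 */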
static ssize_t cpu_polarization_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (smp_cpu_get_polarization(cpu)) {
	case POLARIZATION_HRZ:
		count = sprintf(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sprintf(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sprintf(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sprintf(buf, "vertical:high\n");
		break;
	default:
		count = sprintf(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);

static struct attribute *topology_cpu_attrs[] = {
	&dev_attr_polarization.attr,
	NULL,
};

static struct attribute_group topology_cpu_attr_group = {
	.attrs = topology_cpu_attrs,
};

int topology_cpu_init(struct cpu *cpu)
{
	return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
}

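/*
 * Late init: start the topology change detection timer on machines
 * with topology support and create the global dispatching attribute.
 */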
static int __init topology_init(void)
{
	if (!MACHINE_HAS_TOPOLOGY) {
		topology_update_polarization_simple();
		goto out;
	}
	set_topology_timer();
out:
	update_cpu_core_map();
	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);