#include <linux/export.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/pci-direct.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
# include <asm/cacheflush.h>
#endif

#include "cpu.h"

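/*
 * The two helpers below access vendor MSRs that fault unless EDI
 * (gprs[7]) carries the 0x9c5a203a constant, which appears to act as
 * an access key for these K8 registers; take that as an educated
 * reading of the code rather than documented fact. The *_safe_regs()
 * calls keep a stray #GP from taking the kernel down on other parts.
 */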
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
	u32 gprs[8] = { 0 };
	int err;

	WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__);

	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
	u32 gprs[8] = { 0 };

	WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return wrmsr_safe_regs(gprs);
}

#ifdef CONFIG_X86_32
/*
 * B-stepping AMD K6 CPUs before B 9730xxxx have hardware bugs that can
 * cause misexecution of code under Linux. Owners of such processors
 * should contact AMD for precise details and a CPU swap.
 *
 * See http://www.multimania.com/poulot/k6bug.html
 * and section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 * (Publication # 21266 Issue Date: August 1998)
 *
 * The following test is erm.. interesting. AMD neglected to bump
 * the chip stepping when fixing the bug but they also tweaked some
 * performance at the same time..
 */

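/*
 * One-instruction dummy function: init_amd_k6() below times a loop of
 * indirect calls to it to decide whether this erratum was fixed in
 * silicon.
 */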
extern void vide(void);
__asm__(".align 4\nvide: ret");

static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
{
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround : Remove the unneeded alias.
 */
#define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0x000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
}


static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int mbytes = num_physpages >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_mask == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		unsigned long d, d2;

		printk(KERN_INFO "AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */

		n = K6_BUG_LOOP;
		f_vide = vide;
		rdtscl(d);
		while (n--)
			f_vide();
		rdtscl(d2);
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			printk(KERN_CONT
			       "system stability may be impaired when more than 32 MB are used.\n");
		else
			printk(KERN_CONT "probably OK (after B9730xxxx).\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	   (c->x86_model == 8 && c->x86_mask < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
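			/*
			 * Old-style WHCR layout, as inferred from the shifts
			 * below: bit 0 enables write allocation and the bits
			 * above it hold the limit in 4 MB units, hence the
			 * 508 MB cap above.
			 */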
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_mask > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
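			/*
			 * New-style WHCR, again inferred from the shifts
			 * below: the enable bit moved to bit 16 and the
			 * limit (still in 4 MB units) to the upper bits,
			 * allowing up to 4092 MB.
			 */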
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
}

static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
{
	/* Are we being called from identify_secondary_cpu()? */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
	    (c->x86_mask == 1)))
		goto valid_k7;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_mask == 0))
		goto valid_k7;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have the MP capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XPs has the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has_mp)
			goto valid_k7;

	/* If we get here, it's not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running an SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	if (!test_taint(TAINT_UNSAFE_SMP))
		add_taint(TAINT_UNSAFE_SMP);

valid_k7:
	;
}

static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
{
	u32 l, h;

	/*
	 * Bit 15 of the Athlon-specific MSR 15 needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPUs.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
			rdmsr(MSR_K7_HWCR, l, h);
			l &= ~0x00008000;
			wrmsr(MSR_K7_HWCR, l, h);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
	 * as per AMD technical note 27212 0.2
	 */
	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			printk(KERN_INFO
			    "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	set_cpu_cap(c, X86_FEATURE_K7);

	amd_k7_smp_check(c);
}
#endif

#ifdef CONFIG_NUMA
/*
 * Used to work around broken NUMA configs; see the comment in
 * srat_detect_node().
 */
static int __cpuinit nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * Fixup core topology information for
 * (1) AMD multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) AMD processors supporting compute units
 */
#ifdef CONFIG_X86_HT
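/*
 * A sketch of the topology sources used below, read off the bit
 * arithmetic rather than quoted from AMD documentation: CPUID leaf
 * 0x8000001e returns the node ID and node count in ECX and the
 * compute-unit ID plus cores-per-CU in EBX; older parts without
 * TOPOEXT expose the node information through MSR_FAM10H_NODE_ID
 * instead.
 */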
static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
{
	u32 nodes, cores_per_cu = 1;
	u8 node_id;
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (cpu_has_topoext) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		nodes = ((ecx >> 8) & 7) + 1;
		node_id = ecx & 7;

		/* get compute unit information */
		smp_num_siblings = ((ebx >> 8) & 3) + 1;
		c->compute_unit_id = ebx & 0xff;
		cores_per_cu += ((ebx >> 8) & 3);
	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes = ((value >> 3) & 7) + 1;
		node_id = value & 7;
	} else
		return;

	/* fixup multi-node processor information */
	if (nodes > 1) {
		u32 cores_per_node;
		u32 cus_per_node;

		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
		cores_per_node = c->x86_max_cores / nodes;
		cus_per_node = cores_per_node / cores_per_cu;

		/* store NodeID, use llc_shared_map to store sibling info */
		per_cpu(cpu_llc_id, cpu) = node_id;

		/* core id has to be in the [0 .. cores_per_node - 1] range */
		c->cpu_core_id %= cores_per_node;
		c->compute_unit_id %= cus_per_node;
	}
}
#endif

/*
 * On an AMD dual-core setup the lower bits of the APIC id
 * distinguish the cores. Assumes number of cores is a power of two.
 */
static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
	amd_get_topology(c);
#endif
}

u16 amd_get_nb_id(int cpu)
{
	u16 id = 0;
#ifdef CONFIG_SMP
	id = per_cpu(cpu_llc_id, cpu);
#endif
	return id;
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);

static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);

	/*
	 * On multi-fabric platforms (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fix up some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created. In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in. Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids. If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID, HT node ID and NUMA topology. As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (ht_nodeid >= 0 &&
		    __apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}

static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				printk(KERN_WARNING FW_BUG "TSC doesn't count "
					"with P0 frequency!\n");
		}
	}

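	/*
	 * Family 0x15 sets up mmap VA alignment below. As the arithmetic
	 * suggests (a reading of the code, not a quote from AMD docs),
	 * CPUID leaf 0x80000005 EDX describes the L1 instruction cache,
	 * size in bits 31:24 and associativity in bits 23:16, so upperbit
	 * works out to the way size in bytes. Aligning to it should avoid
	 * I-cache aliasing penalties between the two cores of a compute
	 * unit, which share the L1I.
	 */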
469 if (c->x86 == 0x15) {
470 unsigned long upperbit;
471 u32 cpuid, assoc;
472
473 cpuid = cpuid_edx(0x80000005);
474 assoc = cpuid >> 16 & 0xff;
475 upperbit = ((cpuid >> 24) << 10) / assoc;
476
477 va_align.mask = (upperbit - 1) & PAGE_MASK;
478 va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
479 }
480}
481
static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd_mc(c);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
		if (!check_tsc_unstable())
			sched_clock_stable = 1;
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_mask >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/* check CPU config space for extended APIC ID */
	if (cpu_has_apic && c->x86 >= 0xf) {
		unsigned int val;
		val = read_pci_config(0, 24, 0, 0x68);
		if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
	}
#endif
}

static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
	u32 dummy;
	unsigned long long value;

#ifdef CONFIG_SMP
	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 0xf) {
		rdmsrl(MSR_K7_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K7_HWCR, value);
	}
#endif

	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

#ifdef CONFIG_X86_64
	/* On C+ stepping K8 rep microcode works well for copy/memset */
	if (c->x86 == 0xf) {
		u32 level;

		level = cpuid_eax(1);
		if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
			set_cpu_cap(c, X86_FEATURE_REP_GOOD);

		/*
		 * Some BIOSes incorrectly force this feature, but only K8
		 * revision D (model = 0x14) and later actually support it.
		 * (AMD Erratum #110, docId: 25759).
		 */
		if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
			clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
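			/*
			 * MSR C001_100D seems to gate what extended CPUID
			 * reports; clearing bit 32 keeps the bogus LAHF_LM
			 * bit from coming back. That reading is inferred
			 * from the mask below, as the MSR itself is not
			 * publicly documented.
			 */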
			if (!rdmsrl_amd_safe(0xc001100d, &value)) {
				value &= ~(1ULL << 32);
				wrmsrl_amd_safe(0xc001100d, value);
			}
		}

	}
	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();
#else

	/*
	 * FIXME: We should handle the K5 here. Set up the write
	 * range and also turn on MSR 83 bits 4 and 31 (write alloc,
	 * no bus pipeline)
	 */

	switch (c->x86) {
	case 4:
		init_amd_k5(c);
		break;
	case 5:
		init_amd_k6(c);
		break;
	case 6: /* An Athlon/Duron */
		init_amd_k7(c);
		break;
	}

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);
#endif

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);

	if (!c->x86_model_id[0]) {
		switch (c->x86) {
		case 0xf:
			/* Should distinguish models here, but this is only
			   a fallback anyways. */
			strcpy(c->x86_model_id, "Hammer");
			break;
		}
	}

	/* re-enable TopologyExtensions if switched off by BIOS */
	if ((c->x86 == 0x15) &&
	    (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {

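		/*
		 * Bit 54 of MSR 0xc0011005 appears to gate the TOPOEXT
		 * CPUID bit: set it, then read it back to verify the write
		 * stuck before advertising the feature. The MSR itself is
		 * not publicly documented; this is read off the code below.
		 */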
		if (!rdmsrl_safe(0xc0011005, &value)) {
			value |= 1ULL << 54;
			wrmsrl_safe(0xc0011005, value);
			rdmsrl(0xc0011005, value);
			if (value & (1ULL << 54)) {
				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
				printk(KERN_INFO FW_INFO "CPU: Re-enabling "
				       "disabled Topology Extensions Support\n");
			}
		}
	}

	/*
	 * The way access filter has a performance penalty on some workloads.
	 * Disable it on the affected CPUs.
	 */
	if ((c->x86 == 0x15) &&
	    (c->x86_model >= 0x02) && (c->x86_model < 0x20)) {

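		/*
		 * Setting bits 1-4 in MSR 0xc0011021 is what disables the
		 * filter here; the individual bit meanings are not publicly
		 * documented, so treat the 0x1E mask as an opaque knob.
		 */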
		if (!rdmsrl_safe(0xc0011021, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrl_safe(0xc0011021, value);
		}
	}

	cpu_detect_cache_sizes(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008) {
		amd_detect_cmp(c);
		srat_detect_node(c);
	}

#ifdef CONFIG_X86_32
	detect_ht(c);
#endif

	init_amd_cacheinfo(c);

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	if (cpu_has_xmm2) {
		/* MFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
	}

#ifdef CONFIG_X86_64
	if (c->x86 == 0x10) {
		/* do this for boot cpu */
		if (c == &boot_cpu_data)
			check_enable_amd_mmconf_dmi();

		fam10h_check_enable_mmcfg();
	}

	if (c == &boot_cpu_data && c->x86 >= 0xf) {
		unsigned long long tseg;

		/*
		 * Split up direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems very little
		 * benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
			unsigned long pfn = tseg >> PAGE_SHIFT;

			printk(KERN_DEBUG "tseg: %010llx\n", tseg);
			if (pfn_range_is_mapped(pfn, pfn + 1))
				set_memory_4k((unsigned long)__va(tseg), 1);
		}
	}
#endif

	/*
	 * Family 0x12 and above processors have APIC timer
	 * running in deep C states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	if (c->x86 == 0x10) {
		/*
		 * Disable GART TLB Walk Errors on Fam10h. We do this here
		 * because this is always needed when GART is enabled, even in
		 * a kernel which has no MCE support built in.
		 * The BIOS should disable GartTlbWlk errors itself; if it
		 * doesn't, do it here, as suggested by the BKDG.
		 *
		 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
		 */
		u64 mask;
		int err;

		err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
		if (err == 0) {
			mask |= (1 << 10);
			wrmsrl_safe(MSR_AMD64_MCx_MASK(4), mask);
		}

		/*
		 * On family 10h BIOS may not have properly enabled WC+ support,
		 * causing it to be converted to CD memtype. This may result in
		 * performance degradation for certain nested-paging guests.
		 * Prevent this conversion by clearing bit 24 in
		 * MSR_AMD64_BU_CFG2.
		 *
		 * NOTE: we want to use the _safe accessors so as not to #GP kvm
		 * guests on older kvm hosts.
		 */

		rdmsrl_safe(MSR_AMD64_BU_CFG2, &value);
		value &= ~(1ULL << 24);
		wrmsrl_safe(MSR_AMD64_BU_CFG2, value);
	}

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
}

#ifdef CONFIG_X86_32
static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
					     unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_mask == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
		    (c->x86_mask == 0 || c->x86_mask == 1))
			size = 256;
	}
	return size;
}
#endif

static void __cpuinit cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
{
	tlb_flushall_shift = 5;

	if (c->x86 <= 0x11)
		tlb_flushall_shift = 4;
}

static void __cpuinit cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask)) {
		u32 a, b, c, d;

		cpuid(0x80000005, &a, &b, &c, &d);
		tlb_lld_2m[ENTRIES] = (a >> 16) & 0xff;
	} else {
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;
	}

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m[ENTRIES] = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m[ENTRIES] = eax & 0xff;
		}
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;

	cpu_set_tlb_flushall_shift(c);
}

static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.c_models = {
		{ .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
		  {
			[3] = "486 DX/2",
			[7] = "486 DX/2-WB",
			[8] = "486 DX/4",
			[9] = "486 DX/4-WB",
			[14] = "Am5x86-WT",
			[15] = "Am5x86-WB"
		  }
		},
	},
	.c_size_cache	= amd_size_cache,
#endif
	.c_early_init	= early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE(). Each erratum also has to be declared as extern const
 * int[] in arch/x86/include/asm/processor.h.
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
EXPORT_SYMBOL_GPL(amd_erratum_400);

const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
EXPORT_SYMBOL_GPL(amd_erratum_383);

bool cpu_has_amd_erratum(const int *erratum)
{
	struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
	int osvw_id = *erratum++;
	u32 range;
	u32 ms;

	/*
	 * If called early enough that current_cpu_data hasn't been initialized
	 * yet, fall back to boot_cpu_data.
	 */
	if (cpu->x86 == 0)
		cpu = &boot_cpu_data;

	if (cpu->x86_vendor != X86_VENDOR_AMD)
		return false;

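	/*
	 * OSVW (OS Visible Workarounds): MSR_AMD64_OSVW_ID_LENGTH reports
	 * how many OSVW status bits are valid, and the status MSRs carry
	 * one bit per OSVW id saying whether the erratum applies to this
	 * part, sparing us the family/model/stepping matching below.
	 */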
	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			       osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}

	/* OSVW unavailable or ID unknown, match family-model-stepping range */
	ms = (cpu->x86_model << 4) | cpu->x86_mask;
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
}

EXPORT_SYMBOL_GPL(cpu_has_amd_erratum);