#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/timer.h>

#include "cpu.h"
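
/*
 * getCx86()/setCx86() used throughout this file access the Cyrix
 * configuration registers (CCRn, DIRn, PCRn) through the index/data
 * I/O port pair at 0x22/0x23.
 */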

/*
 * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
 */
static void __init do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
{
        unsigned char ccr2, ccr3;
        unsigned long flags;

        /* we test for DEVID by checking whether CCR3 is writable */
        local_irq_save(flags);
        ccr3 = getCx86(CX86_CCR3);
        setCx86(CX86_CCR3, ccr3 ^ 0x80);
        getCx86(0xc0);   /* dummy to change bus */

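        /*
         * If the CCR3 toggle did not stick there are no DEVID registers;
         * toggling CCR2 bit 2 then tells a Cx486SLC/DLC (dir0 = 0xfd) from
         * a Cx486S A step (dir0 = 0xfe).  Both sentinel values are decoded
         * in the "case 0xf" branch of init_cyrix() below.
         */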
        if (getCx86(CX86_CCR3) == ccr3) {       /* no DEVID regs. */
                ccr2 = getCx86(CX86_CCR2);
                setCx86(CX86_CCR2, ccr2 ^ 0x04);
                getCx86(0xc0);   /* dummy */

                if (getCx86(CX86_CCR2) == ccr2) /* old Cx486SLC/DLC */
                        *dir0 = 0xfd;
                else {                          /* Cx486S A step */
                        setCx86(CX86_CCR2, ccr2);
                        *dir0 = 0xfe;
                }
        }
        else {
                setCx86(CX86_CCR3, ccr3);  /* restore CCR3 */

                /* read DIR0 and DIR1 CPU registers */
                *dir0 = getCx86(CX86_DIR0);
                *dir1 = getCx86(CX86_DIR1);
        }
        local_irq_restore(flags);
}

/*
 * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
 * order to identify the Cyrix CPU model after we're out of setup.c
 *
 * Actually since bugs.h doesn't even reference this perhaps someone should
 * fix the documentation ???
 */
static unsigned char Cx86_dir0_msb __initdata = 0;

static char Cx86_model[][9] __initdata = {
        "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
        "M II ", "Unknown"
};
static char Cx486_name[][5] __initdata = {
        "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
        "SRx2", "DRx2"
};
static char Cx486S_name[][4] __initdata = {
        "S", "S2", "Se", "S2e"
};
static char Cx486D_name[][4] __initdata = {
        "DX", "DX2", "?", "?", "?", "DX4"
};
static char Cx86_cb[] __initdata = "?.5x Core/Bus Clock";
static char cyrix_model_mult1[] __initdata = "12??43";
static char cyrix_model_mult2[] __initdata = "12233445";
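
/*
 * Cx86_cb is a scratch template whose first characters are patched per
 * model in init_cyrix(); cyrix_model_mult1/mult2 are indexed by the low
 * nibble of DIR0 to pick the core/bus clock multiplier digit.
 */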

/*
 * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
 * BIOSes for compatibility with DOS games. This makes the udelay loop
 * work correctly, and improves performance.
 *
 * FIXME: our newer udelay uses the tsc. We don't need to frob with SLOP
 */

extern void calibrate_delay(void) __init;
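
/*
 * Several routines below briefly set MAPEN (CCR3 bits 7-4 = 0001), which
 * makes the extended configuration registers (CCR4-CCR7, PCR0/PCR1)
 * visible, and then restore the saved CCR3 value to hide them again.
 */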

static void __init check_cx686_slop(struct cpuinfo_x86 *c)
{
        unsigned long flags;

        if (Cx86_dir0_msb == 3) {
                unsigned char ccr3, ccr5;

                local_irq_save(flags);
                ccr3 = getCx86(CX86_CCR3);
                setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
                ccr5 = getCx86(CX86_CCR5);
                if (ccr5 & 2)
                        setCx86(CX86_CCR5, ccr5 & 0xfd);  /* reset SLOP */
                setCx86(CX86_CCR3, ccr3);                 /* disable MAPEN */
                local_irq_restore(flags);

                if (ccr5 & 2) { /* possible wrong calibration done */
                        printk(KERN_INFO "Recalibrating delay loop with SLOP bit reset\n");
                        calibrate_delay();
                        c->loops_per_jiffy = loops_per_jiffy;
                }
        }
}


static void __init set_cx86_reorder(void)
{
        u8 ccr3;

        printk(KERN_INFO "Enable Memory access reorder on Cyrix/NSC processor.\n");
        ccr3 = getCx86(CX86_CCR3);
        setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */

        /* Load/Store Serialize to mem access disable (=reorder it) */
        setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80);
        /* set load/store serialize from 1GB to 4GB */
        ccr3 |= 0xe0;
        setCx86(CX86_CCR3, ccr3);
}

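/*
 * Switch the CPU cache to write-back: CCR2 bit 2 unlocks CR0.NW (bit 29),
 * which is then set, and CCR2 is re-locked with WT1 set.
 */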
static void __init set_cx86_memwb(void)
{
        u32 cr0;

        printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n");

        /* CCR2 bit 2: unlock NW bit */
        setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
        /* set 'Not Write-through' */
        cr0 = 0x20000000;
        write_cr0(read_cr0() | cr0);
        /* CCR2 bit 2: lock NW bit and set WT1 */
        setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14);
}

static void __init set_cx86_inc(void)
{
        unsigned char ccr3;

        printk(KERN_INFO "Enable Incrementor on Cyrix/NSC processor.\n");

        ccr3 = getCx86(CX86_CCR3);
        setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
        /* PCR1 -- Performance Control */
        /* Incrementor on, whatever that is */
        setCx86(CX86_PCR1, getCx86(CX86_PCR1) | 0x02);
        /* PCR0 -- Performance Control */
        /* Incrementor Margin 10 */
        setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04);
        setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
}

/*
 * Configure later MediaGX and/or Geode processor.
 */

static void __init geode_configure(void)
{
        unsigned long flags;
        u8 ccr3, ccr4;
        local_irq_save(flags);

        /* Suspend on halt power saving and enable #SUSP pin */
        setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);

        ccr3 = getCx86(CX86_CCR3);
        setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */

        ccr4 = getCx86(CX86_CCR4);
        ccr4 |= 0x38; /* FPU fast, DTE cache, Mem bypass */
        setCx86(CX86_CCR4, ccr4); /* write the new value back */

        setCx86(CX86_CCR3, ccr3); /* disable MAPEN */

        set_cx86_memwb();
        set_cx86_reorder();
        set_cx86_inc();

        local_irq_restore(flags);
}


#ifdef CONFIG_PCI
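/*
 * Cyrix 5510/5520 companion chips have a funky PIT; init_cyrix() checks
 * for them with pci_dev_present() and sets pit_latch_buggy.
 */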
static struct pci_device_id cyrix_55x0[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510) },
        { PCI_DEVICE(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520) },
        { },
};
#endif

static void __init init_cyrix(struct cpuinfo_x86 *c)
{
        unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;
        char *buf = c->x86_model_id;
        const char *p = NULL;

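        /*
         * Feature bits below are addressed as word*32+bit within
         * c->x86_capability: word 0 holds the standard CPUID flags and
         * word 1 the extended (AMD) ones.
         */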
        /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
           3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
        clear_bit(0*32+31, c->x86_capability);

        /* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */
        if ( test_bit(1*32+24, c->x86_capability) ) {
                clear_bit(1*32+24, c->x86_capability);
                set_bit(X86_FEATURE_CXMMX, c->x86_capability);
        }

        do_cyrix_devid(&dir0, &dir1);

        check_cx686_slop(c);

        Cx86_dir0_msb = dir0_msn = dir0 >> 4; /* identifies CPU "family" */
        dir0_lsn = dir0 & 0xf;                /* model or clock multiplier */

        /* common case step number/rev -- exceptions handled below */
        c->x86_model = (dir1 >> 4) + 1;
        c->x86_mask = dir1 & 0xf;

        /* Now cook; the original recipe is by Channing Corn, from Cyrix.
         * We do the same thing for each generation: we work out
         * the model, multiplier and stepping. Black magic included,
         * to make the silicon step/rev numbers match the printed ones.
         */

        switch (dir0_msn) {
                unsigned char tmp;

        case 0: /* Cx486SLC/DLC/SRx/DRx */
                p = Cx486_name[dir0_lsn & 7];
                break;

        case 1: /* Cx486S/DX/DX2/DX4 */
                p = (dir0_lsn & 8) ? Cx486D_name[dir0_lsn & 5]
                                   : Cx486S_name[dir0_lsn & 3];
                break;

        case 2: /* 5x86 */
                Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
                p = Cx86_cb+2;
                break;

        case 3: /* 6x86/6x86L */
                Cx86_cb[1] = ' ';
                Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
                if (dir1 > 0x21) { /* 686L */
                        Cx86_cb[0] = 'L';
                        p = Cx86_cb;
                        (c->x86_model)++;
                } else /* 686 */
                        p = Cx86_cb+1;
                /* Emulate MTRRs using Cyrix's ARRs. */
                set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability);
                /* 6x86's contain this bug */
                c->coma_bug = 1;
                break;

        case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */
#ifdef CONFIG_PCI
                /* It isn't really a PCI quirk directly, but the cure is the
                   same. The MediaGX has deep magic SMM stuff that handles the
                   SB emulation. It throws away the FIFO on disable_dma() which
                   is wrong and ruins the audio.

                   Bug2: VSA1 has a wrap bug so that using maximum sized DMA
                   causes bad things. According to NatSemi VSA2 has another
                   bug to do with 'hlt'. I've not seen any boards using VSA2
                   and X doesn't seem to support it either so who cares 8).
                   VSA1 we work around however.
                */

                printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n");
                isa_dma_bridge_buggy = 2;
#endif
                c->x86_cache_size = 16; /* Yep, 16K integrated cache, that's it */

                /*
                 * The 5510/5520 companion chips have a funky PIT.
                 */
                if (pci_dev_present(cyrix_55x0))
                        pit_latch_buggy = 1;

                /* GXm supports extended cpuid levels a la AMD */
                if (c->cpuid_level == 2) {
                        /* Enable cxMMX extensions (GX1 Datasheet 54) */
                        setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1);

                        /* GXlv/GXm/GX1 */
                        if ((dir1 >= 0x50 && dir1 <= 0x54) || dir1 >= 0x63)
                                geode_configure();
                        get_model_name(c);  /* get CPU marketing name */
                        return;
                }
                else { /* MediaGX */
                        Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4';
                        p = Cx86_cb+2;
                        c->x86_model = (dir1 & 0x20) ? 1 : 2;
                }
                break;

        case 5: /* 6x86MX/M II */
                if (dir1 > 7)
                {
                        dir0_msn++;  /* M II */
                        /* Enable MMX extensions (App note 108) */
                        setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
                }
                else
                {
                        c->coma_bug = 1;      /* 6x86MX, it has the bug. */
                }
                tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0;
                Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7];
                p = Cx86_cb+tmp;
                if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20))
                        (c->x86_model)++;
                /* Emulate MTRRs using Cyrix's ARRs. */
                set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability);
                break;

        case 0xf:  /* Cyrix 486 without DEVID registers */
                switch (dir0_lsn) {
                case 0xd:  /* either a 486SLC or DLC w/o DEVID */
                        dir0_msn = 0;
                        p = Cx486_name[(c->hard_math) ? 1 : 0];
                        break;

                case 0xe:  /* a 486S A step */
                        dir0_msn = 0;
                        p = Cx486S_name[0];
                        break;
                }
                break;

        default:  /* unknown (shouldn't happen, we know everyone ;-) */
                dir0_msn = 7;
                break;
        }
        strcpy(buf, Cx86_model[dir0_msn & 7]);
        if (p) strcat(buf, p);
        return;
}

/*
 * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
 * by the fact that they preserve the flags across the division of 5/2.
 * PII and PPro exhibit this behavior too, but they have cpuid available.
 */

/*
 * Perform the Cyrix 5/2 test. A Cyrix won't change
 * the flags, while other 486 chips will.
 */
static inline int test_cyrix_52div(void)
{
        unsigned int test;

        __asm__ __volatile__(
             "sahf\n\t"        /* clear flags (%eax = 0x0005) */
             "div %b2\n\t"     /* divide 5 by 2 */
             "lahf"            /* store flags into %ah */
             : "=a" (test)
             : "0" (5), "q" (2)
             : "cc");

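        /*
         * EFLAGS bit 1 always reads as 1, so with AH = 0 the "sahf" leaves
         * the low flag byte at 0x02; a Cyrix keeps the flags untouched
         * across the divide, while other CPUs clobber them.
         */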
        /* AH is 0x02 on Cyrix after the divide.. */
        return (unsigned char) (test >> 8) == 0x02;
}

static void cyrix_identify(struct cpuinfo_x86 *c)
{
        /* Detect Cyrix with disabled CPUID */
        if ( c->x86 == 4 && test_cyrix_52div() ) {
                unsigned char dir0, dir1;

                strcpy(c->x86_vendor_id, "CyrixInstead");
                c->x86_vendor = X86_VENDOR_CYRIX;

                /* Actually enable cpuid on the older cyrix */

                /* Retrieve CPU revisions */

                do_cyrix_devid(&dir0, &dir1);

                dir0 >>= 4;

                /* Check it is an affected model */

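                /*
                 * DIR0 high nibble 3 (6x86/6x86L) and 5 (6x86MX) identify
                 * parts that may ship with CPUID disabled; CCR4 bit 7 turns
                 * it back on.
                 */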
                if (dir0 == 5 || dir0 == 3)
                {
                        unsigned char ccr3, ccr4;
                        unsigned long flags;
                        printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n");
                        local_irq_save(flags);
                        ccr3 = getCx86(CX86_CCR3);
                        setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
                        ccr4 = getCx86(CX86_CCR4);
                        setCx86(CX86_CCR4, ccr4 | 0x80);          /* enable cpuid */
                        setCx86(CX86_CCR3, ccr3);                 /* disable MAPEN */
                        local_irq_restore(flags);
                }
        }
        generic_identify(c);
}

static struct cpu_dev cyrix_cpu_dev __initdata = {
        .c_vendor   = "Cyrix",
        .c_ident    = { "CyrixInstead" },
        .c_init     = init_cyrix,
        .c_identify = cyrix_identify,
};

int __init cyrix_init_cpu(void)
{
        cpu_devs[X86_VENDOR_CYRIX] = &cyrix_cpu_dev;
        return 0;
}

//early_arch_initcall(cyrix_init_cpu);

static struct cpu_dev nsc_cpu_dev __initdata = {
        .c_vendor   = "NSC",
        .c_ident    = { "Geode by NSC" },
        .c_init     = init_cyrix,
        .c_identify = generic_identify,
};

int __init nsc_init_cpu(void)
{
        cpu_devs[X86_VENDOR_NSC] = &nsc_cpu_dev;
        return 0;
}

//early_arch_initcall(nsc_init_cpu);