#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/mc146818rtc.h>
#include <linux/smp.h>
#include <linux/timex.h>

#include <asm/hardirq.h>
#include <asm/div64.h>
#include <asm/cpu.h>
#include <asm/time.h>
#include <asm/irq.h>
#include <asm/mc146818-time.h>
#include <asm/msc01_ic.h>

#include <asm/mips-boards/generic.h>
#include <asm/mips-boards/prom.h>
#include <asm/mips-boards/simint.h>


unsigned long cpu_khz;

irqreturn_t sim_timer_interrupt(int irq, void *dev_id)
{
#ifdef CONFIG_SMP
	int cpu = smp_processor_id();

	/*
	 * CPU 0 handles the global timer interrupt job and resets the
	 * count/compare registers to trigger the next timer interrupt.
	 */
#ifndef CONFIG_MIPS_MT_SMTC
	if (cpu == 0) {
		timer_interrupt(irq, dev_id);
	} else {
		/*
		 * Everyone else needs to reset the timer interrupt here,
		 * as ll_local_timer_interrupt doesn't.
		 *
		 * FIXME: need to cope with counter underflow.
		 * More support needs to be added to kernel/time for
		 * counter/timer interrupts on multiple CPUs.
		 */
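		/*
		 * mips_hpt_frequency is the Count tick rate in Hz, so
		 * mips_hpt_frequency / HZ is one jiffy's worth of ticks:
		 * e.g. a 6 MHz counter with HZ=100 schedules the next
		 * interrupt 60000 ticks (10 ms) ahead.  Writing Compare
		 * also clears the pending timer interrupt.
		 */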
		write_c0_compare(read_c0_count() + (mips_hpt_frequency / HZ));
	}
#else /* SMTC */
	/*
	 * In an SMTC system, one Count/Compare set exists per VPE.
	 * Which TC within a VPE gets the interrupt is essentially
	 * random - we only know that it shouldn't be one with
	 * IXMT set.  Whichever TC gets the interrupt needs to
	 * send special interprocessor interrupts to the other
	 * TCs to make sure that they schedule, etc.
	 *
	 * That code is specific to the SMTC kernel, not to
	 * the simulation platform, so it's invoked from
	 * the general MIPS timer_interrupt routine.
	 *
	 * We have a problem in that the interrupt vector code
	 * had to turn off the timer IM bit to avoid redundant
	 * entries, but we may never get to mips_cpu_irq_end
	 * to turn it back on again if the scheduler gets
	 * involved.  So we clear the pending timer here,
	 * and re-enable the mask...
	 */

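	/*
	 * The dvpe()/evpe() pair below disables multi-VPE execution
	 * around the Cause/Status update; 0x100 << cp0_compare_irq
	 * selects the IP/IM bit belonging to the timer (compare)
	 * interrupt.
	 */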
	int vpflags = dvpe();
	write_c0_compare(read_c0_count() - 1);
	clear_c0_cause(0x100 << cp0_compare_irq);
	set_c0_status(0x100 << cp0_compare_irq);
	irq_enable_hazard();
	evpe(vpflags);

	if (cpu_data[cpu].vpe_id == 0)
		timer_interrupt(irq, dev_id);
	else
		write_c0_compare(read_c0_count() + (mips_hpt_frequency / HZ));
	smtc_timer_broadcast(cpu_data[cpu].vpe_id);

#endif /* CONFIG_MIPS_MT_SMTC */

	/*
	 * Every CPU should do profiling and process accounting.
	 */
	local_timer_interrupt(irq, dev_id);

	return IRQ_HANDLED;
#else
	return timer_interrupt(irq, dev_id);
#endif
}


/*
 * Estimate CPU frequency.  Sets mips_hpt_frequency as a side-effect.
 */
static unsigned int __init estimate_cpu_frequency(void)
{
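	/* Mask off the revision field; keep the company and processor ID. */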
	unsigned int prid = read_c0_prid() & 0xffff00;
	unsigned int count;

#if 1
	/*
	 * Hardwire the board frequency to 12 MHz; count holds the
	 * corresponding Count register rate.
	 */

	if ((prid == (PRID_COMP_MIPS | PRID_IMP_20KC)) ||
	    (prid == (PRID_COMP_MIPS | PRID_IMP_25KF)))
		count = 12000000;
	else
		count = 6000000;
#else
	unsigned long flags;

	local_irq_save(flags);

	/* Start counter exactly on falling edge of update flag. */
	while (CMOS_READ(RTC_REG_A) & RTC_UIP);
	while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));

	/* Start the R4k counter. */
	write_c0_count(0);

	/* Read counter exactly on falling edge of update flag. */
	while (CMOS_READ(RTC_REG_A) & RTC_UIP);
	while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));

	count = read_c0_count();

	/* Restore interrupts. */
	local_irq_restore(flags);
#endif
136
137 mips_hpt_frequency = count;
138
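	/*
	 * count was taken at the Count register rate; except on the
	 * 20Kc/25Kf, Count ticks every other CPU cycle, so double it
	 * to get the CPU clock.
	 */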
139 if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
140 (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
141 count *= 2;
142
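	/*
	 * Round to the nearest 10 kHz: e.g. a measured 11996347 Hz
	 * becomes 12001347 after adding 5000, then 12000000 once the
	 * remainder modulo 10000 is dropped.
	 */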
	count += 5000;	/* round */
	count -= count % 10000;

	return count;
}

void __init sim_time_init(void)
{
	unsigned int est_freq;
	unsigned long flags;

	local_irq_save(flags);

	/* Set data mode - binary. */
	CMOS_WRITE(CMOS_READ(RTC_CONTROL) | RTC_DM_BINARY, RTC_CONTROL);

	est_freq = estimate_cpu_frequency();

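	/*
	 * est_freq / 1000000 is the whole-MHz part; the second argument
	 * scales the remainder to hundredths of a MHz for the %02d field.
	 */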
	printk(KERN_INFO "CPU frequency %d.%02d MHz\n", est_freq / 1000000,
	       (est_freq % 1000000) * 100 / 1000000);

	cpu_khz = est_freq / 1000;

	local_irq_restore(flags);
}

static int mips_cpu_timer_irq;

static void mips_timer_dispatch(void)
{
	do_IRQ(mips_cpu_timer_irq);
}


void __init plat_timer_setup(struct irqaction *irq)
{
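	/*
	 * Route the CP0 counter/compare interrupt: with an external
	 * interrupt controller (veic) it arrives through vector
	 * MSC01E_INT_CPUCTR, otherwise it is CPU interrupt
	 * cp0_compare_irq (usually 7) relative to MIPS_CPU_IRQ_BASE.
	 */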
	if (cpu_has_veic) {
		set_vi_handler(MSC01E_INT_CPUCTR, mips_timer_dispatch);
		mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR;
	} else {
		if (cpu_has_vint)
			set_vi_handler(cp0_compare_irq, mips_timer_dispatch);
		mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	}

	/* We are using the CPU counter for timer interrupts. */
	irq->handler = sim_timer_interrupt;
	setup_irq(mips_cpu_timer_irq, irq);

#ifdef CONFIG_SMP
	/*
	 * irq_desc(riptor) is a global resource; when the interrupt
	 * overlaps on separate CPUs, the first one tries to handle the
	 * second interrupt.  The effect is that the interrupt remains
	 * disabled on the second CPU.  Mark the interrupt with
	 * IRQ_PER_CPU to avoid any confusion.
	 */
	irq_desc[mips_cpu_timer_irq].status |= IRQ_PER_CPU;
	set_irq_handler(mips_cpu_timer_irq, handle_percpu_irq);
#endif
}