/*
 * arch/blackfin/kernel/time-ts.c
 *
 * Based on arm clockevents implementation and old bfin time tick.
 *
 * Copyright(C) 2008, GeoTechnologies, Vitja Makarov
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/irq.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpufreq.h>

#include <asm/blackfin.h>
#include <asm/time.h>

#ifdef CONFIG_CYCLES_CLOCKSOURCE

/* Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 *  basic equation:
 *		ns = cycles / (freq / ns_per_sec)
 *		ns = cycles * (ns_per_sec / freq)
 *		ns = cycles * (10^9 / (cpu_khz * 10^3))
 *		ns = cycles * (10^6 / cpu_khz)
 *
 *	Then we use scaling math (suggested by george@mvista.com) to get:
 *		ns = cycles * (10^6 * SC / cpu_khz) / SC
 *		ns = cycles * cyc2ns_scale / SC
 *
 *	And since SC is a constant power of two, we can convert the div
 *  into a shift.
 *
 *  We can use khz divisor instead of mhz to keep a better precision, since
 *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 *  (mathieu.desnoyers@polymtl.ca)
 *
 *			-johnstul@us.ibm.com "math is hard, lets go shopping!"
 */

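/*
 * Worked example: with a 500 MHz core clock, cpu_khz = 500000, so
 * cyc2ns_scale = (10^6 << 10) / 500000 = 2048 and
 * cycles_2_ns(cyc) = (cyc * 2048) >> 10 = cyc * 2, i.e. 2 ns per cycle.
 */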
static unsigned long cyc2ns_scale;
#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */

static inline void set_cyc2ns_scale(unsigned long cpu_khz)
{
	cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR) / cpu_khz;
}

static inline unsigned long long cycles_2_ns(cycle_t cyc)
{
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}

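/*
 * __bfin_cycles_off and __bfin_cycles_mod are maintained by the Blackfin
 * cpufreq code so the cycle counter keeps a consistent, monotonic value
 * across core clock frequency changes.
 */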
static cycle_t read_cycles(void)
{
	return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod);
}

unsigned long long sched_clock(void)
{
	return cycles_2_ns(read_cycles());
}

static struct clocksource clocksource_bfin = {
	.name		= "bfin_cycles",
	.rating		= 350,
	.read		= read_cycles,
	.mask		= CLOCKSOURCE_MASK(64),
	.shift		= 22,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

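/*
 * Register the cycle counter as a clocksource.  The mult/shift pair is
 * derived from the core clock frequency, so the timekeeping core converts
 * a cycle delta to nanoseconds as (delta * mult) >> shift.
 */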
static int __init bfin_clocksource_init(void)
{
	set_cyc2ns_scale(get_cclk() / 1000);

	clocksource_bfin.mult = clocksource_hz2mult(get_cclk(), clocksource_bfin.shift);

	if (clocksource_register(&clocksource_bfin))
		panic("failed to register clocksource");

	return 0;
}

#else
# define bfin_clocksource_init()
#endif

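/*
 * Program the next event in oneshot mode: TCOUNT is loaded with the
 * requested number of timer ticks and the core timer interrupt fires
 * once it counts down to zero.
 */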
static int bfin_timer_set_next_event(unsigned long cycles,
				     struct clock_event_device *evt)
{
	bfin_write_TCOUNT(cycles);
	CSYNC();
	return 0;
}

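/*
 * Periodic mode programs TPERIOD and sets TAUTORLD so the hardware reloads
 * TCOUNT every tick: tcount = cclk / (HZ * TIME_SCALE) - 1 ticks per jiffy,
 * with TSCALE prescaling the core clock by TIME_SCALE.  Oneshot mode leaves
 * auto-reload off and relies on bfin_timer_set_next_event() for each event.
 */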
static void bfin_timer_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC: {
		unsigned long tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1);
		bfin_write_TCNTL(TMPWR);
		bfin_write_TSCALE(TIME_SCALE - 1);
		CSYNC();
		bfin_write_TPERIOD(tcount);
		bfin_write_TCOUNT(tcount);
		bfin_write_TCNTL(TMPWR | TMREN | TAUTORLD);
		CSYNC();
		break;
	}
	case CLOCK_EVT_MODE_ONESHOT:
		bfin_write_TSCALE(TIME_SCALE - 1);
		bfin_write_TCOUNT(0);
		bfin_write_TCNTL(TMPWR | TMREN);
		CSYNC();
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		bfin_write_TCNTL(0);
		CSYNC();
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static void __init bfin_timer_init(void)
{
	/* power up the timer, but don't enable it just yet */
	bfin_write_TCNTL(TMPWR);
	CSYNC();

	/* program the TSCALE prescaler and clear the period/count registers */
	bfin_write_TSCALE(TIME_SCALE - 1);
	bfin_write_TPERIOD(0);
	bfin_write_TCOUNT(0);

	/* the timer itself is enabled later, by bfin_timer_set_mode() */
	CSYNC();
}


/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "do_timer()" routine every clocktick
 */
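/*
 * With CONFIG_CORE_TIMER_IRQ_L1 the handler is placed in on-chip L1
 * instruction SRAM (l1_text) to keep timer interrupt latency low.
 */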
#ifdef CONFIG_CORE_TIMER_IRQ_L1
__attribute__((l1_text))
#endif
irqreturn_t timer_interrupt(int irq, void *dev_id);

static struct clock_event_device clockevent_bfin = {
	.name		= "bfin_core_timer",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.shift		= 32,
	.set_next_event = bfin_timer_set_next_event,
	.set_mode	= bfin_timer_set_mode,
};

static struct irqaction bfin_timer_irq = {
	.name		= "Blackfin Core Timer",
	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
	.handler	= timer_interrupt,
	.dev_id		= &clockevent_bfin,
};

irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;
	evt->event_handler(evt);
	return IRQ_HANDLED;
}

static int __init bfin_clockevent_init(void)
{
	unsigned long timer_clk;

	timer_clk = get_cclk() / TIME_SCALE;

	setup_irq(IRQ_CORETMR, &bfin_timer_irq);
	bfin_timer_init();

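	/*
	 * mult converts nanoseconds to ticks of the prescaled core clock;
	 * max_delta_ns corresponds to the full 32-bit TCOUNT range (a delta
	 * of -1, i.e. 0xffffffff) and min_delta_ns to 100 ticks.
	 */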
	clockevent_bfin.mult = div_sc(timer_clk, NSEC_PER_SEC, clockevent_bfin.shift);
	clockevent_bfin.max_delta_ns = clockevent_delta2ns(-1, &clockevent_bfin);
	clockevent_bfin.min_delta_ns = clockevent_delta2ns(100, &clockevent_bfin);
	clockevent_bfin.cpumask = cpumask_of(0);
	clockevents_register_device(&clockevent_bfin);

	return 0;
}

void __init time_init(void)
{
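	/* 37 years of 365 days plus 9 leap days since the 1970 epoch */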
	time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60;	/* 1 Jan 2007 */

#ifdef CONFIG_RTC_DRV_BFIN
	/* [#2663] hack to filter junk RTC values that would cause
	 * userspace to have to deal with time values greater than
	 * 2^31 seconds (which uClibc cannot cope with yet)
	 */
	if ((bfin_read_RTC_STAT() & 0xC0000000) == 0xC0000000) {
		printk(KERN_NOTICE "bfin-rtc: invalid date; resetting\n");
		bfin_write_RTC_STAT(0);
	}
#endif

	/* Initialize xtime. From now on, xtime is updated with timer interrupts */
	xtime.tv_sec = secs_since_1970;
	xtime.tv_nsec = 0;
	set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);

	bfin_clocksource_init();
	bfin_clockevent_init();
}