/*
 *      Copyright (C) 1993-1996 Bas Laarhoven.

 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
 the Free Software Foundation; either version 2, or (at your option)
 any later version.

 This program is distributed in the hope that it will be useful,
 but WITHOUT ANY WARRANTY; without even the implied warranty of
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 GNU General Public License for more details.

 You should have received a copy of the GNU General Public License
 along with this program; see the file COPYING.  If not, write to
 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 *
 * $Source: /homes/cvs/ftape-stacked/ftape/lowlevel/ftape-calibr.c,v $
 * $Revision: 1.2 $
 * $Date: 1997/10/05 19:18:08 $
 *
 *      GP calibration routine for processor speed dependent
 *      functions.
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <asm/system.h>
#include <asm/io.h>
#if defined(__alpha__)
# include <asm/hwrpb.h>
#elif defined(__x86_64__)
# include <asm/msr.h>
# include <asm/timex.h>
#elif defined(__i386__)
# include <linux/timex.h>
#endif
#include <linux/ftape.h>
#include "../lowlevel/ftape-tracing.h"
#include "../lowlevel/ftape-calibr.h"
#include "../lowlevel/fdc-io.h"

#undef DEBUG

#if !defined(__alpha__) && !defined(__i386__) && !defined(__x86_64__)
# error Ftape is not implemented for this architecture!
#endif

#if defined(__alpha__) || defined(__x86_64__)
static unsigned long ps_per_cycle = 0;
#endif

static spinlock_t calibr_lock;

/*
 * Note: On Intel PCs, the clock ticks at 100 Hz (HZ == 100) which is
 * too slow for certain timeouts (and that clock doesn't even tick
 * when interrupts are disabled).  For that reason, the 8254 timer is
 * used directly to implement fine-grained timeouts.  However, on
 * Alpha PCs, the 8254 is *not* used to implement the clock tick
 * (which is 1024 Hz, normally) and the 8254 timer runs at some
 * "random" frequency (it seems to run at 18 Hz, but it's not safe to
 * rely on this value).  Instead, we use the Alpha's "rpcc"
 * instruction to read cycle counts.  As this is a 32 bit counter,
 * it will overflow only once per 30 seconds (on a 200 MHz machine),
 * which is plenty.
 */

unsigned int ftape_timestamp(void)
{
#if defined(__alpha__)
        unsigned long r;

        asm volatile ("rpcc %0" : "=r" (r));
        return r;
#elif defined(__x86_64__)
        unsigned long r;
        rdtscl(r);
        return r;
#elif defined(__i386__)

        /*
         * Note that there is some time between the counter underflowing
         * and jiffies increasing, so the code below won't always give
         * correct output.
         * -Vojtech
         */

        unsigned long flags;
        __u16 lo;
        __u16 hi;

        spin_lock_irqsave(&calibr_lock, flags);
        outb_p(0x00, 0x43);     /* latch the count ASAP */
        lo = inb_p(0x40);       /* read the latched count */
        lo |= inb(0x40) << 8;
        hi = jiffies;
        spin_unlock_irqrestore(&calibr_lock, flags);
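        /*
         * jiffies counts whole ticks of LATCH 8254 cycles each, and the
         * 8254 counts *down* from LATCH.  The elapsed part of the current
         * tick is LATCH - lo, so the total cycle count is
         * hi * LATCH + (LATCH - lo) == (hi + 1) * LATCH - lo.
         */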
        return ((hi + 1) * (unsigned int) LATCH) - lo;  /* downcounter ! */
#endif
}

static unsigned int short_ftape_timestamp(void)
{
#if defined(__alpha__) || defined(__x86_64__)
        return ftape_timestamp();
#elif defined(__i386__)
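        /*
         * Only the within-tick 8254 count is returned here, so the
         * result wraps every 1/HZ; see the note in ftape_timediff().
         */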
        unsigned int count;
        unsigned long flags;

        spin_lock_irqsave(&calibr_lock, flags);
        outb_p(0x00, 0x43);     /* latch the count ASAP */
        count = inb_p(0x40);    /* read the latched count */
        count |= inb(0x40) << 8;
        spin_unlock_irqrestore(&calibr_lock, flags);
        return (LATCH - count); /* normal: downcounter */
#endif
}

static unsigned int diff(unsigned int t0, unsigned int t1)
{
#if defined(__alpha__) || defined(__x86_64__)
        return (t1 - t0);
#elif defined(__i386__)
        /*
         * This is tricky: to work for both short and full ftape_timestamps
         * we have to discriminate between the two.  If it _looks_ like a
         * short stamp that has wrapped around, we assume it is one.  This
         * introduces a small error if the value really was a (very large)
         * delta between full ftape_timestamps.
         */
        return (t1 <= t0 && t0 <= LATCH) ? t1 + LATCH - t0 : t1 - t0;
#endif
}

static unsigned int usecs(unsigned int count)
{
#if defined(__alpha__) || defined(__x86_64__)
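        /* ps_per_cycle is in picoseconds; divide by 10^6 to get usec. */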
        return (ps_per_cycle * count) / 1000000UL;
#elif defined(__i386__)
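        /*
         * count is in 8254 ticks at CLOCK_TICK_RATE (about 1193182 Hz),
         * so usec == count * 10^6 / CLOCK_TICK_RATE.  The form below
         * keeps the intermediate product within 32 bits for counts 100
         * times larger than count * 1000000 would allow; the "+ 50"
         * rounds the divisor to the nearest multiple of 100.
         */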
        return (10000 * count) / ((CLOCK_TICK_RATE + 50) / 100);
#endif
}

unsigned int ftape_timediff(unsigned int t0, unsigned int t1)
{
        /*
         *  Calculate difference in usec for ftape_timestamp results t0 & t1.
         *  Note that on the i386 platform with short time-stamps, the
         *  maximum allowed timespan is 1/HZ or we'll lose ticks!
         */
        return usecs(diff(t0, t1));
}

/*      To get an indication of the I/O performance,
 *      measure the duration of the inb() function.
 */
static void time_inb(void)
{
        int i;
        int t0, t1;
        unsigned long flags;
        int status;
        TRACE_FUN(ft_t_any);

        spin_lock_irqsave(&calibr_lock, flags);
        t0 = short_ftape_timestamp();
        for (i = 0; i < 1000; ++i) {
                status = inb(fdc.msr);
        }
        t1 = short_ftape_timestamp();
        spin_unlock_irqrestore(&calibr_lock, flags);
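        /*
         * 1000 calls were timed, so the total in usec is numerically
         * the cost of a single inb() in nsec.
         */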
        TRACE(ft_t_info, "inb() duration: %d nsec", ftape_timediff(t0, t1));
        TRACE_EXIT;
}

static void init_clock(void)
{
        TRACE_FUN(ft_t_any);

#if defined(__x86_64__)
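        /*
         * cpu_khz is the CPU clock in kHz, so the cycle time in
         * picoseconds is 10^12 / (cpu_khz * 1000) == 10^9 / cpu_khz.
         */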
        ps_per_cycle = 1000000000UL / cpu_khz;
#elif defined(__alpha__)
        extern struct hwrpb_struct *hwrpb;
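        /* hwrpb->cycle_freq is in Hz; 10^12 / freq gives ps per cycle. */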
        ps_per_cycle = (1000*1000*1000*1000UL) / hwrpb->cycle_freq;
#endif
        TRACE_EXIT;
}

/*
 *      Input:  function taking int count as parameter.
 *              pointers to calculated calibration variables.
 */
void ftape_calibrate(char *name,
                     void (*fun) (unsigned int),
                     unsigned int *calibr_count,
                     unsigned int *calibr_time)
{
        static int first_time = 1;
        int i;
        unsigned int tc = 0;
        unsigned int count;
        unsigned int time;
#if defined(__i386__)
        unsigned int old_tc = 0;
        unsigned int old_count = 1;
        unsigned int old_time = 1;
#endif
        TRACE_FUN(ft_t_flow);

        if (first_time) {             /* get idea of I/O performance */
                init_clock();
                time_inb();
                first_time = 0;
        }
        /*    The initial count must be chosen so that on very slow
         *    systems the measurement takes less than one jiffy, while
         *    on very fast systems it still gives reasonable precision.
         */

        count = 40;
        for (i = 0; i < 15; ++i) {
                unsigned int t0;
                unsigned int t1;
                unsigned int once;
                unsigned int multiple;
                unsigned long flags;

                *calibr_count =
                *calibr_time = count;   /* set TC to 1 */
                spin_lock_irqsave(&calibr_lock, flags);
                fun(0);         /* dummy, get code into cache */
                t0 = short_ftape_timestamp();
                fun(0);         /* overhead + one test */
                t1 = short_ftape_timestamp();
                once = diff(t0, t1);
                t0 = short_ftape_timestamp();
                fun(count);     /* overhead + count tests */
                t1 = short_ftape_timestamp();
                multiple = diff(t0, t1);
                spin_unlock_irqrestore(&calibr_lock, flags);
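                /*
                 * "multiple - once" cancels the fixed call overhead and
                 * leaves the time for count - 1 tests, so tc is the
                 * cost of one test in nsec.
                 */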
                time = ftape_timediff(0, multiple - once);
                tc = (1000 * time) / (count - 1);
                TRACE(ft_t_any, "once:%3d us,%6d times:%6d us, TC:%5d ns",
                      usecs(once), count - 1, usecs(multiple), tc);
#if defined(__alpha__) || defined(__x86_64__)
                /*
                 * Increase the calibration count exponentially until the
                 * calibration time exceeds 100 ms.
                 */
                if (time >= 100*1000) {
                        break;
                }
#elif defined(__i386__)
                /*
                 * Increase the count until the resulting time nears
                 * 2/HZ; beyond that tc drops sharply because we start
                 * losing whole LATCH periods.
                 */
                if (tc <= old_tc / 2) {
                        time = old_time;
                        count = old_count;
                        break;
                }
                old_tc = tc;
                old_count = count;
                old_time = time;
#endif
                count *= 2;
        }
        *calibr_count = count - 1;
        *calibr_time  = time;
        TRACE(ft_t_info, "TC for `%s()' = %d nsec (at %d counts)",
              name, (1000 * *calibr_time) / *calibr_count, *calibr_count);
        TRACE_EXIT;
}