/*
 * Count register synchronisation.
 *
 * All CPUs will have their count registers synchronised to the CPU0 next time
 * value. This can cause a small timewarp for CPU0. All other CPUs should
 * not have done anything significant (but they may have had interrupts
 * enabled briefly - prom_smp_finish() should not be responsible for enabling
 * interrupts...)
 *
 * FIXME: broken for SMTC
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/cpumask.h>

#include <asm/r4k-timer.h>
#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/mipsregs.h>

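/*
 * Shared state for the master/slave rendezvous: count_start_flag tells
 * the slaves that the master is ready, count_reference carries the
 * master's count snapshot to the slaves, and count_count_start /
 * count_count_stop implement the two rendezvous points of each
 * synchronisation pass.
 */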
static atomic_t __cpuinitdata count_start_flag = ATOMIC_INIT(0);
static atomic_t __cpuinitdata count_count_start = ATOMIC_INIT(0);
static atomic_t __cpuinitdata count_count_stop = ATOMIC_INIT(0);
static atomic_t __cpuinitdata count_reference = ATOMIC_INIT(0);

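/*
 * COUNTON is how many cycles past the synchronised value the first
 * timer interrupt is scheduled; NR_LOOPS is the number of warm-up
 * passes through the rendezvous before the counts are written.
 */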
#define COUNTON	100
#define NR_LOOPS 5

void __cpuinit synchronise_count_master(void)
{
	int i;
	unsigned long flags;
	unsigned int initcount;
	int nslaves;

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC needs to synchronise per VPE, not per CPU;
	 * ignore for now.
	 */
	return;
#endif

	printk(KERN_INFO "Synchronize counters across %u CPUs: ",
	       num_online_cpus());

	local_irq_save(flags);

	/*
	 * Notify the slaves that it's time to start
	 */
	atomic_set(&count_reference, read_c0_count());
	atomic_set(&count_start_flag, 1);
	smp_wmb();

	/*
	 * The master initialises its count to the current timer value;
	 * the slaves will use the count_reference snapshot published above.
	 */
	initcount = read_c0_count();

	/*
	 * We loop a few times to get a primed instruction cache,
	 * then the last pass is more or less synchronised and
	 * the master and slaves each set their cycle counters to a known
	 * value all at once. This reduces the chance of having random offsets
	 * between the processors, and guarantees that the maximum
	 * delay between the cycle counters is never bigger than
	 * the latency of information-passing (cachelines) between
	 * two CPUs.
	 */

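	/*
	 * Each pass has two rendezvous points: count_count_start gathers
	 * everyone before the count is written, count_count_stop gathers
	 * everyone after. The master contributes the final increment at
	 * each point, so the slaves spin on '!= ncpus' while the master
	 * spins on '!= nslaves'.
	 */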
	nslaves = num_online_cpus() - 1;
	for (i = 0; i < NR_LOOPS; i++) {
		/* slaves loop on '!= ncpus' */
		while (atomic_read(&count_count_start) != nslaves)
			mb();
		atomic_set(&count_count_stop, 0);
		smp_wmb();

		/* this lets the slaves write their count register */
		atomic_inc(&count_count_start);

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS - 1)
			write_c0_count(initcount);

		/*
		 * Wait for all slaves to leave the synchronization point:
		 */
		while (atomic_read(&count_count_stop) != nslaves)
			mb();
		atomic_set(&count_count_start, 0);
		smp_wmb();
		atomic_inc(&count_count_stop);
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);
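	/*
	 * On R4K-style cores the Compare write also clears any pending
	 * timer interrupt, so no stale tick fires after the warp.
	 */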

	local_irq_restore(flags);

	/*
	 * i386 code reported the skew here, but the
	 * count registers were almost certainly out of sync
	 * so no point in alarming people.
	 */
	printk("done.\n");
}

void __cpuinit synchronise_count_slave(void)
{
	int i;
	unsigned long flags;
	unsigned int initcount;
	int ncpus;

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC needs to synchronise per VPE, not per CPU;
	 * ignore for now.
	 */
	return;
#endif

	local_irq_save(flags);

	/*
	 * Not every CPU is online at the time this gets called,
	 * so we first wait for the master to say everyone is ready.
	 */

	while (!atomic_read(&count_start_flag))
		mb();

	/* Count will be initialised to the master's count_reference snapshot */
	initcount = atomic_read(&count_reference);

	ncpus = num_online_cpus();
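	/*
	 * Mirror of the master loop: check in at count_count_start, write
	 * the count on the last pass, then check in at count_count_stop.
	 */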
	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&count_count_start);
		while (atomic_read(&count_count_start) != ncpus)
			mb();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS - 1)
			write_c0_count(initcount);

		atomic_inc(&count_count_stop);
		while (atomic_read(&count_count_stop) != ncpus)
			mb();
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);

	local_irq_restore(flags);
}
#undef NR_LOOPS