/*
 * SGI UltraViolet TLB flush routines.
 *
 * (c) 2008 Cliff Wickman <cpw@sgi.com>, SGI.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>

#include <asm/mmu_context.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
#include <asm/genapic.h>
#include <asm/idle.h>
#include <asm/tsc.h>
#include <asm/irq_vectors.h>

#include <mach_apic.h>

static struct bau_control **uv_bau_table_bases __read_mostly;
static int uv_bau_retry_limit __read_mostly;

/* position of pnode (which is nasid>>1): */
static int uv_nshift __read_mostly;

static unsigned long uv_mmask __read_mostly;

static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);

/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
 * hardware but the resource has not been released. In that case our
 * clear of the Timeout bit (as well) will free the resource. No reply will
 * be sent (the hardware will only do one reply per message).
 */
static void uv_reply_to_message(int resource,
                                struct bau_payload_queue_entry *msg,
                                struct bau_msg_status *msp)
{
        unsigned long dw;

        dw = (1 << (resource + UV_SW_ACK_NPENDING)) | (1 << resource);
        msg->replied_to = 1;
        msg->sw_ack_vector = 0;
        if (msp)
                msp->seen_by.bits = 0;
        uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
}
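
/*
 * Worked example of the single MMR write above (a sketch, not code from
 * this file): for software-ack resource 2, and assuming UV_SW_ACK_NPENDING
 * is 8, the constructed word is
 *
 *      dw = (1 << (2 + 8)) | (1 << 2);         i.e. 0x404
 *
 * so one store clears both the Pending bit (bit 2) and the Timeout bit
 * (bit 10) for that resource, freeing a timed-out message with the same
 * write that acknowledges a normal one.
 */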

/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpus may come here at the same time for this message.
 */
static void uv_bau_process_message(struct bau_payload_queue_entry *msg,
                                   int msg_slot, int sw_ack_slot)
{
        unsigned long this_cpu_mask;
        struct bau_msg_status *msp;
        int cpu;

        msp = __get_cpu_var(bau_control).msg_statuses + msg_slot;
        cpu = uv_blade_processor_id();
        msg->number_of_cpus =
            uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
        this_cpu_mask = 1UL << cpu;
        if (msp->seen_by.bits & this_cpu_mask)
                return;
        atomic_or_long(&msp->seen_by.bits, this_cpu_mask);

        if (msg->replied_to == 1)
                return;

        if (msg->address == TLB_FLUSH_ALL) {
                local_flush_tlb();
                __get_cpu_var(ptcstats).alltlb++;
        } else {
                __flush_tlb_one(msg->address);
                __get_cpu_var(ptcstats).onetlb++;
        }

        __get_cpu_var(ptcstats).requestee++;

        atomic_inc_short(&msg->acknowledge_count);
        if (msg->number_of_cpus == msg->acknowledge_count)
                uv_reply_to_message(sw_ack_slot, msg, msp);
}
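
/*
 * Descriptive note (not from the original source): seen_by records which
 * cpus have merely looked at this message, while acknowledge_count counts
 * the cpus that actually processed it.  Only the cpu whose
 * atomic_inc_short() raises the count to number_of_cpus performs the
 * software ack, so exactly one reply goes back to the sender no matter
 * how many cpus take the interrupt concurrently.
 */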

/*
 * Examine the payload queue on one distribution node to see
 * which messages have not been seen, and which cpu(s) have not seen them.
 *
 * Returns the number of cpus that have not responded.
 */
static int uv_examine_destination(struct bau_control *bau_tablesp, int sender)
{
        struct bau_payload_queue_entry *msg;
        struct bau_msg_status *msp;
        int count = 0;
        int i;
        int j;

        for (msg = bau_tablesp->va_queue_first, i = 0; i < DEST_Q_SIZE;
             msg++, i++) {
                if ((msg->sending_cpu == sender) && (!msg->replied_to)) {
                        msp = bau_tablesp->msg_statuses + i;
                        printk(KERN_DEBUG
                        "slot %d: address:%#lx %d of %d, not cpu(s): ",
                                i, msg->address, msg->acknowledge_count,
                                msg->number_of_cpus);
                        for (j = 0; j < msg->number_of_cpus; j++) {
                                if (!((1L << j) & msp->seen_by.bits)) {
                                        count++;
                                        printk("%d ", j);
                                }
                        }
                        printk("\n");
                }
        }
        return count;
}

/*
 * Examine the payload queue on all the distribution nodes to see
 * which messages have not been seen, and which cpu(s) have not seen them.
 *
 * Returns the number of cpus that have not responded.
 */
static int uv_examine_destinations(struct bau_target_nodemask *distribution)
{
        int sender;
        int i;
        int count = 0;

        sender = smp_processor_id();
        for (i = 0; i < sizeof(struct bau_target_nodemask) * BITSPERBYTE; i++) {
                if (!bau_node_isset(i, distribution))
                        continue;
                count += uv_examine_destination(uv_bau_table_bases[i], sender);
        }
        return count;
}

/*
 * wait for completion of a broadcast message
 *
 * return COMPLETE, RETRY or GIVEUP
 */
static int uv_wait_completion(struct bau_desc *bau_desc,
                              unsigned long mmr_offset, int right_shift)
{
        int exams = 0;
        long destination_timeouts = 0;
        long source_timeouts = 0;
        unsigned long descriptor_status;

        while ((descriptor_status = (((unsigned long)
                uv_read_local_mmr(mmr_offset) >>
                        right_shift) & UV_ACT_STATUS_MASK)) !=
                        DESC_STATUS_IDLE) {
                if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
                        source_timeouts++;
                        if (source_timeouts > SOURCE_TIMEOUT_LIMIT)
                                source_timeouts = 0;
                        __get_cpu_var(ptcstats).s_retry++;
                        return FLUSH_RETRY;
                }
                /*
                 * spin here looking for progress at the destinations
                 */
                if (descriptor_status == DESC_STATUS_DESTINATION_TIMEOUT) {
                        destination_timeouts++;
                        if (destination_timeouts > DESTINATION_TIMEOUT_LIMIT) {
                                /*
                                 * returns number of cpus not responding
                                 */
                                if (uv_examine_destinations
                                    (&bau_desc->distribution) == 0) {
                                        __get_cpu_var(ptcstats).d_retry++;
                                        return FLUSH_RETRY;
                                }
                                exams++;
                                if (exams >= uv_bau_retry_limit) {
                                        printk(KERN_DEBUG
                                               "uv_flush_tlb_others: "
                                               "giving up on cpu %d\n",
                                               smp_processor_id());
                                        return FLUSH_GIVEUP;
                                }
                                /*
                                 * delays can hang the simulator
                                 * udelay(1000);
                                 */
                                destination_timeouts = 0;
                        }
                }
        }
        return FLUSH_COMPLETE;
}
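
/*
 * Worked example of the status extraction above (a sketch, assuming
 * UV_ACT_STATUS_SIZE is 2 and UV_CPUS_PER_ACT_STATUS is 32, per the
 * caller below): the hub packs one 2-bit descriptor status per cpu into
 * the two ACTIVATION_STATUS MMRs, so for hub cpu 3,
 *
 *      right_shift = 3 * UV_ACT_STATUS_SIZE;           == 6
 *      status = (uv_read_local_mmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0)
 *                      >> 6) & UV_ACT_STATUS_MASK;
 *
 * and the polling loop exits once that field returns to DESC_STATUS_IDLE.
 */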

/**
 * uv_flush_send_and_wait
 *
 * Send a broadcast and wait for a broadcast message to complete.
 *
 * The cpumaskp mask contains the cpus the broadcast was sent to.
 *
 * Returns 1 if all remote flushing was done. The mask is zeroed.
 * Returns 0 if some remote flushing remains to be done. The mask is left
 * unchanged.
 */
int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
                           cpumask_t *cpumaskp)
{
        int completion_status = 0;
        int right_shift;
        int tries = 0;
        int blade;
        int bit;
        unsigned long mmr_offset;
        unsigned long index;
        cycles_t time1;
        cycles_t time2;

        if (cpu < UV_CPUS_PER_ACT_STATUS) {
                mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
                right_shift = cpu * UV_ACT_STATUS_SIZE;
        } else {
                mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
                right_shift =
                    ((cpu - UV_CPUS_PER_ACT_STATUS) * UV_ACT_STATUS_SIZE);
        }
        time1 = get_cycles();
        do {
                tries++;
                index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
                        cpu;
                uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
                completion_status = uv_wait_completion(bau_desc, mmr_offset,
                                                       right_shift);
        } while (completion_status == FLUSH_RETRY);
        time2 = get_cycles();
        __get_cpu_var(ptcstats).sflush += (time2 - time1);
        if (tries > 1)
                __get_cpu_var(ptcstats).retriesok++;

        if (completion_status == FLUSH_GIVEUP) {
                /*
                 * Cause the caller to do an IPI-style TLB shootdown on
                 * the cpus, all of which are still in the mask.
                 */
                __get_cpu_var(ptcstats).ptc_i++;
                return 0;
        }

        /*
         * Success, so clear the remote cpus from the mask so we don't
         * use the IPI method of shootdown on them.
         */
        for_each_cpu_mask(bit, *cpumaskp) {
                blade = uv_cpu_to_blade_id(bit);
                if (blade == this_blade)
                        continue;
                cpu_clear(bit, *cpumaskp);
        }
        if (!cpus_empty(*cpumaskp))
                return 0;
        return 1;
}
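
/*
 * Descriptive note (not from the original source): the write to
 * UVH_LB_BAU_SB_ACTIVATION_CONTROL above is what launches the broadcast.
 * The low bits select which of this cpu's activation descriptors to send
 * (here simply the cpu number, matching the
 * descriptor_base + UV_ITEMS_PER_DESCRIPTOR * cpu layout used by
 * uv_flush_tlb_others()), and the PUSH bit tells the hub to fetch and
 * transmit that descriptor.
 */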

/**
 * uv_flush_tlb_others - globally purge translation cache of a virtual
 * address or all TLBs
 * @cpumaskp: mask of all cpus in which the address is to be removed
 * @mm: mm_struct containing virtual address range
 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLBs on cpu)
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLBs on specified processors.
 *
 * The caller has derived the cpumaskp from the mm_struct and has subtracted
 * the local cpu from the mask. This function is called only if there
 * are bits set in the mask. (e.g. flush_tlb_page())
 *
 * The cpumaskp is converted into a nodemask of the nodes containing
 * the cpus.
 *
 * Returns 1 if all remote flushing was done.
 * Returns 0 if some remote flushing remains to be done.
 */
int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm,
                        unsigned long va)
{
        int i;
        int bit;
        int blade;
        int cpu;
        int this_blade;
        int locals = 0;
        struct bau_desc *bau_desc;

        cpu = uv_blade_processor_id();
        this_blade = uv_numa_blade_id();
        bau_desc = __get_cpu_var(bau_control).descriptor_base;
        bau_desc += UV_ITEMS_PER_DESCRIPTOR * cpu;

        bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);

        i = 0;
        for_each_cpu_mask(bit, *cpumaskp) {
                blade = uv_cpu_to_blade_id(bit);
                BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1));
                if (blade == this_blade) {
                        locals++;
                        continue;
                }
                bau_node_set(blade, &bau_desc->distribution);
                i++;
        }
        if (i == 0) {
                /*
                 * no off-node flushing; return status for local node
                 */
                if (locals)
                        return 0;
                else
                        return 1;
        }
        __get_cpu_var(ptcstats).requestor++;
        __get_cpu_var(ptcstats).ntargeted += i;

        bau_desc->payload.address = va;
        bau_desc->payload.sending_cpu = smp_processor_id();

        return uv_flush_send_and_wait(cpu, this_blade, bau_desc, cpumaskp);
}
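
/*
 * Caller sketch (an assumption about the generic x86-64 shootdown path,
 * not code from this file): the BAU is tried first and IPIs are used
 * only for whatever it could not flush, e.g.
 *
 *      if (is_uv_system() && uv_flush_tlb_others(&cpumask, mm, va))
 *              return;
 *      ... fall back to conventional flush IPIs for the cpus
 *          still left in cpumask ...
 *
 * which is why a successful send clears the remote cpus from the mask.
 */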

/*
 * The BAU message interrupt comes here.
 * (registered via alloc_intr_gate in uv_bau_init(); see entry_64.S)
 *
 * We received a broadcast assist message.
 *
 * Interrupts may have been disabled; this interrupt could represent
 * the receipt of several messages.
 *
 * All cores/threads on this node get this interrupt.
 * The last one to see it does the s/w ack.
 * (the resource will not be freed until noninterruptible cpus see this
 * interrupt; hardware will time out the s/w ack and reply ERROR)
 */
void uv_bau_message_interrupt(struct pt_regs *regs)
{
        struct bau_payload_queue_entry *va_queue_first;
        struct bau_payload_queue_entry *va_queue_last;
        struct bau_payload_queue_entry *msg;
        struct pt_regs *old_regs = set_irq_regs(regs);
        cycles_t time1;
        cycles_t time2;
        int msg_slot;
        int sw_ack_slot;
        int fw;
        int count = 0;
        unsigned long local_pnode;

        ack_APIC_irq();
        exit_idle();
        irq_enter();

        time1 = get_cycles();

        local_pnode = uv_blade_to_pnode(uv_numa_blade_id());

        va_queue_first = __get_cpu_var(bau_control).va_queue_first;
        va_queue_last = __get_cpu_var(bau_control).va_queue_last;

        msg = __get_cpu_var(bau_control).bau_msg_head;
        while (msg->sw_ack_vector) {
                count++;
                fw = msg->sw_ack_vector;
                msg_slot = msg - va_queue_first;
                sw_ack_slot = ffs(fw) - 1;

                uv_bau_process_message(msg, msg_slot, sw_ack_slot);

                msg++;
                if (msg > va_queue_last)
                        msg = va_queue_first;
                __get_cpu_var(bau_control).bau_msg_head = msg;
        }
        if (!count)
                __get_cpu_var(ptcstats).nomsg++;
        else if (count > 1)
                __get_cpu_var(ptcstats).multmsg++;

        time2 = get_cycles();
        __get_cpu_var(ptcstats).dflush += (time2 - time1);

        irq_exit();
        set_irq_regs(old_regs);
}
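
/*
 * Descriptive note (not from the original source): the payload queue is
 * consumed as a ring of DEST_Q_SIZE entries.  A slot counts as occupied
 * while its sw_ack_vector is nonzero; uv_reply_to_message() zeroes the
 * vector, which is what eventually stops the scan above, and the advance
 * from va_queue_last back to va_queue_first wraps around the ring that
 * uv_payload_queue_init() told the hardware about.
 */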

static void uv_enable_timeouts(void)
{
        int i;
        int blade;
        int last_blade;
        int pnode;
        int cur_cpu = 0;
        unsigned long apicid;

        last_blade = -1;
        for_each_online_node(i) {
                blade = uv_node_to_blade_id(i);
                if (blade == last_blade)
                        continue;
                last_blade = blade;
                apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
                pnode = uv_blade_to_pnode(blade);
                cur_cpu += uv_blade_nr_possible_cpus(i);
        }
}
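
/*
 * Descriptive note (not from the original source): as of this version the
 * loop above only walks the blades, computing each blade's apicid and
 * pnode; no timeout-enabling MMR write is performed yet, so the function
 * is effectively a placeholder for per-blade timeout setup.
 */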

static void *uv_ptc_seq_start(struct seq_file *file, loff_t *offset)
{
        if (*offset < num_possible_cpus())
                return offset;
        return NULL;
}

static void *uv_ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
{
        (*offset)++;
        if (*offset < num_possible_cpus())
                return offset;
        return NULL;
}

static void uv_ptc_seq_stop(struct seq_file *file, void *data)
{
}
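
/*
 * Descriptive note (not from the original source): the three hooks above
 * implement the seq_file iterator protocol with the cpu number itself as
 * the cursor.  start/next return the loff_t pointer while it still names
 * a possible cpu, and uv_ptc_seq_show() below dereferences it to choose
 * which cpu's statistics line to emit.
 */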

/*
 * Display the statistics through /proc.
 * 'data' points to the cpu number.
 */
static int uv_ptc_seq_show(struct seq_file *file, void *data)
{
        struct ptc_stats *stat;
        int cpu;

        cpu = *(loff_t *)data;

        if (!cpu) {
                seq_printf(file,
                "# cpu requestor requestee one all sretry dretry ptc_i ");
                seq_printf(file,
                "sw_ack sflush dflush sok dnomsg dmult starget\n");
        }
        if (cpu < num_possible_cpus() && cpu_online(cpu)) {
                stat = &per_cpu(ptcstats, cpu);
                seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld ",
                           cpu, stat->requestor,
                           stat->requestee, stat->onetlb, stat->alltlb,
                           stat->s_retry, stat->d_retry, stat->ptc_i);
                seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n",
                           uv_read_global_mmr64(uv_blade_to_pnode
                                        (uv_cpu_to_blade_id(cpu)),
                                UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
                           stat->sflush, stat->dflush,
                           stat->retriesok, stat->nomsg,
                           stat->multmsg, stat->ntargeted);
        }

        return 0;
}

/*
 * 0: display the meaning of the statistics
 * >0: set the retry limit
 */
static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
                                 size_t count, loff_t *data)
{
        unsigned long newmode;
        char optstr[64];

        if (count == 0 || count > sizeof(optstr))
                return -EINVAL;
        if (copy_from_user(optstr, user, count))
                return -EFAULT;
        optstr[count - 1] = '\0';
        if (strict_strtoul(optstr, 10, &newmode) < 0) {
                printk(KERN_DEBUG "%s is invalid\n", optstr);
                return -EINVAL;
        }

        if (newmode == 0) {
                printk(KERN_DEBUG "# cpu: cpu number\n");
                printk(KERN_DEBUG
                "requestor: times this cpu was the flush requestor\n");
                printk(KERN_DEBUG
                "requestee: times this cpu was requested to flush its TLBs\n");
                printk(KERN_DEBUG
                "one: times requested to flush a single address\n");
                printk(KERN_DEBUG
                "all: times requested to flush all TLBs\n");
                printk(KERN_DEBUG
                "sretry: number of retries of source-side timeouts\n");
                printk(KERN_DEBUG
                "dretry: number of retries of destination-side timeouts\n");
                printk(KERN_DEBUG
                "ptc_i: times UV fell through to IPI-style flushes\n");
                printk(KERN_DEBUG
                "sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n");
                printk(KERN_DEBUG
                "sflush: cycles spent in uv_flush_tlb_others()\n");
                printk(KERN_DEBUG
                "dflush: cycles spent in handling flush requests\n");
                printk(KERN_DEBUG "sok: successes on retry\n");
                printk(KERN_DEBUG "dnomsg: interrupts with no message\n");
                printk(KERN_DEBUG
                "dmult: interrupts with multiple messages\n");
                printk(KERN_DEBUG "starget: nodes targeted\n");
        } else {
                uv_bau_retry_limit = newmode;
                printk(KERN_DEBUG "timeout retry limit:%d\n",
                       uv_bau_retry_limit);
        }

        return count;
}
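
/*
 * Usage sketch (assuming UV_PTC_BASENAME resolves to a path such as
 * /proc/sgi_uv/ptc_statistics; see uv_ptc_init() below):
 *
 *      cat /proc/sgi_uv/ptc_statistics         - one stats line per cpu
 *      echo 0 > /proc/sgi_uv/ptc_statistics    - log the column legend
 *      echo 32 > /proc/sgi_uv/ptc_statistics   - set the retry limit to 32
 */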

static const struct seq_operations uv_ptc_seq_ops = {
        .start = uv_ptc_seq_start,
        .next = uv_ptc_seq_next,
        .stop = uv_ptc_seq_stop,
        .show = uv_ptc_seq_show
};

static int uv_ptc_proc_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &uv_ptc_seq_ops);
}

static const struct file_operations proc_uv_ptc_operations = {
        .open = uv_ptc_proc_open,
        .read = seq_read,
        .write = uv_ptc_proc_write,
        .llseek = seq_lseek,
        .release = seq_release,
};

static int __init uv_ptc_init(void)
{
        struct proc_dir_entry *proc_uv_ptc;

        if (!is_uv_system())
                return 0;

        proc_uv_ptc = create_proc_entry(UV_PTC_BASENAME, 0444, NULL);
        if (!proc_uv_ptc) {
                printk(KERN_ERR "unable to create %s proc entry\n",
                       UV_PTC_BASENAME);
                return -EINVAL;
        }
        proc_uv_ptc->proc_fops = &proc_uv_ptc_operations;
        return 0;
}

/*
 * begin the initialization of the per-blade control structures
 */
static struct bau_control * __init uv_table_bases_init(int blade, int node)
{
        int i;
        int *ip;
        struct bau_msg_status *msp;
        struct bau_control *bau_tabp;

        bau_tabp =
            kmalloc_node(sizeof(struct bau_control), GFP_KERNEL, node);
        BUG_ON(!bau_tabp);

        bau_tabp->msg_statuses =
            kmalloc_node(sizeof(struct bau_msg_status) *
                         DEST_Q_SIZE, GFP_KERNEL, node);
        BUG_ON(!bau_tabp->msg_statuses);

        for (i = 0, msp = bau_tabp->msg_statuses; i < DEST_Q_SIZE; i++, msp++)
                bau_cpubits_clear(&msp->seen_by, (int)
                                  uv_blade_nr_possible_cpus(blade));

        bau_tabp->watching =
            kmalloc_node(sizeof(int) * DEST_NUM_RESOURCES, GFP_KERNEL, node);
        BUG_ON(!bau_tabp->watching);

        for (i = 0, ip = bau_tabp->watching; i < DEST_NUM_RESOURCES; i++, ip++)
                *ip = 0;

        uv_bau_table_bases[blade] = bau_tabp;

        return bau_tabp;
}
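
/*
 * Descriptive note (not from the original source): kmalloc_node() is used
 * above so that a blade's control structures are placed in that blade's
 * own node-local memory, keeping the hot interrupt-side accesses off the
 * NUMA interconnect.
 */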

/*
 * finish the initialization of the per-blade control structures
 */
static void __init
uv_table_bases_finish(int blade, int node, int cur_cpu,
                      struct bau_control *bau_tablesp,
                      struct bau_desc *adp)
{
        struct bau_control *bcp;
        int i;

        for (i = cur_cpu; i < cur_cpu + uv_blade_nr_possible_cpus(blade); i++) {
                bcp = (struct bau_control *)&per_cpu(bau_control, i);

                bcp->bau_msg_head = bau_tablesp->va_queue_first;
                bcp->va_queue_first = bau_tablesp->va_queue_first;
                bcp->va_queue_last = bau_tablesp->va_queue_last;
                bcp->watching = bau_tablesp->watching;
                bcp->msg_statuses = bau_tablesp->msg_statuses;
                bcp->descriptor_base = adp;
        }
}

/*
 * initialize the sending side's sending buffers
 */
static struct bau_desc * __init
uv_activation_descriptor_init(int node, int pnode)
{
        int i;
        unsigned long pa;
        unsigned long m;
        unsigned long n;
        unsigned long mmr_image;
        struct bau_desc *adp;
        struct bau_desc *ad2;

        adp = (struct bau_desc *)kmalloc_node(16384, GFP_KERNEL, node);
        BUG_ON(!adp);

        pa = __pa((unsigned long)adp);
        n = pa >> uv_nshift;
        m = pa & uv_mmask;

        mmr_image = uv_read_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE);
        if (mmr_image) {
                uv_write_global_mmr64(pnode, (unsigned long)
                                      UVH_LB_BAU_SB_DESCRIPTOR_BASE,
                                      (n << UV_DESC_BASE_PNODE_SHIFT | m));
        }

        for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) {
                memset(ad2, 0, sizeof(struct bau_desc));
                ad2->header.sw_ack_flag = 1;
                ad2->header.base_dest_nodeid =
                    uv_blade_to_pnode(uv_cpu_to_blade_id(0));
                ad2->header.command = UV_NET_ENDPOINT_INTD;
                ad2->header.int_both = 1;
                /*
                 * all others need to be set to zero:
                 *   fairness chaining multilevel count replied_to
                 */
        }
        return adp;
}
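
/*
 * Descriptive note (not from the original source): the descriptor base
 * handed to the hub is a physical address split into node and offset
 * parts, n = pa >> uv_nshift and m = pa & uv_mmask, then repacked as
 * (n << UV_DESC_BASE_PNODE_SHIFT | m), the format the
 * UVH_LB_BAU_SB_DESCRIPTOR_BASE MMR expects, so the hub can fetch
 * descriptors directly from this node's memory.
 */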

/*
 * initialize the destination side's receiving buffers
 */
static struct bau_payload_queue_entry * __init
uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
{
        struct bau_payload_queue_entry *pqp;
        char *cp;

        pqp = (struct bau_payload_queue_entry *) kmalloc_node(
                (DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry),
                GFP_KERNEL, node);
        BUG_ON(!pqp);

        cp = (char *)pqp + 31;
        pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
        bau_tablesp->va_queue_first = pqp;
        uv_write_global_mmr64(pnode,
                              UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
                              ((unsigned long)pnode <<
                               UV_PAYLOADQ_PNODE_SHIFT) |
                              uv_physnodeaddr(pqp));
        uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
                              uv_physnodeaddr(pqp));
        bau_tablesp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
        uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST,
                              (unsigned long)
                              uv_physnodeaddr(bau_tablesp->va_queue_last));
        memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE);

        return pqp;
}
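
/*
 * Descriptive note (not from the original source): the hub requires a
 * 32-byte-aligned queue, hence the over-allocation by one extra entry
 * and the round-up
 *
 *      cp  = (char *)pqp + 31;
 *      pqp = (struct bau_payload_queue_entry *)
 *                      (((unsigned long)cp >> 5) << 5);
 *
 * which steps past any misalignment and masks off the low five bits,
 * yielding the first 32-byte boundary at or above the allocation.
 */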

/*
 * Initialization of each UV blade's structures
 */
static int __init uv_init_blade(int blade, int node, int cur_cpu)
{
        int pnode;
        unsigned long pa;
        unsigned long apicid;
        struct bau_desc *adp;
        struct bau_payload_queue_entry *pqp;
        struct bau_control *bau_tablesp;

        bau_tablesp = uv_table_bases_init(blade, node);
        pnode = uv_blade_to_pnode(blade);
        adp = uv_activation_descriptor_init(node, pnode);
        pqp = uv_payload_queue_init(node, pnode, bau_tablesp);
        uv_table_bases_finish(blade, node, cur_cpu, bau_tablesp, adp);
        /*
         * the below initialization can't be done in firmware because the
         * messaging IRQ will be determined by the OS
         */
        apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
        pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG);
        if ((pa & 0xff) != UV_BAU_MESSAGE) {
                uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
                                      ((apicid << 32) | UV_BAU_MESSAGE));
        }
        return 0;
}

/*
 * Initialization of BAU-related structures
 */
static int __init uv_bau_init(void)
{
        int blade;
        int node;
        int nblades;
        int last_blade;
        int cur_cpu = 0;

        if (!is_uv_system())
                return 0;

        uv_bau_retry_limit = 1;
        uv_nshift = uv_hub_info->n_val;
        uv_mmask = (1UL << uv_hub_info->n_val) - 1;
        nblades = 0;
        last_blade = -1;
        for_each_online_node(node) {
                blade = uv_node_to_blade_id(node);
                if (blade == last_blade)
                        continue;
                last_blade = blade;
                nblades++;
        }
        uv_bau_table_bases = (struct bau_control **)
            kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL);
        BUG_ON(!uv_bau_table_bases);

        last_blade = -1;
        for_each_online_node(node) {
                blade = uv_node_to_blade_id(node);
                if (blade == last_blade)
                        continue;
                last_blade = blade;
                uv_init_blade(blade, node, cur_cpu);
                cur_cpu += uv_blade_nr_possible_cpus(blade);
        }
        alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
        uv_enable_timeouts();

        return 0;
}
__initcall(uv_bau_init);
__initcall(uv_ptc_init);