/*
 * SGI UltraViolet TLB flush routines.
 *
 * (c) 2008 Cliff Wickman <cpw@sgi.com>, SGI.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>

#include <asm/mmu_context.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
#include <asm/genapic.h>
#include <asm/idle.h>
#include <asm/tsc.h>
#include <asm/irq_vectors.h>

#include <mach_apic.h>

static struct bau_control **uv_bau_table_bases __read_mostly;
static int uv_bau_retry_limit __read_mostly;

/* position of pnode (which is nasid>>1): */
static int uv_nshift __read_mostly;

static unsigned long uv_mmask __read_mostly;

static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);

/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
 * hardware but the resource has not been released. In that case our
 * clear of the Timeout bit (as well) will free the resource. No reply will
 * be sent (the hardware will only do one reply per message).
 */
static void uv_reply_to_message(int resource,
				struct bau_payload_queue_entry *msg,
				struct bau_msg_status *msp)
{
	unsigned long dw;

	dw = (1 << (resource + UV_SW_ACK_NPENDING)) | (1 << resource);
	msg->replied_to = 1;
	msg->sw_ack_vector = 0;
	if (msp)
		msp->seen_by.bits = 0;
	uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
}
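
/*
 * Worked example for uv_reply_to_message() above (illustrative only;
 * assumes UV_SW_ACK_NPENDING is 8): releasing resource 3 writes
 *	dw = (1 << 11) | (1 << 3) = 0x808
 * to the ..._SOFTWARE_ACKNOWLEDGE_ALIAS MMR, clearing that resource's
 * Timeout bit together with its Pending bit.
 */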

/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpu's may come here at the same time for this message.
 */
static void uv_bau_process_message(struct bau_payload_queue_entry *msg,
				   int msg_slot, int sw_ack_slot)
{
	unsigned long this_cpu_mask;
	struct bau_msg_status *msp;
	int cpu;

	msp = __get_cpu_var(bau_control).msg_statuses + msg_slot;
	cpu = uv_blade_processor_id();
	msg->number_of_cpus =
	    uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
	this_cpu_mask = 1UL << cpu;
	if (msp->seen_by.bits & this_cpu_mask)
		return;
	atomic_or_long(&msp->seen_by.bits, this_cpu_mask);

	if (msg->replied_to == 1)
		return;

	if (msg->address == TLB_FLUSH_ALL) {
		local_flush_tlb();
		__get_cpu_var(ptcstats).alltlb++;
	} else {
		__flush_tlb_one(msg->address);
		__get_cpu_var(ptcstats).onetlb++;
	}

	__get_cpu_var(ptcstats).requestee++;

	atomic_inc_short(&msg->acknowledge_count);
	if (msg->number_of_cpus == msg->acknowledge_count)
		uv_reply_to_message(sw_ack_slot, msg, msp);
}
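
/*
 * Illustrative flow for uv_bau_process_message() above: with, say, 4
 * cpus online on the blade, each cpu marks itself in msp->seen_by and
 * bumps msg->acknowledge_count; the cpu whose increment makes
 * acknowledge_count equal number_of_cpus (the 4th here) is the one
 * that calls uv_reply_to_message() to release the s/w ack resource.
 */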

/*
 * Examine the payload queue on one distribution node to see
 * which messages have not been seen, and which cpu(s) have not seen them.
 *
 * Returns the number of cpu's that have not responded.
 */
static int uv_examine_destination(struct bau_control *bau_tablesp, int sender)
{
	struct bau_payload_queue_entry *msg;
	struct bau_msg_status *msp;
	int count = 0;
	int i;
	int j;

	for (msg = bau_tablesp->va_queue_first, i = 0; i < DEST_Q_SIZE;
	     msg++, i++) {
		if ((msg->sending_cpu == sender) && (!msg->replied_to)) {
			msp = bau_tablesp->msg_statuses + i;
			printk(KERN_DEBUG
			       "blade %d: address:%#lx %d of %d, not cpu(s): ",
			       i, msg->address, msg->acknowledge_count,
			       msg->number_of_cpus);
			for (j = 0; j < msg->number_of_cpus; j++) {
				if (!((1L << j) & msp->seen_by.bits)) {
					count++;
					printk("%d ", j);
				}
			}
			printk("\n");
		}
	}
	return count;
}

/*
 * Examine the payload queue on all the distribution nodes to see
 * which messages have not been seen, and which cpu(s) have not seen them.
 *
 * Returns the number of cpu's that have not responded.
 */
static int uv_examine_destinations(struct bau_target_nodemask *distribution)
{
	int sender;
	int i;
	int count = 0;

	sender = smp_processor_id();
	for (i = 0; i < sizeof(struct bau_target_nodemask) * BITSPERBYTE; i++) {
		if (!bau_node_isset(i, distribution))
			continue;
		count += uv_examine_destination(uv_bau_table_bases[i], sender);
	}
	return count;
}

/*
 * wait for completion of a broadcast message
 *
 * return COMPLETE, RETRY or GIVEUP
 */
static int uv_wait_completion(struct bau_desc *bau_desc,
			      unsigned long mmr_offset, int right_shift)
{
	int exams = 0;
	long destination_timeouts = 0;
	long source_timeouts = 0;
	unsigned long descriptor_status;

	while ((descriptor_status = (((unsigned long)
		uv_read_local_mmr(mmr_offset) >>
			right_shift) & UV_ACT_STATUS_MASK)) !=
			DESC_STATUS_IDLE) {
		if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
			source_timeouts++;
			if (source_timeouts > SOURCE_TIMEOUT_LIMIT)
				source_timeouts = 0;
			__get_cpu_var(ptcstats).s_retry++;
			return FLUSH_RETRY;
		}
		/*
		 * spin here looking for progress at the destinations
		 */
		if (descriptor_status == DESC_STATUS_DESTINATION_TIMEOUT) {
			destination_timeouts++;
			if (destination_timeouts > DESTINATION_TIMEOUT_LIMIT) {
				/*
				 * returns number of cpus not responding
				 */
				if (uv_examine_destinations
				    (&bau_desc->distribution) == 0) {
					__get_cpu_var(ptcstats).d_retry++;
					return FLUSH_RETRY;
				}
				exams++;
				if (exams >= uv_bau_retry_limit) {
					printk(KERN_DEBUG
					       "uv_flush_tlb_others");
					printk("giving up on cpu %d\n",
					       smp_processor_id());
					return FLUSH_GIVEUP;
				}
				/*
				 * delays can hang the simulator
				   udelay(1000);
				 */
				destination_timeouts = 0;
			}
		}
	}
	return FLUSH_COMPLETE;
}

/**
 * uv_flush_send_and_wait
 *
 * Send a broadcast and wait for a broadcast message to complete.
 *
 * The flush_mask contains the cpus the broadcast was sent to.
 *
 * Returns NULL if all remote flushing was done. The mask is zeroed.
 * Returns @flush_mask if some remote flushing remains to be done. The
 * mask will have some bits still set.
 */
const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
					     struct bau_desc *bau_desc,
					     struct cpumask *flush_mask)
{
	int completion_status = 0;
	int right_shift;
	int tries = 0;
	int blade;
	int bit;
	unsigned long mmr_offset;
	unsigned long index;
	cycles_t time1;
	cycles_t time2;

	if (cpu < UV_CPUS_PER_ACT_STATUS) {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
		right_shift = cpu * UV_ACT_STATUS_SIZE;
	} else {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
		right_shift =
		    ((cpu - UV_CPUS_PER_ACT_STATUS) * UV_ACT_STATUS_SIZE);
	}
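
	/*
	 * Illustrative note: each ACTIVATION_STATUS register packs one
	 * UV_ACT_STATUS_SIZE-bit status field per cpu; assuming 32 cpus
	 * per register, e.g. cpu 40 would read field (40 - 32) of
	 * STATUS_1, i.e. right_shift = 8 * UV_ACT_STATUS_SIZE.
	 */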
	time1 = get_cycles();
	do {
		tries++;
		index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
			cpu;
		uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
		completion_status = uv_wait_completion(bau_desc, mmr_offset,
						       right_shift);
	} while (completion_status == FLUSH_RETRY);
	time2 = get_cycles();
	__get_cpu_var(ptcstats).sflush += (time2 - time1);
	if (tries > 1)
		__get_cpu_var(ptcstats).retriesok++;

	if (completion_status == FLUSH_GIVEUP) {
		/*
		 * Cause the caller to do an IPI-style TLB shootdown on
		 * the cpu's, all of which are still in the mask.
		 */
		__get_cpu_var(ptcstats).ptc_i++;
		return flush_mask;
	}

	/*
	 * Success, so clear the remote cpu's from the mask so we don't
	 * use the IPI method of shootdown on them.
	 */
	for_each_cpu(bit, flush_mask) {
		blade = uv_cpu_to_blade_id(bit);
		if (blade == this_blade)
			continue;
		cpumask_clear_cpu(bit, flush_mask);
	}
	if (!cpumask_empty(flush_mask))
		return flush_mask;
	return NULL;
}
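
/*
 * Note on the loop above: FLUSH_RETRY makes the do-while resend the same
 * descriptor, so one logical broadcast may pulse the activation control
 * MMR several times; only FLUSH_GIVEUP abandons the BAU in favor of IPIs.
 */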

/**
 * uv_flush_tlb_others - globally purge translation cache of a virtual
 * address or all TLB's
 * @cpumask: mask of all cpu's in which the address is to be removed
 * @mm: mm_struct containing virtual address range
 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
 * @cpu: the current cpu
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLB's on specified processors.
 *
 * The caller has derived the cpumask from the mm_struct. This function
 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
 *
 * The cpumask is converted into a nodemask of the nodes containing
 * the cpus.
 *
 * Note that this function should be called with preemption disabled.
 *
 * Returns NULL if all remote flushing was done.
 * Returns pointer to cpumask if some remote flushing remains to be
 * done. The returned pointer is valid till preemption is re-enabled.
 */
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
					  struct mm_struct *mm,
					  unsigned long va, unsigned int cpu)
{
	static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask);
	struct cpumask *flush_mask = &__get_cpu_var(flush_tlb_mask);
	int i;
	int bit;
	int blade;
	int uv_cpu;
	int this_blade;
	int locals = 0;
	struct bau_desc *bau_desc;

	WARN_ON(!in_atomic());

	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));

	uv_cpu = uv_blade_processor_id();
	this_blade = uv_numa_blade_id();
	bau_desc = __get_cpu_var(bau_control).descriptor_base;
	bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu;

	bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);

	i = 0;
	for_each_cpu(bit, flush_mask) {
		blade = uv_cpu_to_blade_id(bit);
		BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1));
		if (blade == this_blade) {
			locals++;
			continue;
		}
		bau_node_set(blade, &bau_desc->distribution);
		i++;
	}
	if (i == 0) {
		/*
		 * no off_node flushing; return status for local node
		 */
		if (locals)
			return flush_mask;
		else
			return NULL;
	}
	__get_cpu_var(ptcstats).requestor++;
	__get_cpu_var(ptcstats).ntargeted += i;

	bau_desc->payload.address = va;
	bau_desc->payload.sending_cpu = cpu;

	return uv_flush_send_and_wait(uv_cpu, this_blade, bau_desc, flush_mask);
}
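
/*
 * Sketch of the expected caller pattern (illustrative; the real call
 * site is the arch flush_tlb_others() path):
 *
 *	mask = uv_flush_tlb_others(cpumask, mm, va, cpu);
 *	if (mask)
 *		(fall back to an IPI-style shootdown of the cpus in mask)
 */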

/*
 * The BAU message interrupt comes here. (registered by set_intr_gate)
 * See entry_64.S
 *
 * We received a broadcast assist message.
 *
 * Interrupts may have been disabled; this interrupt could represent
 * the receipt of several messages.
 *
 * All cores/threads on this node get this interrupt.
 * The last one to see it does the s/w ack.
 * (the resource will not be freed until noninterruptible cpus see this
 *  interrupt; hardware will timeout the s/w ack and reply ERROR)
 */
void uv_bau_message_interrupt(struct pt_regs *regs)
{
	struct bau_payload_queue_entry *va_queue_first;
	struct bau_payload_queue_entry *va_queue_last;
	struct bau_payload_queue_entry *msg;
	struct pt_regs *old_regs = set_irq_regs(regs);
	cycles_t time1;
	cycles_t time2;
	int msg_slot;
	int sw_ack_slot;
	int fw;
	int count = 0;
	unsigned long local_pnode;

	ack_APIC_irq();
	exit_idle();
	irq_enter();

	time1 = get_cycles();

	local_pnode = uv_blade_to_pnode(uv_numa_blade_id());

	va_queue_first = __get_cpu_var(bau_control).va_queue_first;
	va_queue_last = __get_cpu_var(bau_control).va_queue_last;

	msg = __get_cpu_var(bau_control).bau_msg_head;
	while (msg->sw_ack_vector) {
		count++;
		fw = msg->sw_ack_vector;
		msg_slot = msg - va_queue_first;
		sw_ack_slot = ffs(fw) - 1;

		uv_bau_process_message(msg, msg_slot, sw_ack_slot);

		msg++;
		if (msg > va_queue_last)
			msg = va_queue_first;
		__get_cpu_var(bau_control).bau_msg_head = msg;
	}
	if (!count)
		__get_cpu_var(ptcstats).nomsg++;
	else if (count > 1)
		__get_cpu_var(ptcstats).multmsg++;

	time2 = get_cycles();
	__get_cpu_var(ptcstats).dflush += (time2 - time1);

	irq_exit();
	set_irq_regs(old_regs);
}
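
/*
 * Note on the interrupt handler above: the payload queue is consumed
 * circularly; when bau_msg_head passes va_queue_last it wraps back to
 * va_queue_first, and the scan stops at the first entry whose
 * sw_ack_vector is clear.
 */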

/*
 * Walk the online blades; this computes each blade's first-cpu apicid
 * and pnode but does not program any timeout MMRs here.
 */
static void uv_enable_timeouts(void)
{
	int i;
	int blade;
	int last_blade;
	int pnode;
	int cur_cpu = 0;
	unsigned long apicid;

	last_blade = -1;
	for_each_online_node(i) {
		blade = uv_node_to_blade_id(i);
		if (blade == last_blade)
			continue;
		last_blade = blade;
		apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
		pnode = uv_blade_to_pnode(blade);
		cur_cpu += uv_blade_nr_possible_cpus(i);
	}
}

static void *uv_ptc_seq_start(struct seq_file *file, loff_t *offset)
{
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void *uv_ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void uv_ptc_seq_stop(struct seq_file *file, void *data)
{
}

/*
 * Display the statistics thru /proc
 * data points to the cpu number
 */
static int uv_ptc_seq_show(struct seq_file *file, void *data)
{
	struct ptc_stats *stat;
	int cpu;

	cpu = *(loff_t *)data;

	if (!cpu) {
		seq_printf(file,
		"# cpu requestor requestee one all sretry dretry ptc_i ");
		seq_printf(file,
		"sw_ack sflush dflush sok dnomsg dmult starget\n");
	}
	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
		stat = &per_cpu(ptcstats, cpu);
		seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld ",
			   cpu, stat->requestor,
			   stat->requestee, stat->onetlb, stat->alltlb,
			   stat->s_retry, stat->d_retry, stat->ptc_i);
		seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n",
			   uv_read_global_mmr64(uv_blade_to_pnode
					(uv_cpu_to_blade_id(cpu)),
					UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
			   stat->sflush, stat->dflush,
			   stat->retriesok, stat->nomsg,
			   stat->multmsg, stat->ntargeted);
	}

	return 0;
}
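
/*
 * Example of the resulting /proc output (field order per the
 * seq_printf()s above; the numbers are invented for illustration):
 *
 *	# cpu requestor requestee one all sretry dretry ptc_i sw_ack ...
 *	cpu 0 145 312 301 11 0 2 1 0 88723 41206 2 17 3 580
 */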

/*
 *  0: display meaning of the statistics
 * >0: retry limit
 */
static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
				 size_t count, loff_t *data)
{
	long newmode;
	char optstr[64];

	if (count == 0 || count > sizeof(optstr))
		return -EINVAL;
	if (copy_from_user(optstr, user, count))
		return -EFAULT;
	optstr[count - 1] = '\0';
	if (strict_strtoul(optstr, 10, &newmode) < 0) {
		printk(KERN_DEBUG "%s is invalid\n", optstr);
		return -EINVAL;
	}

	if (newmode == 0) {
		printk(KERN_DEBUG "# cpu: cpu number\n");
		printk(KERN_DEBUG
		"requestor: times this cpu was the flush requestor\n");
		printk(KERN_DEBUG
		"requestee: times this cpu was requested to flush its TLBs\n");
		printk(KERN_DEBUG
		"one: times requested to flush a single address\n");
		printk(KERN_DEBUG
		"all: times requested to flush all TLB's\n");
		printk(KERN_DEBUG
		"sretry: number of retries of source-side timeouts\n");
		printk(KERN_DEBUG
		"dretry: number of retries of destination-side timeouts\n");
		printk(KERN_DEBUG
		"ptc_i: times UV fell through to IPI-style flushes\n");
		printk(KERN_DEBUG
		"sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n");
		printk(KERN_DEBUG
		"sflush_us: cycles spent in uv_flush_tlb_others()\n");
		printk(KERN_DEBUG
		"dflush_us: cycles spent in handling flush requests\n");
		printk(KERN_DEBUG "sok: successes on retry\n");
		printk(KERN_DEBUG "dnomsg: interrupts with no message\n");
		printk(KERN_DEBUG
		"dmult: interrupts with multiple messages\n");
		printk(KERN_DEBUG "starget: nodes targeted\n");
	} else {
		uv_bau_retry_limit = newmode;
		printk(KERN_DEBUG "timeout retry limit:%d\n",
		       uv_bau_retry_limit);
	}

	return count;
}
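
/*
 * Example usage (illustrative; assumes UV_PTC_BASENAME resolves to
 * "sgi_uv/ptc_statistics"):
 *
 *	echo 0 > /proc/sgi_uv/ptc_statistics	# log the field legend
 *	echo 4 > /proc/sgi_uv/ptc_statistics	# set the retry limit to 4
 */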
553
554static const struct seq_operations uv_ptc_seq_ops = {
Ingo Molnardc163a42008-06-18 14:15:43 +0200555 .start = uv_ptc_seq_start,
556 .next = uv_ptc_seq_next,
557 .stop = uv_ptc_seq_stop,
558 .show = uv_ptc_seq_show
Cliff Wickman18129242008-06-02 08:56:14 -0500559};
560
Cliff Wickmanb194b122008-06-12 08:23:48 -0500561static int uv_ptc_proc_open(struct inode *inode, struct file *file)
Cliff Wickman18129242008-06-02 08:56:14 -0500562{
563 return seq_open(file, &uv_ptc_seq_ops);
564}
565
566static const struct file_operations proc_uv_ptc_operations = {
Cliff Wickmanb194b122008-06-12 08:23:48 -0500567 .open = uv_ptc_proc_open,
568 .read = seq_read,
569 .write = uv_ptc_proc_write,
570 .llseek = seq_lseek,
571 .release = seq_release,
Cliff Wickman18129242008-06-02 08:56:14 -0500572};
573
Cliff Wickmanb194b122008-06-12 08:23:48 -0500574static int __init uv_ptc_init(void)
Cliff Wickman18129242008-06-02 08:56:14 -0500575{
Cliff Wickmanb194b122008-06-12 08:23:48 -0500576 struct proc_dir_entry *proc_uv_ptc;
Cliff Wickman18129242008-06-02 08:56:14 -0500577
578 if (!is_uv_system())
579 return 0;
580
Cliff Wickman18129242008-06-02 08:56:14 -0500581 proc_uv_ptc = create_proc_entry(UV_PTC_BASENAME, 0444, NULL);
582 if (!proc_uv_ptc) {
583 printk(KERN_ERR "unable to create %s proc entry\n",
584 UV_PTC_BASENAME);
585 return -EINVAL;
586 }
587 proc_uv_ptc->proc_fops = &proc_uv_ptc_operations;
588 return 0;
589}
590
Cliff Wickmanb194b122008-06-12 08:23:48 -0500591/*
592 * begin the initialization of the per-blade control structures
593 */
594static struct bau_control * __init uv_table_bases_init(int blade, int node)
Cliff Wickman18129242008-06-02 08:56:14 -0500595{
Cliff Wickmanb194b122008-06-12 08:23:48 -0500596 int i;
Cliff Wickmanb194b122008-06-12 08:23:48 -0500597 struct bau_msg_status *msp;
Ingo Molnardc163a42008-06-18 14:15:43 +0200598 struct bau_control *bau_tabp;
Cliff Wickmanb194b122008-06-12 08:23:48 -0500599
Ingo Molnardc163a42008-06-18 14:15:43 +0200600 bau_tabp =
Cliff Wickmanb194b122008-06-12 08:23:48 -0500601 kmalloc_node(sizeof(struct bau_control), GFP_KERNEL, node);
Ingo Molnardc163a42008-06-18 14:15:43 +0200602 BUG_ON(!bau_tabp);
Ingo Molnarb4c286e2008-06-18 14:28:19 +0200603
Ingo Molnardc163a42008-06-18 14:15:43 +0200604 bau_tabp->msg_statuses =
Cliff Wickmanb194b122008-06-12 08:23:48 -0500605 kmalloc_node(sizeof(struct bau_msg_status) *
Ingo Molnardc163a42008-06-18 14:15:43 +0200606 DEST_Q_SIZE, GFP_KERNEL, node);
607 BUG_ON(!bau_tabp->msg_statuses);
Ingo Molnarb4c286e2008-06-18 14:28:19 +0200608
Ingo Molnardc163a42008-06-18 14:15:43 +0200609 for (i = 0, msp = bau_tabp->msg_statuses; i < DEST_Q_SIZE; i++, msp++)
Cliff Wickmanb194b122008-06-12 08:23:48 -0500610 bau_cpubits_clear(&msp->seen_by, (int)
611 uv_blade_nr_possible_cpus(blade));
Ingo Molnarb4c286e2008-06-18 14:28:19 +0200612
Ingo Molnardc163a42008-06-18 14:15:43 +0200613 uv_bau_table_bases[blade] = bau_tabp;
Ingo Molnarb4c286e2008-06-18 14:28:19 +0200614
Ingo Molnard4005242008-06-18 14:51:57 +0200615 return bau_tabp;
Cliff Wickman18129242008-06-02 08:56:14 -0500616}
617
Cliff Wickmanb194b122008-06-12 08:23:48 -0500618/*
619 * finish the initialization of the per-blade control structures
620 */
Ingo Molnarb4c286e2008-06-18 14:28:19 +0200621static void __init
622uv_table_bases_finish(int blade, int node, int cur_cpu,
623 struct bau_control *bau_tablesp,
624 struct bau_desc *adp)
Cliff Wickmanb194b122008-06-12 08:23:48 -0500625{
Cliff Wickmanb194b122008-06-12 08:23:48 -0500626 struct bau_control *bcp;
Ingo Molnarb4c286e2008-06-18 14:28:19 +0200627 int i;
Cliff Wickmanb194b122008-06-12 08:23:48 -0500628
Ingo Molnarb4c286e2008-06-18 14:28:19 +0200629 for (i = cur_cpu; i < cur_cpu + uv_blade_nr_possible_cpus(blade); i++) {
Cliff Wickmanb194b122008-06-12 08:23:48 -0500630 bcp = (struct bau_control *)&per_cpu(bau_control, i);
Ingo Molnarb4c286e2008-06-18 14:28:19 +0200631
632 bcp->bau_msg_head = bau_tablesp->va_queue_first;
633 bcp->va_queue_first = bau_tablesp->va_queue_first;
634 bcp->va_queue_last = bau_tablesp->va_queue_last;
Ingo Molnarb4c286e2008-06-18 14:28:19 +0200635 bcp->msg_statuses = bau_tablesp->msg_statuses;
636 bcp->descriptor_base = adp;
Cliff Wickmanb194b122008-06-12 08:23:48 -0500637 }
638}
639
640/*
641 * initialize the sending side's sending buffers
642 */
Ingo Molnardc163a42008-06-18 14:15:43 +0200643static struct bau_desc * __init
Cliff Wickmanb194b122008-06-12 08:23:48 -0500644uv_activation_descriptor_init(int node, int pnode)
645{
646 int i;
647 unsigned long pa;
648 unsigned long m;
649 unsigned long n;
650 unsigned long mmr_image;
Ingo Molnardc163a42008-06-18 14:15:43 +0200651 struct bau_desc *adp;
652 struct bau_desc *ad2;
Cliff Wickmanb194b122008-06-12 08:23:48 -0500653
Ingo Molnardc163a42008-06-18 14:15:43 +0200654 adp = (struct bau_desc *)
Cliff Wickmanb194b122008-06-12 08:23:48 -0500655 kmalloc_node(16384, GFP_KERNEL, node);
Ingo Molnardc163a42008-06-18 14:15:43 +0200656 BUG_ON(!adp);
Ingo Molnarb4c286e2008-06-18 14:28:19 +0200657
Cliff Wickmanb194b122008-06-12 08:23:48 -0500658 pa = __pa((unsigned long)adp);
659 n = pa >> uv_nshift;
660 m = pa & uv_mmask;
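
	/*
	 * Worked example (illustrative): if uv_nshift (the hub's n_val)
	 * were 39, a descriptor at physical address pa would split into
	 * n = pa >> 39 and offset m = pa & uv_mmask, recombined below
	 * as (n << UV_DESC_BASE_PNODE_SHIFT | m) when programming
	 * UVH_LB_BAU_SB_DESCRIPTOR_BASE.
	 */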

	mmr_image = uv_read_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE);
	if (mmr_image) {
		uv_write_global_mmr64(pnode, (unsigned long)
				      UVH_LB_BAU_SB_DESCRIPTOR_BASE,
				      (n << UV_DESC_BASE_PNODE_SHIFT | m));
	}

	for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) {
		memset(ad2, 0, sizeof(struct bau_desc));
		ad2->header.sw_ack_flag = 1;
		ad2->header.base_dest_nodeid =
		    uv_blade_to_pnode(uv_cpu_to_blade_id(0));
		ad2->header.command = UV_NET_ENDPOINT_INTD;
		ad2->header.int_both = 1;
		/*
		 * all others need to be set to zero:
		 *   fairness chaining multilevel count replied_to
		 */
	}
	return adp;
}

/*
 * initialize the destination side's receiving buffers
 */
static struct bau_payload_queue_entry * __init
uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
{
	struct bau_payload_queue_entry *pqp;
	char *cp;

	pqp = (struct bau_payload_queue_entry *) kmalloc_node(
		(DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry),
		GFP_KERNEL, node);
	BUG_ON(!pqp);

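	/*
	 * One extra queue entry was allocated above so the pointer can
	 * be bumped by up to 31 bytes and masked here, placing the
	 * first payload queue entry on a 32-byte boundary.
	 */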
	cp = (char *)pqp + 31;
	pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
	bau_tablesp->va_queue_first = pqp;
	uv_write_global_mmr64(pnode,
			      UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
			      ((unsigned long)pnode <<
			       UV_PAYLOADQ_PNODE_SHIFT) |
			      uv_physnodeaddr(pqp));
	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
			      uv_physnodeaddr(pqp));
	bau_tablesp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST,
			      (unsigned long)
			      uv_physnodeaddr(bau_tablesp->va_queue_last));
	memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE);

	return pqp;
}

/*
 * Initialization of each UV blade's structures
 */
static int __init uv_init_blade(int blade, int node, int cur_cpu)
{
	int pnode;
	unsigned long pa;
	unsigned long apicid;
	struct bau_desc *adp;
	struct bau_payload_queue_entry *pqp;
	struct bau_control *bau_tablesp;

	bau_tablesp = uv_table_bases_init(blade, node);
	pnode = uv_blade_to_pnode(blade);
	adp = uv_activation_descriptor_init(node, pnode);
	pqp = uv_payload_queue_init(node, pnode, bau_tablesp);
	uv_table_bases_finish(blade, node, cur_cpu, bau_tablesp, adp);
	/*
	 * the below initialization can't be in firmware because the
	 * messaging IRQ will be determined by the OS
	 */
	apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
	pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG);
	if ((pa & 0xff) != UV_BAU_MESSAGE) {
		uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
				      ((apicid << 32) | UV_BAU_MESSAGE));
	}
	return 0;
}

/*
 * Initialization of BAU-related structures
 */
static int __init uv_bau_init(void)
{
	int blade;
	int node;
	int nblades;
	int last_blade;
	int cur_cpu = 0;

	if (!is_uv_system())
		return 0;

	uv_bau_retry_limit = 1;
	uv_nshift = uv_hub_info->n_val;
	uv_mmask = (1UL << uv_hub_info->n_val) - 1;
	nblades = 0;
	last_blade = -1;
	for_each_online_node(node) {
		blade = uv_node_to_blade_id(node);
		if (blade == last_blade)
			continue;
		last_blade = blade;
		nblades++;
	}
	uv_bau_table_bases = (struct bau_control **)
	    kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL);
	BUG_ON(!uv_bau_table_bases);

	last_blade = -1;
	for_each_online_node(node) {
		blade = uv_node_to_blade_id(node);
		if (blade == last_blade)
			continue;
		last_blade = blade;
		uv_init_blade(blade, node, cur_cpu);
		cur_cpu += uv_blade_nr_possible_cpus(blade);
	}
	alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
	uv_enable_timeouts();

	return 0;
}
__initcall(uv_bau_init);
__initcall(uv_ptc_init);