/*
 *	SGI UltraViolet TLB flush routines.
 *
 *	(c) 2008-2010 Cliff Wickman <cpw@sgi.com>, SGI.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 */
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/tsc.h>
#include <asm/irq_vectors.h>
#include <asm/timer.h>

/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
static int timeout_base_ns[] = {
		20,
		160,
		1280,
		10240,
		81920,
		655360,
		5242880,
		167772160
};
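
/*
 * A reading of the table above: e.g. an urgency field value of 3 selects
 * timeout_base_ns[3] == 10240 ns.  Each entry is 8x the previous one,
 * except for the final step, which is 32x.
 */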
static int timeout_us;
static int nobau;
static int baudisabled;
static spinlock_t disable_lock;
static cycles_t congested_cycles;

/* tunables: */
static int max_bau_concurrent = MAX_BAU_CONCURRENT;
static int max_bau_concurrent_constant = MAX_BAU_CONCURRENT;
static int plugged_delay = PLUGGED_DELAY;
static int plugsb4reset = PLUGSB4RESET;
static int timeoutsb4reset = TIMEOUTSB4RESET;
static int ipi_reset_limit = IPI_RESET_LIMIT;
static int complete_threshold = COMPLETE_THRESHOLD;
static int congested_response_us = CONGESTED_RESPONSE_US;
static int congested_reps = CONGESTED_REPS;
static int congested_period = CONGESTED_PERIOD;
static struct dentry *tunables_dir;
static struct dentry *tunables_file;

static int __init setup_nobau(char *arg)
{
	nobau = 1;
	return 0;
}
early_param("nobau", setup_nobau);

/* base pnode in this partition */
static int uv_partition_base_pnode __read_mostly;
/* position of pnode (which is nasid>>1): */
static int uv_nshift __read_mostly;
static unsigned long uv_mmask __read_mostly;

static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);
static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);

/*
 * Determine the first node on a uvhub. 'Nodes' are used for kernel
 * memory allocation.
 */
static int __init uvhub_to_first_node(int uvhub)
{
	int node, b;

	for_each_online_node(node) {
		b = uv_node_to_blade_id(node);
		if (uvhub == b)
			return node;
	}
	return -1;
}

/*
 * Determine the apicid of the first cpu on a uvhub.
 */
static int __init uvhub_to_first_apicid(int uvhub)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (uvhub == uv_cpu_to_blade_id(cpu))
			return per_cpu(x86_cpu_to_apicid, cpu);
	return -1;
}

/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
 * hardware but the resource has not been released. In that case our
 * clear of the Timeout bit (as well) will free the resource. No reply will
 * be sent (the hardware will only do one reply per message).
 */
static inline void uv_reply_to_message(struct msg_desc *mdp,
				       struct bau_control *bcp)
{
	unsigned long dw;
	struct bau_payload_queue_entry *msg;

	msg = mdp->msg;
	if (!msg->canceled) {
		dw = (msg->sw_ack_vector << UV_SW_ACK_NPENDING) |
						msg->sw_ack_vector;
		uv_write_local_mmr(
			UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
	}
	msg->replied_to = 1;
	msg->sw_ack_vector = 0;
}

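/*
 * Worked example for uv_reply_to_message() (with UV_SW_ACK_NPENDING
 * assumed to be 8, the width of the pending bitmap): a sw_ack_vector of
 * 0x05 makes dw == 0x0505, so the single write to the ALIAS MMR clears
 * both the timeout bits (15:8) and the pending bits (7:0) for resources
 * 0 and 2, freeing them whether or not they had timed out.
 */
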
/*
 * Process the receipt of a RETRY message
 */
static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
					    struct bau_control *bcp)
{
	int i;
	int cancel_count = 0;
	int slot2;
	unsigned long msg_res;
	unsigned long mmr = 0;
	struct bau_payload_queue_entry *msg;
	struct bau_payload_queue_entry *msg2;
	struct ptc_stats *stat;

	msg = mdp->msg;
	stat = bcp->statp;
	stat->d_retries++;
	/*
	 * cancel any message from msg+1 to the retry itself
	 */
	for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
		if (msg2 > mdp->va_queue_last)
			msg2 = mdp->va_queue_first;
		if (msg2 == msg)
			break;

		/* same conditions for cancellation as uv_do_reset */
		if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
		    (msg2->sw_ack_vector) && ((msg2->sw_ack_vector &
			msg->sw_ack_vector) == 0) &&
		    (msg2->sending_cpu == msg->sending_cpu) &&
		    (msg2->msg_type != MSG_NOOP)) {
			slot2 = msg2 - mdp->va_queue_first;
			mmr = uv_read_local_mmr
				(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
			msg_res = msg2->sw_ack_vector;
			/*
			 * This is a message retry; clear the resources held
			 * by the previous message only if they timed out.
			 * If it has not timed out we have an unexpected
			 * situation to report.
			 */
			if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
				/*
				 * is the resource timed out?
				 * make everyone ignore the cancelled message.
				 */
				msg2->canceled = 1;
				stat->d_canceled++;
				cancel_count++;
				uv_write_local_mmr(
				    UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
					(msg_res << UV_SW_ACK_NPENDING) |
							msg_res);
			}
		}
	}
	if (!cancel_count)
		stat->d_nocanceled++;
}

/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpu's may come here at the same time for this message.
 */
static void uv_bau_process_message(struct msg_desc *mdp,
				   struct bau_control *bcp)
{
	int msg_ack_count;
	short socket_ack_count = 0;
	struct ptc_stats *stat;
	struct bau_payload_queue_entry *msg;
	struct bau_control *smaster = bcp->socket_master;

	/*
	 * This must be a normal message, or retry of a normal message
	 */
	msg = mdp->msg;
	stat = bcp->statp;
	if (msg->address == TLB_FLUSH_ALL) {
		local_flush_tlb();
		stat->d_alltlb++;
	} else {
		__flush_tlb_one(msg->address);
		stat->d_onetlb++;
	}
	stat->d_requestee++;

	/*
	 * One cpu on each uvhub has the additional job on a RETRY
	 * of releasing the resource held by the message that is
	 * being retried.  That message is identified by sending
	 * cpu number.
	 */
	if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
		uv_bau_process_retry_msg(mdp, bcp);

	/*
	 * This is a sw_ack message, so we have to reply to it.
	 * Count each responding cpu on the socket. This avoids
	 * pinging the count's cache line back and forth between
	 * the sockets.
	 */
	socket_ack_count = atomic_add_short_return(1, (struct atomic_short *)
			&smaster->socket_acknowledge_count[mdp->msg_slot]);
	if (socket_ack_count == bcp->cpus_in_socket) {
		/*
		 * Both sockets dump their completed count total into
		 * the message's count.
		 */
		smaster->socket_acknowledge_count[mdp->msg_slot] = 0;
		msg_ack_count = atomic_add_short_return(socket_ack_count,
				(struct atomic_short *)&msg->acknowledge_count);

		if (msg_ack_count == bcp->cpus_in_uvhub) {
			/*
			 * All cpus in uvhub saw it; reply
			 */
			uv_reply_to_message(mdp, bcp);
		}
	}

	return;
}

/*
 * Determine the first cpu on a uvhub.
 */
static int uvhub_to_first_cpu(int uvhub)
{
	int cpu;
	for_each_present_cpu(cpu)
		if (uvhub == uv_cpu_to_blade_id(cpu))
			return cpu;
	return -1;
}

/*
 * Last resort when we get a large number of destination timeouts is
 * to clear resources held by a given cpu.
 * Do this with IPI so that all messages in the BAU message queue
 * can be identified by their nonzero sw_ack_vector field.
 *
 * This is entered for a single cpu on the uvhub.
 * The sender wants this uvhub to free a specific message's
 * sw_ack resources.
 */
static void
uv_do_reset(void *ptr)
{
	int i;
	int slot;
	int count = 0;
	unsigned long mmr;
	unsigned long msg_res;
	struct bau_control *bcp;
	struct reset_args *rap;
	struct bau_payload_queue_entry *msg;
	struct ptc_stats *stat;

	bcp = &per_cpu(bau_control, smp_processor_id());
	rap = (struct reset_args *)ptr;
	stat = bcp->statp;
	stat->d_resets++;

	/*
	 * We're looking for the given sender, and
	 * will free its sw_ack resource.
	 * If all cpu's finally responded after the timeout, its
	 * message 'replied_to' was set.
	 */
	for (msg = bcp->va_queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
		/* uv_do_reset: same conditions for cancellation as
		   uv_bau_process_retry_msg() */
		if ((msg->replied_to == 0) &&
		    (msg->canceled == 0) &&
		    (msg->sending_cpu == rap->sender) &&
		    (msg->sw_ack_vector) &&
		    (msg->msg_type != MSG_NOOP)) {
			/*
			 * make everyone else ignore this message
			 */
			msg->canceled = 1;
			slot = msg - bcp->va_queue_first;
			count++;
			/*
			 * only reset the resource if it is still pending
			 */
			mmr = uv_read_local_mmr
					(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
			msg_res = msg->sw_ack_vector;
			if (mmr & msg_res) {
				stat->d_rcanceled++;
				uv_write_local_mmr(
				    UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
					(msg_res << UV_SW_ACK_NPENDING) |
							msg_res);
			}
		}
	}
	return;
}

/*
 * Use IPI to get all target uvhubs to release resources held by
 * a given sending cpu number.
 */
static void uv_reset_with_ipi(struct bau_target_uvhubmask *distribution,
			      int sender)
{
	int uvhub;
	int cpu;
	cpumask_t mask;
	struct reset_args reset_args;

	reset_args.sender = sender;

	cpus_clear(mask);
	/* find a single cpu for each uvhub in this distribution mask */
	for (uvhub = 0;
		    uvhub < sizeof(struct bau_target_uvhubmask) * BITSPERBYTE;
		    uvhub++) {
		if (!bau_uvhub_isset(uvhub, distribution))
			continue;
		/* find a cpu for this uvhub */
		cpu = uvhub_to_first_cpu(uvhub);
		cpu_set(cpu, mask);
	}
	/* IPI all cpus; Preemption is already disabled */
	smp_call_function_many(&mask, uv_do_reset, (void *)&reset_args, 1);
	return;
}

static inline unsigned long
cycles_2_us(unsigned long long cyc)
{
	unsigned long long ns;
	unsigned long us;
	ns = (cyc * per_cpu(cyc2ns, smp_processor_id()))
						>> CYC2NS_SCALE_FACTOR;
	us = ns / 1000;
	return us;
}

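/*
 * Conversion note for cycles_2_us() above: the per-cpu cyc2ns value is
 * nanoseconds-per-cycle as a fixed-point fraction scaled by
 * 2^CYC2NS_SCALE_FACTOR, so (cyc * cyc2ns) >> CYC2NS_SCALE_FACTOR yields
 * nanoseconds.  sec_2_cycles() and microsec_2_cycles() below apply the
 * same factor in the inverse direction.
 */
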
/*
 * wait for all cpus on this hub to finish their sends and go quiet
 * leaves uvhub_quiesce set so that no new broadcasts are started by
 * bau_flush_send_and_wait()
 */
static inline void
quiesce_local_uvhub(struct bau_control *hmaster)
{
	atomic_add_short_return(1, (struct atomic_short *)
		&hmaster->uvhub_quiesce);
}

/*
 * mark this quiet-requestor as done
 */
static inline void
end_uvhub_quiesce(struct bau_control *hmaster)
{
	atomic_add_short_return(-1, (struct atomic_short *)
		&hmaster->uvhub_quiesce);
}

/*
 * Wait for completion of a broadcast software ack message
 * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
 */
static int uv_wait_completion(struct bau_desc *bau_desc,
	unsigned long mmr_offset, int right_shift, int this_cpu,
	struct bau_control *bcp, struct bau_control *smaster, long try)
{
	unsigned long descriptor_status;
	cycles_t ttime;
	struct ptc_stats *stat = bcp->statp;
	struct bau_control *hmaster;

	hmaster = bcp->uvhub_master;

	/* spin on the status MMR, waiting for it to go idle */
	while ((descriptor_status = (((unsigned long)
		uv_read_local_mmr(mmr_offset) >>
			right_shift) & UV_ACT_STATUS_MASK)) !=
			DESC_STATUS_IDLE) {
		/*
		 * Our software ack messages may be blocked because there are
		 * no swack resources available.  As long as none of them
		 * has timed out hardware will NACK our message and its
		 * state will stay IDLE.
		 */
		if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
		} else if (descriptor_status ==
					DESC_STATUS_DESTINATION_TIMEOUT) {
			stat->s_dtimeout++;
			ttime = get_cycles();

			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending.  In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 */
			if (cycles_2_us(ttime - bcp->send_message) <
							timeout_us) {
				bcp->conseccompletes = 0;
				return FLUSH_RETRY_PLUGGED;
			}

			bcp->conseccompletes = 0;
			return FLUSH_RETRY_TIMEOUT;
		} else {
			/*
			 * descriptor_status is still BUSY
			 */
			cpu_relax();
		}
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}

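/*
 * How the spin loop above locates its status field (a sketch based on
 * the constants used): the hub packs one UV_ACT_STATUS_SIZE-bit state
 * per descriptor into two ACTIVATION_STATUS MMRs, the first
 * UV_CPUS_PER_ACT_STATUS cpu's in STATUS_0 and the rest in STATUS_1;
 * uv_flush_send_and_wait() below computes mmr_offset and right_shift
 * accordingly, and UV_ACT_STATUS_MASK isolates this cpu's state.
 */
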
static inline cycles_t
sec_2_cycles(unsigned long sec)
{
	unsigned long ns;
	cycles_t cyc;

	ns = sec * 1000000000;
	cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
	return cyc;
}

/*
 * conditionally add 1 to *v, unless *v is >= u
 * return 0 if we cannot add 1 to *v because it is >= u
 * return 1 if we can add 1 to *v because it is < u
 * the add is atomic
 *
 * This is close to atomic_add_unless(), but this allows the 'u' value
 * to be lowered below the current 'v'.  atomic_add_unless can only stop
 * on equal.
 */
static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
{
	spin_lock(lock);
	if (atomic_read(v) >= u) {
		spin_unlock(lock);
		return 0;
	}
	atomic_inc(v);
	spin_unlock(lock);
	return 1;
}

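/*
 * Usage sketch (this is how uv_flush_send_and_wait() below applies it):
 *
 *	atomic_inc_unless_ge(&hmaster->uvhub_lock,
 *			     &hmaster->active_descriptor_count,
 *			     hmaster->max_bau_concurrent);
 *
 * A zero return means the uvhub is already at its concurrency limit and
 * the caller spins; because max_bau_concurrent can be lowered (to 1, on
 * a timeout) below the current count, atomic_add_unless() would not do.
 */
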
/*
 * Completions are taking a very long time due to a congested numalink
 * network.
 */
static void
disable_for_congestion(struct bau_control *bcp, struct ptc_stats *stat)
{
	int tcpu;
	struct bau_control *tbcp;

	/* let only one cpu do this disabling */
	spin_lock(&disable_lock);
	if (!baudisabled && bcp->period_requests &&
	    ((bcp->period_time / bcp->period_requests) > congested_cycles)) {
		/* it becomes this cpu's job to turn on the use of the
		   BAU again */
		baudisabled = 1;
		bcp->set_bau_off = 1;
		bcp->set_bau_on_time = get_cycles() +
			sec_2_cycles(bcp->congested_period);
		stat->s_bau_disabled++;
		for_each_present_cpu(tcpu) {
			tbcp = &per_cpu(bau_control, tcpu);
			tbcp->baudisabled = 1;
		}
	}
	spin_unlock(&disable_lock);
}

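/*
 * In short: when the sampling period's average broadcast time
 * (period_time / period_requests, in cycles) exceeds congested_cycles
 * (the cycle equivalent of the congested_response_us tunable), every
 * cpu's baudisabled flag is set and shootdowns fall back to IPIs until
 * set_bau_on_time, congested_period seconds in the future (see the
 * re-enable path in uv_flush_tlb_others()).
 */
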
/**
 * uv_flush_send_and_wait
 *
 * Send a broadcast and wait for it to complete.
 *
 * The flush_mask contains the cpus the broadcast is to be sent to, plus
 * cpus that are on the local uvhub.
 *
 * Returns 0 if all flushing represented in the mask was done.
 * Returns 1 if it gives up entirely and the original cpu mask is to be
 * returned to the kernel.
 */
int uv_flush_send_and_wait(struct bau_desc *bau_desc,
			   struct cpumask *flush_mask, struct bau_control *bcp)
{
	int right_shift;
	int completion_status = 0;
	int seq_number = 0;
	long try = 0;
	int cpu = bcp->uvhub_cpu;
	int this_cpu = bcp->cpu;
	unsigned long mmr_offset;
	unsigned long index;
	cycles_t time1;
	cycles_t time2;
	cycles_t elapsed;
	struct ptc_stats *stat = bcp->statp;
	struct bau_control *smaster = bcp->socket_master;
	struct bau_control *hmaster = bcp->uvhub_master;

	if (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
			&hmaster->active_descriptor_count,
			hmaster->max_bau_concurrent)) {
		stat->s_throttles++;
		do {
			cpu_relax();
		} while (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
			&hmaster->active_descriptor_count,
			hmaster->max_bau_concurrent));
	}

	while (hmaster->uvhub_quiesce)
		cpu_relax();

	if (cpu < UV_CPUS_PER_ACT_STATUS) {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
		right_shift = cpu * UV_ACT_STATUS_SIZE;
	} else {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
		right_shift =
		    ((cpu - UV_CPUS_PER_ACT_STATUS) * UV_ACT_STATUS_SIZE);
	}
	time1 = get_cycles();
	do {
		if (try == 0) {
			bau_desc->header.msg_type = MSG_REGULAR;
			seq_number = bcp->message_number++;
		} else {
			bau_desc->header.msg_type = MSG_RETRY;
			stat->s_retry_messages++;
		}
		bau_desc->header.sequence = seq_number;
		index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
			bcp->uvhub_cpu;
		bcp->send_message = get_cycles();
		uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
		try++;
		completion_status = uv_wait_completion(bau_desc, mmr_offset,
			right_shift, this_cpu, bcp, smaster, try);

		if (completion_status == FLUSH_RETRY_PLUGGED) {
			/*
			 * Our retries may be blocked by all destination swack
			 * resources being consumed, and a timeout pending. In
			 * that case hardware immediately returns the ERROR
			 * that looks like a destination timeout.
			 */
			udelay(bcp->plugged_delay);
			bcp->plugged_tries++;
			if (bcp->plugged_tries >= bcp->plugsb4reset) {
				bcp->plugged_tries = 0;
				quiesce_local_uvhub(hmaster);
				spin_lock(&hmaster->queue_lock);
				uv_reset_with_ipi(&bau_desc->distribution,
							this_cpu);
				spin_unlock(&hmaster->queue_lock);
				end_uvhub_quiesce(hmaster);
				bcp->ipi_attempts++;
				stat->s_resets_plug++;
			}
		} else if (completion_status == FLUSH_RETRY_TIMEOUT) {
			hmaster->max_bau_concurrent = 1;
			bcp->timeout_tries++;
			udelay(TIMEOUT_DELAY);
			if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
				bcp->timeout_tries = 0;
				quiesce_local_uvhub(hmaster);
				spin_lock(&hmaster->queue_lock);
				uv_reset_with_ipi(&bau_desc->distribution,
								this_cpu);
				spin_unlock(&hmaster->queue_lock);
				end_uvhub_quiesce(hmaster);
				bcp->ipi_attempts++;
				stat->s_resets_timeout++;
			}
		}
		if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
			bcp->ipi_attempts = 0;
			completion_status = FLUSH_GIVEUP;
			break;
		}
		cpu_relax();
	} while ((completion_status == FLUSH_RETRY_PLUGGED) ||
		 (completion_status == FLUSH_RETRY_TIMEOUT));
	time2 = get_cycles();

	bcp->plugged_tries = 0;
	bcp->timeout_tries = 0;

	if ((completion_status == FLUSH_COMPLETE) &&
	    (bcp->conseccompletes > bcp->complete_threshold) &&
	    (hmaster->max_bau_concurrent <
					hmaster->max_bau_concurrent_constant))
		hmaster->max_bau_concurrent++;
	while (hmaster->uvhub_quiesce)
		cpu_relax();
	atomic_dec(&hmaster->active_descriptor_count);
	if (time2 > time1) {
		elapsed = time2 - time1;
		stat->s_time += elapsed;
		if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
			bcp->period_requests++;
			bcp->period_time += elapsed;
			if ((elapsed > congested_cycles) &&
			    (bcp->period_requests > bcp->congested_reps)) {
				disable_for_congestion(bcp, stat);
			}
		}
	} else
		stat->s_requestor--;
	if (completion_status == FLUSH_COMPLETE && try > 1)
		stat->s_retriesok++;
	else if (completion_status == FLUSH_GIVEUP) {
		stat->s_giveup++;
		return 1;
	}
	return 0;
}

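/*
 * Retry policy recap for uv_flush_send_and_wait() above:
 * FLUSH_RETRY_PLUGGED and FLUSH_RETRY_TIMEOUT loop back for another
 * send; after plugsb4reset/timeoutsb4reset consecutive failures the hub
 * is quiesced and the destination's sw_ack resources are reset by IPI;
 * after ipi_reset_limit resets the broadcast becomes FLUSH_GIVEUP and
 * returns 1.  A timeout also drops max_bau_concurrent to 1; it climbs
 * back gradually once conseccompletes exceeds complete_threshold.
 */
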
/**
 * uv_flush_tlb_others - globally purge translation cache of a virtual
 * address or all TLB's
 * @cpumask: mask of all cpu's in which the address is to be removed
 * @mm: mm_struct containing virtual address range
 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
 * @cpu: the current cpu
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLB's on specified processors.
 *
 * The caller has derived the cpumask from the mm_struct.  This function
 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
 *
 * The cpumask is converted into a uvhubmask of the uvhubs containing
 * those cpus.
 *
 * Note that this function should be called with preemption disabled.
 *
 * Returns NULL if all remote flushing was done.
 * Returns pointer to cpumask if some remote flushing remains to be
 * done.  The returned pointer is valid till preemption is re-enabled.
 */
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
					  struct mm_struct *mm,
					  unsigned long va, unsigned int cpu)
{
	int tcpu;
	int uvhub;
	int locals = 0;
	int remotes = 0;
	int hubs = 0;
	struct bau_desc *bau_desc;
	struct cpumask *flush_mask;
	struct ptc_stats *stat;
	struct bau_control *bcp;
	struct bau_control *tbcp;

	/* kernel was booted 'nobau' */
	if (nobau)
		return cpumask;

	bcp = &per_cpu(bau_control, cpu);
	stat = bcp->statp;

	/* bau was disabled due to slow response */
	if (bcp->baudisabled) {
		/* the cpu that disabled it must re-enable it */
		if (bcp->set_bau_off) {
			if (get_cycles() >= bcp->set_bau_on_time) {
				stat->s_bau_reenabled++;
				baudisabled = 0;
				for_each_present_cpu(tcpu) {
					tbcp = &per_cpu(bau_control, tcpu);
					tbcp->baudisabled = 0;
					tbcp->period_requests = 0;
					tbcp->period_time = 0;
				}
			}
		}
		return cpumask;
	}

	/*
	 * Each sending cpu has a per-cpu mask which it fills from the caller's
	 * cpu mask.  All cpus are converted to uvhubs and copied to the
	 * activation descriptor.
	 */
	flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
	/* don't actually do a shootdown of the local cpu */
	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
	if (cpu_isset(cpu, *cpumask))
		stat->s_ntargself++;

	bau_desc = bcp->descriptor_base;
	bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;

	bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);

	/* cpu statistics */
	for_each_cpu(tcpu, flush_mask) {
		uvhub = uv_cpu_to_blade_id(tcpu);
		bau_uvhub_set(uvhub, &bau_desc->distribution);
		if (uvhub == bcp->uvhub)
			locals++;
		else
			remotes++;
	}
	if ((locals + remotes) == 0)
		return NULL;
	stat->s_requestor++;
	stat->s_ntargcpu += remotes + locals;
	stat->s_ntargremotes += remotes;
	stat->s_ntarglocals += locals;
	remotes = bau_uvhub_weight(&bau_desc->distribution);

	/* uvhub statistics */
	hubs = bau_uvhub_weight(&bau_desc->distribution);
	if (locals) {
		stat->s_ntarglocaluvhub++;
		stat->s_ntargremoteuvhub += (hubs - 1);
	} else
		stat->s_ntargremoteuvhub += hubs;
	stat->s_ntarguvhub += hubs;
	if (hubs >= 16)
		stat->s_ntarguvhub16++;
	else if (hubs >= 8)
		stat->s_ntarguvhub8++;
	else if (hubs >= 4)
		stat->s_ntarguvhub4++;
	else if (hubs >= 2)
		stat->s_ntarguvhub2++;
	else
		stat->s_ntarguvhub1++;

	bau_desc->payload.address = va;
	bau_desc->payload.sending_cpu = cpu;

	/*
	 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
	 * or 1 if it gave up and the original cpumask should be returned.
	 */
	if (!uv_flush_send_and_wait(bau_desc, flush_mask, bcp))
		return NULL;
	else
		return cpumask;
}

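/*
 * Caller sketch (roughly what the UV branch of native_flush_tlb_others()
 * in arch/x86/mm/tlb.c does; see that file for the authoritative code):
 *
 *	cpu = get_cpu();
 *	cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu);
 *	if (cpumask)
 *		flush_tlb_others_ipi(cpumask, mm, va);
 *	put_cpu();
 */
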
/*
 * The BAU message interrupt comes here. (registered by set_intr_gate)
 * See entry_64.S
 *
 * We received a broadcast assist message.
 *
 * Interrupts are disabled; this interrupt could represent
 * the receipt of several messages.
 *
 * All cores/threads on this hub get this interrupt.
 * The last one to see it does the software ack.
 * (the resource will not be freed until noninterruptible cpus see this
 *  interrupt; hardware may timeout the s/w ack and reply ERROR)
 */
void uv_bau_message_interrupt(struct pt_regs *regs)
{
	int count = 0;
	cycles_t time_start;
	struct bau_payload_queue_entry *msg;
	struct bau_control *bcp;
	struct ptc_stats *stat;
	struct msg_desc msgdesc;

	time_start = get_cycles();
	bcp = &per_cpu(bau_control, smp_processor_id());
	stat = bcp->statp;
	msgdesc.va_queue_first = bcp->va_queue_first;
	msgdesc.va_queue_last = bcp->va_queue_last;
	msg = bcp->bau_msg_head;
	while (msg->sw_ack_vector) {
		count++;
		msgdesc.msg_slot = msg - msgdesc.va_queue_first;
		msgdesc.sw_ack_slot = ffs(msg->sw_ack_vector) - 1;
		msgdesc.msg = msg;
		uv_bau_process_message(&msgdesc, bcp);
		msg++;
		if (msg > msgdesc.va_queue_last)
			msg = msgdesc.va_queue_first;
		bcp->bau_msg_head = msg;
	}
	stat->d_time += (get_cycles() - time_start);
	if (!count)
		stat->d_nomsg++;
	else if (count > 1)
		stat->d_multmsg++;
	ack_APIC_irq();
}

/*
 * uv_enable_timeouts
 *
 * Each target uvhub (i.e. a uvhub that has cpu's) needs to have
 * shootdown message timeouts enabled.  The timeout does not cause
 * an interrupt, but causes an error message to be returned to
 * the sender.
 */
static void uv_enable_timeouts(void)
{
	int uvhub;
	int nuvhubs;
	int pnode;
	unsigned long mmr_image;

	nuvhubs = uv_num_possible_blades();

	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		if (!uv_blade_nr_possible_cpus(uvhub))
			continue;

		pnode = uv_blade_to_pnode(uvhub);
		mmr_image =
		    uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
		/*
		 * Set the timeout period and then lock it in, in three
		 * steps; captures and locks in the period.
		 *
		 * To program the period, the SOFT_ACK_MODE must be off.
		 */
		mmr_image &= ~((unsigned long)1 <<
		    UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
		/*
		 * Set the 4-bit period.
		 */
		mmr_image &= ~((unsigned long)0xf <<
		    UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
		mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD <<
		    UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
		/*
		 * Subsequent reversals of the timebase bit (3) cause an
		 * immediate timeout of one or all INTD resources as
		 * indicated in bits 2:0 (7 causes all of them to timeout).
		 */
		mmr_image |= ((unsigned long)1 <<
		    UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
	}
}

static void *uv_ptc_seq_start(struct seq_file *file, loff_t *offset)
{
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void *uv_ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void uv_ptc_seq_stop(struct seq_file *file, void *data)
{
}

static inline unsigned long long
microsec_2_cycles(unsigned long microsec)
{
	unsigned long ns;
	unsigned long long cyc;

	ns = microsec * 1000;
	cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
	return cyc;
}

/*
 * Display the statistics thru /proc.
 * 'data' points to the cpu number
 */
static int uv_ptc_seq_show(struct seq_file *file, void *data)
{
	struct ptc_stats *stat;
	int cpu;

	cpu = *(loff_t *)data;

	if (!cpu) {
		seq_printf(file,
		"# cpu sent stime self locals remotes ncpus localhub ");
		seq_printf(file,
			"remotehub numuvhubs numuvhubs16 numuvhubs8 ");
		seq_printf(file,
			"numuvhubs4 numuvhubs2 numuvhubs1 dto ");
		seq_printf(file,
			"retries rok resetp resett giveup sto bz throt ");
		seq_printf(file,
			"sw_ack recv rtime all ");
		seq_printf(file,
			"one mult none retry canc nocan reset rcan ");
		seq_printf(file,
			"disable enable\n");
	}
	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
		stat = &per_cpu(ptcstats, cpu);
		/* source side statistics */
		seq_printf(file,
			"cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
			   cpu, stat->s_requestor, cycles_2_us(stat->s_time),
			   stat->s_ntargself, stat->s_ntarglocals,
			   stat->s_ntargremotes, stat->s_ntargcpu,
			   stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
			   stat->s_ntarguvhub, stat->s_ntarguvhub16);
		seq_printf(file, "%ld %ld %ld %ld %ld ",
			   stat->s_ntarguvhub8, stat->s_ntarguvhub4,
			   stat->s_ntarguvhub2, stat->s_ntarguvhub1,
			   stat->s_dtimeout);
		seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
			   stat->s_retry_messages, stat->s_retriesok,
			   stat->s_resets_plug, stat->s_resets_timeout,
			   stat->s_giveup, stat->s_stimeout,
			   stat->s_busy, stat->s_throttles);

		/* destination side statistics */
		seq_printf(file,
			   "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
			   uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
					UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
			   stat->d_requestee, cycles_2_us(stat->d_time),
			   stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
			   stat->d_nomsg, stat->d_retries, stat->d_canceled,
			   stat->d_nocanceled, stat->d_resets,
			   stat->d_rcanceled);
		seq_printf(file, "%ld %ld\n",
			stat->s_bau_disabled, stat->s_bau_reenabled);
	}

	return 0;
}

/*
 * Display the tunables thru debugfs
 */
static ssize_t tunables_read(struct file *file, char __user *userbuf,
				size_t count, loff_t *ppos)
{
	char buf[300];
	int ret;

	ret = snprintf(buf, 300, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
		"max_bau_concurrent plugged_delay plugsb4reset",
		"timeoutsb4reset ipi_reset_limit complete_threshold",
		"congested_response_us congested_reps congested_period",
		max_bau_concurrent, plugged_delay, plugsb4reset,
		timeoutsb4reset, ipi_reset_limit, complete_threshold,
		congested_response_us, congested_reps, congested_period);

	return simple_read_from_buffer(userbuf, count, ppos, buf, ret);
}

/*
 * -1: reset the statistics
 *  0: display meaning of the statistics
 */
static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
				 size_t count, loff_t *data)
{
	int cpu;
	long input_arg;
	char optstr[64];
	struct ptc_stats *stat;

	if (count == 0 || count > sizeof(optstr))
		return -EINVAL;
	if (copy_from_user(optstr, user, count))
		return -EFAULT;
	optstr[count - 1] = '\0';
	if (strict_strtol(optstr, 10, &input_arg) < 0) {
		printk(KERN_DEBUG "%s is invalid\n", optstr);
		return -EINVAL;
	}

	if (input_arg == 0) {
		printk(KERN_DEBUG "# cpu: cpu number\n");
		printk(KERN_DEBUG "Sender statistics:\n");
		printk(KERN_DEBUG
		"sent: number of shootdown messages sent\n");
		printk(KERN_DEBUG
		"stime: time spent sending messages\n");
		printk(KERN_DEBUG
		"numuvhubs: number of hubs targeted with shootdown\n");
		printk(KERN_DEBUG
		"numuvhubs16: number times 16 or more hubs targeted\n");
		printk(KERN_DEBUG
		"numuvhubs8: number times 8 or more hubs targeted\n");
		printk(KERN_DEBUG
		"numuvhubs4: number times 4 or more hubs targeted\n");
		printk(KERN_DEBUG
		"numuvhubs2: number times 2 or more hubs targeted\n");
		printk(KERN_DEBUG
		"numuvhubs1: number times 1 hub targeted\n");
		printk(KERN_DEBUG
		"numcpus: number of cpus targeted with shootdown\n");
		printk(KERN_DEBUG
		"dto: number of destination timeouts\n");
		printk(KERN_DEBUG
		"retries: destination timeout retries sent\n");
		printk(KERN_DEBUG
		"rok: destination timeouts successfully retried\n");
		printk(KERN_DEBUG
		"resetp: ipi-style resource resets for plugs\n");
		printk(KERN_DEBUG
		"resett: ipi-style resource resets for timeouts\n");
		printk(KERN_DEBUG
		"giveup: fall-backs to ipi-style shootdowns\n");
		printk(KERN_DEBUG
		"sto: number of source timeouts\n");
		printk(KERN_DEBUG
		"bz: number of stay-busy's\n");
		printk(KERN_DEBUG
		"throt: number times spun in throttle\n");
		printk(KERN_DEBUG "Destination side statistics:\n");
		printk(KERN_DEBUG
		"sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n");
		printk(KERN_DEBUG
		"recv: shootdown messages received\n");
		printk(KERN_DEBUG
		"rtime: time spent processing messages\n");
		printk(KERN_DEBUG
		"all: shootdown all-tlb messages\n");
		printk(KERN_DEBUG
		"one: shootdown one-tlb messages\n");
		printk(KERN_DEBUG
		"mult: interrupts that found multiple messages\n");
		printk(KERN_DEBUG
		"none: interrupts that found no messages\n");
		printk(KERN_DEBUG
		"retry: number of retry messages processed\n");
		printk(KERN_DEBUG
		"canc: number messages canceled by retries\n");
		printk(KERN_DEBUG
		"nocan: number retries that found nothing to cancel\n");
		printk(KERN_DEBUG
		"reset: number of ipi-style reset requests processed\n");
		printk(KERN_DEBUG
		"rcan: number messages canceled by reset requests\n");
		printk(KERN_DEBUG
		"disable: number times use of the BAU was disabled\n");
		printk(KERN_DEBUG
		"enable: number times use of the BAU was re-enabled\n");
	} else if (input_arg == -1) {
		for_each_present_cpu(cpu) {
			stat = &per_cpu(ptcstats, cpu);
			memset(stat, 0, sizeof(struct ptc_stats));
		}
	}

	return count;
}

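/*
 * Usage sketch for the /proc interface above (assuming UV_PTC_BASENAME
 * expands to "sgi_uv/ptc_statistics"):
 *	cat /proc/sgi_uv/ptc_statistics	      # one line of counters per cpu
 *	echo 0 > /proc/sgi_uv/ptc_statistics  # printk the field legend
 *	echo -1 > /proc/sgi_uv/ptc_statistics # zero every cpu's counters
 */
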
static int local_atoi(const char *name)
{
	int val = 0;

	for (;; name++) {
		switch (*name) {
		case '0' ... '9':
			val = 10*val+(*name-'0');
			break;
		default:
			return val;
		}
	}
}

/*
 * set the tunables
 * 0 values reset them to defaults
 */
static ssize_t tunables_write(struct file *file, const char __user *user,
				size_t count, loff_t *data)
{
	int cpu;
	int cnt = 0;
	int val;
	char *p;
	char *q;
	char instr[64];
	struct bau_control *bcp;

	if (count == 0 || count > sizeof(instr)-1)
		return -EINVAL;
	if (copy_from_user(instr, user, count))
		return -EFAULT;

	instr[count] = '\0';
	/* count the fields */
	p = instr + strspn(instr, WHITESPACE);
	q = p;
	for (; *p; p = q + strspn(q, WHITESPACE)) {
		q = p + strcspn(p, WHITESPACE);
		cnt++;
		if (q == p)
			break;
	}
	if (cnt != 9) {
		printk(KERN_INFO "bau tunable error: should be 9 numbers\n");
		return -EINVAL;
	}

	p = instr + strspn(instr, WHITESPACE);
	q = p;
	for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
		q = p + strcspn(p, WHITESPACE);
		val = local_atoi(p);
		switch (cnt) {
		case 0:
			if (val == 0) {
				max_bau_concurrent = MAX_BAU_CONCURRENT;
				max_bau_concurrent_constant =
							MAX_BAU_CONCURRENT;
				continue;
			}
			bcp = &per_cpu(bau_control, smp_processor_id());
			if (val < 1 || val > bcp->cpus_in_uvhub) {
				printk(KERN_DEBUG
				"Error: BAU max concurrent %d is invalid\n",
				val);
				return -EINVAL;
			}
			max_bau_concurrent = val;
			max_bau_concurrent_constant = val;
			continue;
		case 1:
			if (val == 0)
				plugged_delay = PLUGGED_DELAY;
			else
				plugged_delay = val;
			continue;
		case 2:
			if (val == 0)
				plugsb4reset = PLUGSB4RESET;
			else
				plugsb4reset = val;
			continue;
		case 3:
			if (val == 0)
				timeoutsb4reset = TIMEOUTSB4RESET;
			else
				timeoutsb4reset = val;
			continue;
		case 4:
			if (val == 0)
				ipi_reset_limit = IPI_RESET_LIMIT;
			else
				ipi_reset_limit = val;
			continue;
		case 5:
			if (val == 0)
				complete_threshold = COMPLETE_THRESHOLD;
			else
				complete_threshold = val;
			continue;
		case 6:
			if (val == 0)
				congested_response_us = CONGESTED_RESPONSE_US;
			else
				congested_response_us = val;
			continue;
		case 7:
			if (val == 0)
				congested_reps = CONGESTED_REPS;
			else
				congested_reps = val;
			continue;
		case 8:
			if (val == 0)
				congested_period = CONGESTED_PERIOD;
			else
				congested_period = val;
			continue;
		}
		if (q == p)
			break;
	}
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->max_bau_concurrent = max_bau_concurrent;
		bcp->max_bau_concurrent_constant = max_bau_concurrent;
		bcp->plugged_delay = plugged_delay;
		bcp->plugsb4reset = plugsb4reset;
		bcp->timeoutsb4reset = timeoutsb4reset;
		bcp->ipi_reset_limit = ipi_reset_limit;
		bcp->complete_threshold = complete_threshold;
		bcp->congested_response_us = congested_response_us;
		bcp->congested_reps = congested_reps;
		bcp->congested_period = congested_period;
	}
	return count;
}

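/*
 * Usage sketch for the debugfs file served by tunables_read()/
 * tunables_write() above (path assumed to be
 * <debugfs>/UV_BAU_TUNABLES_DIR/UV_BAU_TUNABLES_FILE): a read returns
 * the nine tunable names and their current values; a write must supply
 * all nine integers in that same order, a 0 restoring that tunable's
 * compiled-in default, e.g.:
 *	echo "0 0 0 0 0 0 0 0 0" > bau_tunables
 */
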
Cliff Wickman18129242008-06-02 08:56:14 -05001253static const struct seq_operations uv_ptc_seq_ops = {
Ingo Molnardc163a42008-06-18 14:15:43 +02001254 .start = uv_ptc_seq_start,
1255 .next = uv_ptc_seq_next,
1256 .stop = uv_ptc_seq_stop,
1257 .show = uv_ptc_seq_show
Cliff Wickman18129242008-06-02 08:56:14 -05001258};
1259
Cliff Wickmanb194b122008-06-12 08:23:48 -05001260static int uv_ptc_proc_open(struct inode *inode, struct file *file)
Cliff Wickman18129242008-06-02 08:56:14 -05001261{
1262 return seq_open(file, &uv_ptc_seq_ops);
1263}
1264
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001265static int tunables_open(struct inode *inode, struct file *file)
1266{
1267 return 0;
1268}
1269
Cliff Wickman18129242008-06-02 08:56:14 -05001270static const struct file_operations proc_uv_ptc_operations = {
Cliff Wickmanb194b122008-06-12 08:23:48 -05001271 .open = uv_ptc_proc_open,
1272 .read = seq_read,
1273 .write = uv_ptc_proc_write,
1274 .llseek = seq_lseek,
1275 .release = seq_release,
Cliff Wickman18129242008-06-02 08:56:14 -05001276};
1277
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001278static const struct file_operations tunables_fops = {
1279 .open = tunables_open,
1280 .read = tunables_read,
1281 .write = tunables_write,
1282};
1283
Cliff Wickmanb194b122008-06-12 08:23:48 -05001284static int __init uv_ptc_init(void)
Cliff Wickman18129242008-06-02 08:56:14 -05001285{
Cliff Wickmanb194b122008-06-12 08:23:48 -05001286 struct proc_dir_entry *proc_uv_ptc;
Cliff Wickman18129242008-06-02 08:56:14 -05001287
1288 if (!is_uv_system())
1289 return 0;
1290
Alexey Dobriyan10f02d112009-08-23 23:17:27 +04001291 proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
1292 &proc_uv_ptc_operations);
Cliff Wickman18129242008-06-02 08:56:14 -05001293 if (!proc_uv_ptc) {
1294 printk(KERN_ERR "unable to create %s proc entry\n",
1295 UV_PTC_BASENAME);
1296 return -EINVAL;
1297 }
Cliff Wickmane8e5e8a2010-06-02 16:22:01 -05001298
1299 tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
1300 if (!tunables_dir) {
1301 printk(KERN_ERR "unable to create debugfs directory %s\n",
1302 UV_BAU_TUNABLES_DIR);
1303 return -EINVAL;
1304 }
1305 tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
1306 tunables_dir, NULL, &tunables_fops);
1307 if (!tunables_file) {
1308 printk(KERN_ERR "unable to create debugfs file %s\n",
1309 UV_BAU_TUNABLES_FILE);
1310 return -EINVAL;
1311 }
Cliff Wickman18129242008-06-02 08:56:14 -05001312 return 0;
1313}
1314
Cliff Wickmanb194b122008-06-12 08:23:48 -05001315/*
Cliff Wickmanb194b122008-06-12 08:23:48 -05001316 * initialize the sending side's sending buffers
1317 */
static void
uv_activation_descriptor_init(int node, int pnode)
{
	int i;
	int cpu;
	unsigned long pa;
	unsigned long m;
	unsigned long n;
	struct bau_desc *bau_desc;
	struct bau_desc *bd2;
	struct bau_control *bcp;

	/*
	 * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
	 * per cpu; and up to 32 (UV_ADP_SIZE) cpus per uvhub
	 */
	bau_desc = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)*
		UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
	BUG_ON(!bau_desc);

	pa = uv_gpa(bau_desc); /* need the real nasid */
	n = pa >> uv_nshift;
	m = pa & uv_mmask;

	uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
			      (n << UV_DESC_BASE_PNODE_SHIFT | m));

	/*
	 * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
	 * cpu even though we only use the first one; one descriptor can
	 * describe a broadcast to 256 uv hubs.
	 */
	for (i = 0, bd2 = bau_desc; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR);
		i++, bd2++) {
		memset(bd2, 0, sizeof(struct bau_desc));
		bd2->header.sw_ack_flag = 1;
		/*
		 * base_dest_nodeid is the nasid (pnode<<1) of the first uvhub
		 * in the partition. The bit map will indicate uvhub numbers,
		 * which are 0-N in a partition. Pnodes are unique system-wide.
		 */
		bd2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
		bd2->header.dest_subnodeid = 0x10; /* the LB */
		bd2->header.command = UV_NET_ENDPOINT_INTD;
		bd2->header.int_both = 1;
		/*
		 * all others need to be set to zero:
		 * fairness chaining multilevel count replied_to
		 */
	}
	for_each_present_cpu(cpu) {
		if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
			continue;
		bcp = &per_cpu(bau_control, cpu);
		bcp->descriptor_base = bau_desc;
	}
}
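
/*
 * Worked size check for the allocation above, using the constants the
 * comments there cite: 64 bytes per bau_desc * 8 (UV_ITEMS_PER_DESCRIPTOR)
 * * 32 (UV_ADP_SIZE) = 16384 bytes of descriptor space per uvhub, shared
 * by every cpu on the hub through bcp->descriptor_base.
 */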

/*
 * initialize the destination side's receiving buffers
 * entered for each uvhub in the partition
 * - node is first node (kernel memory notion) on the uvhub
 * - pnode is the uvhub's physical identifier
 */
static void
uv_payload_queue_init(int node, int pnode)
{
	int pn;
	int cpu;
	char *cp;
	unsigned long pa;
	struct bau_payload_queue_entry *pqp;
	struct bau_payload_queue_entry *pqp_malloc;
	struct bau_control *bcp;

	pqp = (struct bau_payload_queue_entry *) kmalloc_node(
		(DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry),
		GFP_KERNEL, node);
	BUG_ON(!pqp);
	pqp_malloc = pqp;

	cp = (char *)pqp + 31;
	pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
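	/*
	 * The two statements above round pqp up to the next 32-byte
	 * boundary: add 31, then clear the low five bits (e.g. a pointer
	 * of 0x1008 becomes 0x1020).  The extra DEST_Q_SIZE+1'th entry
	 * in the allocation absorbs the bytes skipped by the alignment.
	 */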

	for_each_present_cpu(cpu) {
		if (pnode != uv_cpu_to_pnode(cpu))
			continue;
		/* for every cpu on this pnode: */
		bcp = &per_cpu(bau_control, cpu);
		bcp->va_queue_first = pqp;
		bcp->bau_msg_head = pqp;
		bcp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
	}
	/*
	 * need the pnode of where the memory was really allocated
	 */
	pa = uv_gpa(pqp);
	pn = pa >> uv_nshift;
	uv_write_global_mmr64(pnode,
			      UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
			      ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
			      uv_physnodeaddr(pqp));
	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
			      uv_physnodeaddr(pqp));
	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST,
			      (unsigned long)
			      uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1)));
	/* in effect, all msg_type's are set to MSG_NOOP */
	memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE);
}

/*
 * Initialization of each UV hub's structures
 */
static void __init uv_init_uvhub(int uvhub, int vector)
{
	int node;
	int pnode;
	unsigned long apicid;

	node = uvhub_to_first_node(uvhub);
	pnode = uv_blade_to_pnode(uvhub);
	uv_activation_descriptor_init(node, pnode);
	uv_payload_queue_init(node, pnode);
	/*
	 * the below initialization can't be in firmware because the
	 * messaging IRQ will be determined by the OS
	 */
	apicid = uvhub_to_first_apicid(uvhub);
	uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
			      ((apicid << 32) | vector));
}
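
/*
 * As encoded by the write above, UVH_BAU_DATA_CONFIG carries the target
 * apicid in its upper 32 bits and the interrupt vector in its low bits,
 * so incoming BAU messages interrupt the uvhub's first cpu with the
 * vector passed down from uv_bau_init() (UV_BAU_MESSAGE).
 */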

/*
 * We will set BAU_MISC_CONTROL with a timeout period.
 * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
 * So the destination timeout period has to be calculated from them.
 */
static int
calculate_destination_timeout(void)
{
	unsigned long mmr_image;
	int mult1;
	int mult2;
	int index;
	int base;
	int ret;
	unsigned long ts_ns;

	mult1 = UV_INTD_SOFT_ACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
	mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
	index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
	mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
	mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
	base = timeout_base_ns[index];
	ts_ns = base * mult1 * mult2;
	ret = ts_ns / 1000;
	return ret;
}
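
/*
 * Worked example of the calculation above, with hypothetical register
 * contents (the real values are set by the BIOS): if the urgency7 index
 * selects timeout_base_ns[3] = 10240 and mult1 = 3, mult2 = 16, then
 *	ts_ns = 10240 * 3 * 16 = 491520
 * and the function returns 491520 / 1000 = 491 microseconds.
 */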

/*
 * initialize the bau_control structure for each cpu
 */
static void uv_init_per_cpu(int nuvhubs)
{
	int i;
	int cpu;
	int pnode;
	int uvhub;
	short socket = 0;
	unsigned short socket_mask;
	unsigned int uvhub_mask = 0;	/* or'd below; must start empty */
	struct bau_control *bcp;
	struct uvhub_desc *bdp;
	struct socket_desc *sdp;
	struct bau_control *hmaster = NULL;
	struct bau_control *smaster = NULL;
	struct socket_desc {
		short num_cpus;
		short cpu_number[16];
	};
	struct uvhub_desc {
		unsigned short socket_mask;
		short num_cpus;
		short uvhub;
		short pnode;
		struct socket_desc socket[2];
	};
	struct uvhub_desc *uvhub_descs;

	timeout_us = calculate_destination_timeout();

	uvhub_descs = (struct uvhub_desc *)
		kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
	memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		memset(bcp, 0, sizeof(struct bau_control));
		pnode = uv_cpu_hub_info(cpu)->pnode;
		uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
		uvhub_mask |= (1 << uvhub);
		bdp = &uvhub_descs[uvhub];
		bdp->num_cpus++;
		bdp->uvhub = uvhub;
		bdp->pnode = pnode;
		/* kludge: 'assuming' one node per socket, and assuming that
		   disabling a socket just leaves a gap in node numbers */
		socket = (cpu_to_node(cpu) & 1);
		bdp->socket_mask |= (1 << socket);
		sdp = &bdp->socket[socket];
		sdp->cpu_number[sdp->num_cpus] = cpu;
		sdp->num_cpus++;
	}
	uvhub = 0;
	while (uvhub_mask) {
		if (!(uvhub_mask & 1))
			goto nexthub;
		bdp = &uvhub_descs[uvhub];
		socket_mask = bdp->socket_mask;
		socket = 0;
		while (socket_mask) {
			if (!(socket_mask & 1))
				goto nextsocket;
			sdp = &bdp->socket[socket];
			for (i = 0; i < sdp->num_cpus; i++) {
				cpu = sdp->cpu_number[i];
				bcp = &per_cpu(bau_control, cpu);
				bcp->cpu = cpu;
				if (i == 0) {
					smaster = bcp;
					if (socket == 0)
						hmaster = bcp;
				}
				bcp->cpus_in_uvhub = bdp->num_cpus;
				bcp->cpus_in_socket = sdp->num_cpus;
				bcp->socket_master = smaster;
				bcp->uvhub = bdp->uvhub;
				bcp->uvhub_master = hmaster;
				bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->
						blade_processor_id;
			}
nextsocket:
			socket++;
			socket_mask = (socket_mask >> 1);
		}
nexthub:
		uvhub++;
		uvhub_mask = (uvhub_mask >> 1);
	}
	kfree(uvhub_descs);
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->baudisabled = 0;
		bcp->statp = &per_cpu(ptcstats, cpu);
		/* time interval to catch a hardware stay-busy bug */
		bcp->timeout_interval = microsec_2_cycles(2*timeout_us);
		bcp->max_bau_concurrent = max_bau_concurrent;
		bcp->max_bau_concurrent_constant = max_bau_concurrent;
		bcp->plugged_delay = plugged_delay;
		bcp->plugsb4reset = plugsb4reset;
		bcp->timeoutsb4reset = timeoutsb4reset;
		bcp->ipi_reset_limit = ipi_reset_limit;
		bcp->complete_threshold = complete_threshold;
		bcp->congested_response_us = congested_response_us;
		bcp->congested_reps = congested_reps;
		bcp->congested_period = congested_period;
	}
}
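
/*
 * Sketch of the hierarchy the loops above build, for a hypothetical
 * uvhub with two populated sockets:
 *
 *	hmaster = bau_control of the first cpu on socket 0 (uvhub_master)
 *	smaster = bau_control of the first cpu on each socket
 *	every other cpu's bau_control points at its smaster and hmaster
 *	and carries its own copy of the tunables set in the final loop
 */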

/*
 * Initialization of BAU-related structures
 */
static int __init uv_bau_init(void)
{
	int uvhub;
	int pnode;
	int nuvhubs;
	int cur_cpu;
	int vector;
	unsigned long mmr;

	if (!is_uv_system())
		return 0;

	if (nobau)
		return 0;

	for_each_possible_cpu(cur_cpu)
		zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
				       GFP_KERNEL, cpu_to_node(cur_cpu));

	uv_nshift = uv_hub_info->m_val;
	uv_mmask = (1UL << uv_hub_info->m_val) - 1;
	nuvhubs = uv_num_possible_blades();
	spin_lock_init(&disable_lock);
	congested_cycles = microsec_2_cycles(congested_response_us);

	uv_init_per_cpu(nuvhubs);

	uv_partition_base_pnode = 0x7fffffff;
	for (uvhub = 0; uvhub < nuvhubs; uvhub++)
		if (uv_blade_nr_possible_cpus(uvhub) &&
		    (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode))
			uv_partition_base_pnode = uv_blade_to_pnode(uvhub);

	vector = UV_BAU_MESSAGE;
	for_each_possible_blade(uvhub)
		if (uv_blade_nr_possible_cpus(uvhub))
			uv_init_uvhub(uvhub, vector);

	uv_enable_timeouts();
	alloc_intr_gate(vector, uv_bau_message_intr1);

	for_each_possible_blade(uvhub) {
		pnode = uv_blade_to_pnode(uvhub);
		/* INIT the bau */
		uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL,
				      ((unsigned long)1 << 63));
		mmr = 1; /* should be 1 to broadcast to both sockets */
		uv_write_global_mmr64(pnode, UVH_BAU_DATA_BROADCAST, mmr);
	}

	return 0;
}
core_initcall(uv_bau_init);
fs_initcall(uv_ptc_init);
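
/*
 * Ordering note: core_initcall() runs at an earlier initcall level than
 * fs_initcall(), so uv_bau_init() has set up the BAU hardware and the
 * per-cpu structures before uv_ptc_init() exposes the proc and debugfs
 * interfaces to them.
 */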