/*
 * SGI UltraViolet TLB flush routines.
 *
 * (c) 2008 Cliff Wickman <cpw@sgi.com>, SGI.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */
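
/*
 * Overview (a summary of the flow implemented below, for reference):
 *
 * Sending side: uv_flush_tlb_others() fills in this cpu's activation
 * descriptor (address, sending cpu, distribution map of target pnodes);
 * uv_flush_send_and_wait() then writes the descriptor index to the
 * ACTIVATION_CONTROL MMR and polls the ACTIVATION_STATUS register until
 * the broadcast completes, retries, or gives up (in which case the caller
 * falls back to IPI-style shootdown).
 *
 * Receiving side: the hub delivers the message as the UV_BAU_MESSAGE
 * interrupt; uv_bau_message_interrupt() walks the payload queue and each
 * cpu on the blade flushes its TLB in uv_bau_process_message(). The last
 * cpu to acknowledge a message frees the software-ack resource, which
 * replies to the sender.
 */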
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>

#include <asm/mmu_context.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/tsc.h>
#include <asm/irq_vectors.h>

static struct bau_control **uv_bau_table_bases __read_mostly;
static int uv_bau_retry_limit __read_mostly;

/* position of pnode (which is nasid>>1): */
static int uv_nshift __read_mostly;
/* base pnode in this partition */
static int uv_partition_base_pnode __read_mostly;

static unsigned long uv_mmask __read_mostly;

static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);

/*
 * Determine the first node on a blade.
 */
static int __init blade_to_first_node(int blade)
{
	int node, b;

	for_each_online_node(node) {
		b = uv_node_to_blade_id(node);
		if (blade == b)
			return node;
	}
	return -1; /* shouldn't happen */
}

/*
 * Determine the apicid of the first cpu on a blade.
 */
static int __init blade_to_first_apicid(int blade)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (blade == uv_cpu_to_blade_id(cpu))
			return per_cpu(x86_cpu_to_apicid, cpu);
	return -1;
}

/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
 * hardware but the resource has not been released. In that case our
 * clear of the Timeout bit (as well) will free the resource. No reply will
 * be sent (the hardware will only do one reply per message).
 */
static void uv_reply_to_message(int resource,
				struct bau_payload_queue_entry *msg,
				struct bau_msg_status *msp)
{
	unsigned long dw;

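	/*
	 * Bit 'resource' is the Pending bit for this s/w ack resource and
	 * the same bit shifted up by UV_SW_ACK_NPENDING is its Timeout bit;
	 * writing them to the ..._ALIAS register clears them (see the
	 * comment above).
	 */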
	dw = (1 << (resource + UV_SW_ACK_NPENDING)) | (1 << resource);
	msg->replied_to = 1;
	msg->sw_ack_vector = 0;
	if (msp)
		msp->seen_by.bits = 0;
	uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
}

/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpus may come here at the same time for this message.
 */
static void uv_bau_process_message(struct bau_payload_queue_entry *msg,
				   int msg_slot, int sw_ack_slot)
{
	unsigned long this_cpu_mask;
	struct bau_msg_status *msp;
	int cpu;

	msp = __get_cpu_var(bau_control).msg_statuses + msg_slot;
	cpu = uv_blade_processor_id();
	msg->number_of_cpus =
	    uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
	this_cpu_mask = 1UL << cpu;
	if (msp->seen_by.bits & this_cpu_mask)
		return;
	atomic_or_long(&msp->seen_by.bits, this_cpu_mask);

	if (msg->replied_to == 1)
		return;

	if (msg->address == TLB_FLUSH_ALL) {
		local_flush_tlb();
		__get_cpu_var(ptcstats).alltlb++;
	} else {
		__flush_tlb_one(msg->address);
		__get_cpu_var(ptcstats).onetlb++;
	}

	__get_cpu_var(ptcstats).requestee++;

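	/*
	 * The last cpu on this blade to acknowledge the message frees the
	 * software-ack resource, which sends the reply to the requestor.
	 */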
	atomic_inc_short(&msg->acknowledge_count);
	if (msg->number_of_cpus == msg->acknowledge_count)
		uv_reply_to_message(sw_ack_slot, msg, msp);
}

/*
 * Examine the payload queue on one distribution node to see
 * which messages have not been seen, and which cpu(s) have not seen them.
 *
 * Returns the number of cpus that have not responded.
 */
static int uv_examine_destination(struct bau_control *bau_tablesp, int sender)
{
	struct bau_payload_queue_entry *msg;
	struct bau_msg_status *msp;
	int count = 0;
	int i;
	int j;

	for (msg = bau_tablesp->va_queue_first, i = 0; i < DEST_Q_SIZE;
	     msg++, i++) {
		if ((msg->sending_cpu == sender) && (!msg->replied_to)) {
			msp = bau_tablesp->msg_statuses + i;
			printk(KERN_DEBUG
			       "blade %d: address:%#lx %d of %d, not cpu(s): ",
			       i, msg->address, msg->acknowledge_count,
			       msg->number_of_cpus);
			for (j = 0; j < msg->number_of_cpus; j++) {
				if (!((1L << j) & msp->seen_by.bits)) {
					count++;
					printk("%d ", j);
				}
			}
			printk("\n");
		}
	}
	return count;
}

/*
 * Examine the payload queue on all the distribution nodes to see
 * which messages have not been seen, and which cpu(s) have not seen them.
 *
 * Returns the number of cpus that have not responded.
 */
static int uv_examine_destinations(struct bau_target_nodemask *distribution)
{
	int sender;
	int i;
	int count = 0;

	sender = smp_processor_id();
	for (i = 0; i < sizeof(struct bau_target_nodemask) * BITSPERBYTE; i++) {
		if (!bau_node_isset(i, distribution))
			continue;
		count += uv_examine_destination(uv_bau_table_bases[i], sender);
	}
	return count;
}

/*
 * wait for completion of a broadcast message
 *
 * return COMPLETE, RETRY or GIVEUP
 */
static int uv_wait_completion(struct bau_desc *bau_desc,
			      unsigned long mmr_offset, int right_shift)
{
	int exams = 0;
	long destination_timeouts = 0;
	long source_timeouts = 0;
	unsigned long descriptor_status;

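	/*
	 * The hub reports this cpu's descriptor status in one of the two
	 * ACTIVATION_STATUS MMRs; mmr_offset and right_shift (computed by
	 * the caller) select this cpu's field. Spin until the status goes
	 * back to IDLE, retrying on a source timeout and examining the
	 * destinations on repeated destination timeouts.
	 */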
	while ((descriptor_status = (((unsigned long)
		uv_read_local_mmr(mmr_offset) >>
			right_shift) & UV_ACT_STATUS_MASK)) !=
			DESC_STATUS_IDLE) {
		if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
			source_timeouts++;
			if (source_timeouts > SOURCE_TIMEOUT_LIMIT)
				source_timeouts = 0;
			__get_cpu_var(ptcstats).s_retry++;
			return FLUSH_RETRY;
		}
		/*
		 * spin here looking for progress at the destinations
		 */
		if (descriptor_status == DESC_STATUS_DESTINATION_TIMEOUT) {
			destination_timeouts++;
			if (destination_timeouts > DESTINATION_TIMEOUT_LIMIT) {
				/*
				 * returns number of cpus not responding
				 */
				if (uv_examine_destinations
				    (&bau_desc->distribution) == 0) {
					__get_cpu_var(ptcstats).d_retry++;
					return FLUSH_RETRY;
				}
				exams++;
				if (exams >= uv_bau_retry_limit) {
					printk(KERN_DEBUG
					       "uv_flush_tlb_others");
					printk("giving up on cpu %d\n",
					       smp_processor_id());
					return FLUSH_GIVEUP;
				}
				/*
				 * delays can hang the simulator
				udelay(1000);
				 */
				destination_timeouts = 0;
			}
		}
		cpu_relax();
	}
	return FLUSH_COMPLETE;
}

/**
 * uv_flush_send_and_wait
 *
 * Send a broadcast and wait for a broadcast message to complete.
 *
 * The flush_mask contains the cpus the broadcast was sent to.
 *
 * Returns NULL if all remote flushing was done. The mask is zeroed.
 * Returns @flush_mask if some remote flushing remains to be done. The
 * mask will have some bits still set.
 */
const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode,
					     struct bau_desc *bau_desc,
					     struct cpumask *flush_mask)
{
	int completion_status = 0;
	int right_shift;
	int tries = 0;
	int pnode;
	int bit;
	unsigned long mmr_offset;
	unsigned long index;
	cycles_t time1;
	cycles_t time2;

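	/*
	 * Each cpu's activation status lives in one of two MMRs:
	 * ACTIVATION_STATUS_0 covers the first UV_CPUS_PER_ACT_STATUS cpus
	 * on the hub and ACTIVATION_STATUS_1 the rest; select the register
	 * and bit offset to poll for this cpu.
	 */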
	if (cpu < UV_CPUS_PER_ACT_STATUS) {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
		right_shift = cpu * UV_ACT_STATUS_SIZE;
	} else {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
		right_shift =
		    ((cpu - UV_CPUS_PER_ACT_STATUS) * UV_ACT_STATUS_SIZE);
	}
	time1 = get_cycles();
	do {
		tries++;
		index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
			cpu;
		uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
		completion_status = uv_wait_completion(bau_desc, mmr_offset,
					right_shift);
	} while (completion_status == FLUSH_RETRY);
	time2 = get_cycles();
	__get_cpu_var(ptcstats).sflush += (time2 - time1);
	if (tries > 1)
		__get_cpu_var(ptcstats).retriesok++;

	if (completion_status == FLUSH_GIVEUP) {
		/*
		 * Cause the caller to do an IPI-style TLB shootdown on
		 * the cpus, all of which are still in the mask.
		 */
		__get_cpu_var(ptcstats).ptc_i++;
		return flush_mask;
	}

	/*
	 * Success, so clear the remote cpus from the mask so we don't
	 * use the IPI method of shootdown on them.
	 */
	for_each_cpu(bit, flush_mask) {
		pnode = uv_cpu_to_pnode(bit);
		if (pnode == this_pnode)
			continue;
		cpumask_clear_cpu(bit, flush_mask);
	}
	if (!cpumask_empty(flush_mask))
		return flush_mask;
	return NULL;
}

/**
 * uv_flush_tlb_others - globally purge translation cache of a virtual
 * address or all TLBs
 * @cpumask: mask of all cpus in which the address is to be removed
 * @mm: mm_struct containing virtual address range
 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLBs on cpu)
 * @cpu: the current cpu
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLBs on the specified processors.
 *
 * The caller has derived the cpumask from the mm_struct. This function
 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
 *
 * The cpumask is converted into a nodemask of the nodes containing
 * the cpus.
 *
 * Note that this function should be called with preemption disabled.
 *
 * Returns NULL if all remote flushing was done.
 * Returns pointer to cpumask if some remote flushing remains to be
 * done. The returned pointer is valid till preemption is re-enabled.
 */
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
					  struct mm_struct *mm,
					  unsigned long va, unsigned int cpu)
{
	static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask);
	struct cpumask *flush_mask = &__get_cpu_var(flush_tlb_mask);
	int i;
	int bit;
	int pnode;
	int uv_cpu;
	int this_pnode;
	int locals = 0;
	struct bau_desc *bau_desc;

	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));

	uv_cpu = uv_blade_processor_id();
	this_pnode = uv_hub_info->pnode;
	bau_desc = __get_cpu_var(bau_control).descriptor_base;
	bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu;

	bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);

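	/*
	 * Build the distribution map of target pnodes (partition-relative,
	 * i.e. offset by uv_partition_base_pnode) and count how many cpus
	 * in the mask are local to this hub.
	 */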
	i = 0;
	for_each_cpu(bit, flush_mask) {
		pnode = uv_cpu_to_pnode(bit);
		BUG_ON(pnode > (UV_DISTRIBUTION_SIZE - 1));
		if (pnode == this_pnode) {
			locals++;
			continue;
		}
		bau_node_set(pnode - uv_partition_base_pnode,
				&bau_desc->distribution);
		i++;
	}
	if (i == 0) {
		/*
		 * no off_node flushing; return status for local node
		 */
		if (locals)
			return flush_mask;
		else
			return NULL;
	}
	__get_cpu_var(ptcstats).requestor++;
	__get_cpu_var(ptcstats).ntargeted += i;

	bau_desc->payload.address = va;
	bau_desc->payload.sending_cpu = cpu;

	return uv_flush_send_and_wait(uv_cpu, this_pnode, bau_desc, flush_mask);
}

/*
 * The BAU message interrupt comes here. (registered by set_intr_gate)
 * See entry_64.S
 *
 * We received a broadcast assist message.
 *
 * Interrupts may have been disabled; this interrupt could represent
 * the receipt of several messages.
 *
 * All cores/threads on this node get this interrupt.
 * The last one to see it does the s/w ack.
 * (the resource will not be freed until noninterruptible cpus see this
 * interrupt; hardware will timeout the s/w ack and reply ERROR)
 */
void uv_bau_message_interrupt(struct pt_regs *regs)
{
	struct bau_payload_queue_entry *va_queue_first;
	struct bau_payload_queue_entry *va_queue_last;
	struct bau_payload_queue_entry *msg;
	struct pt_regs *old_regs = set_irq_regs(regs);
	cycles_t time1;
	cycles_t time2;
	int msg_slot;
	int sw_ack_slot;
	int fw;
	int count = 0;
	unsigned long local_pnode;

	ack_APIC_irq();
	exit_idle();
	irq_enter();

	time1 = get_cycles();

	local_pnode = uv_blade_to_pnode(uv_numa_blade_id());

	va_queue_first = __get_cpu_var(bau_control).va_queue_first;
	va_queue_last = __get_cpu_var(bau_control).va_queue_last;

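	/*
	 * Walk the circular payload queue from where this cpu left off last
	 * time; each entry with a nonzero sw_ack_vector is a message that
	 * has not yet been fully acknowledged.
	 */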
	msg = __get_cpu_var(bau_control).bau_msg_head;
	while (msg->sw_ack_vector) {
		count++;
		fw = msg->sw_ack_vector;
		msg_slot = msg - va_queue_first;
		sw_ack_slot = ffs(fw) - 1;

		uv_bau_process_message(msg, msg_slot, sw_ack_slot);

		msg++;
		if (msg > va_queue_last)
			msg = va_queue_first;
		__get_cpu_var(bau_control).bau_msg_head = msg;
	}
	if (!count)
		__get_cpu_var(ptcstats).nomsg++;
	else if (count > 1)
		__get_cpu_var(ptcstats).multmsg++;

	time2 = get_cycles();
	__get_cpu_var(ptcstats).dflush += (time2 - time1);

	irq_exit();
	set_irq_regs(old_regs);
}

/*
 * uv_enable_timeouts
 *
 * Each target blade (i.e. blades that have cpus) needs to have
 * shootdown message timeouts enabled. The timeout does not cause
 * an interrupt, but causes an error message to be returned to
 * the sender.
 */
static void uv_enable_timeouts(void)
{
	int blade;
	int nblades;
	int pnode;
	unsigned long mmr_image;

	nblades = uv_num_possible_blades();

	for (blade = 0; blade < nblades; blade++) {
		if (!uv_blade_nr_possible_cpus(blade))
			continue;

		pnode = uv_blade_to_pnode(blade);
		mmr_image =
		    uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
		/*
		 * Set the timeout period and then lock it in; the three
		 * writes below capture and lock in the period.
		 *
		 * To program the period, the SOFT_ACK_MODE must be off.
		 */
		mmr_image &= ~((unsigned long)1 <<
		    UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
		/*
		 * Set the 4-bit period.
		 */
		mmr_image &= ~((unsigned long)0xf <<
		     UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
		mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD <<
		     UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
		/*
		 * Subsequent reversals of the timebase bit (3) cause an
		 * immediate timeout of one or all INTD resources as
		 * indicated in bits 2:0 (7 causes all of them to timeout).
		 */
		mmr_image |= ((unsigned long)1 <<
		    UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
	}
}

static void *uv_ptc_seq_start(struct seq_file *file, loff_t *offset)
{
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void *uv_ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void uv_ptc_seq_stop(struct seq_file *file, void *data)
{
}

/*
 * Display the statistics through /proc
 * data points to the cpu number
 */
static int uv_ptc_seq_show(struct seq_file *file, void *data)
{
	struct ptc_stats *stat;
	int cpu;

	cpu = *(loff_t *)data;

	if (!cpu) {
		seq_printf(file,
		"# cpu requestor requestee one all sretry dretry ptc_i ");
		seq_printf(file,
		"sw_ack sflush dflush sok dnomsg dmult starget\n");
	}
	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
		stat = &per_cpu(ptcstats, cpu);
		seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld ",
			   cpu, stat->requestor,
			   stat->requestee, stat->onetlb, stat->alltlb,
			   stat->s_retry, stat->d_retry, stat->ptc_i);
		seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n",
			   uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
					UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
			   stat->sflush, stat->dflush,
			   stat->retriesok, stat->nomsg,
			   stat->multmsg, stat->ntargeted);
	}

	return 0;
}

/*
 *  0: display meaning of the statistics
 * >0: retry limit
 */
static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
				 size_t count, loff_t *data)
{
	long newmode;
	char optstr[64];

	if (count == 0 || count > sizeof(optstr))
		return -EINVAL;
	if (copy_from_user(optstr, user, count))
		return -EFAULT;
	optstr[count - 1] = '\0';
	if (strict_strtoul(optstr, 10, &newmode) < 0) {
		printk(KERN_DEBUG "%s is invalid\n", optstr);
		return -EINVAL;
	}

	if (newmode == 0) {
		printk(KERN_DEBUG "# cpu: cpu number\n");
		printk(KERN_DEBUG
		"requestor: times this cpu was the flush requestor\n");
		printk(KERN_DEBUG
		"requestee: times this cpu was requested to flush its TLBs\n");
		printk(KERN_DEBUG
		"one: times requested to flush a single address\n");
		printk(KERN_DEBUG
		"all: times requested to flush all TLB's\n");
		printk(KERN_DEBUG
		"sretry: number of retries of source-side timeouts\n");
		printk(KERN_DEBUG
		"dretry: number of retries of destination-side timeouts\n");
		printk(KERN_DEBUG
		"ptc_i: times UV fell through to IPI-style flushes\n");
		printk(KERN_DEBUG
		"sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n");
		printk(KERN_DEBUG
		"sflush_us: cycles spent in uv_flush_tlb_others()\n");
		printk(KERN_DEBUG
		"dflush_us: cycles spent in handling flush requests\n");
		printk(KERN_DEBUG "sok: successes on retry\n");
		printk(KERN_DEBUG "dnomsg: interrupts with no message\n");
		printk(KERN_DEBUG
		"dmult: interrupts with multiple messages\n");
		printk(KERN_DEBUG "starget: nodes targeted\n");
	} else {
		uv_bau_retry_limit = newmode;
		printk(KERN_DEBUG "timeout retry limit:%d\n",
		       uv_bau_retry_limit);
	}

	return count;
}

static const struct seq_operations uv_ptc_seq_ops = {
	.start = uv_ptc_seq_start,
	.next = uv_ptc_seq_next,
	.stop = uv_ptc_seq_stop,
	.show = uv_ptc_seq_show
};

static int uv_ptc_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &uv_ptc_seq_ops);
}

static const struct file_operations proc_uv_ptc_operations = {
	.open = uv_ptc_proc_open,
	.read = seq_read,
	.write = uv_ptc_proc_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int __init uv_ptc_init(void)
{
	struct proc_dir_entry *proc_uv_ptc;

	if (!is_uv_system())
		return 0;

	proc_uv_ptc = create_proc_entry(UV_PTC_BASENAME, 0444, NULL);
	if (!proc_uv_ptc) {
		printk(KERN_ERR "unable to create %s proc entry\n",
		       UV_PTC_BASENAME);
		return -EINVAL;
	}
	proc_uv_ptc->proc_fops = &proc_uv_ptc_operations;
	return 0;
}

/*
 * begin the initialization of the per-blade control structures
 */
static struct bau_control * __init uv_table_bases_init(int blade, int node)
{
	int i;
	struct bau_msg_status *msp;
	struct bau_control *bau_tabp;

	bau_tabp =
	    kmalloc_node(sizeof(struct bau_control), GFP_KERNEL, node);
	BUG_ON(!bau_tabp);

	bau_tabp->msg_statuses =
	    kmalloc_node(sizeof(struct bau_msg_status) *
			 DEST_Q_SIZE, GFP_KERNEL, node);
	BUG_ON(!bau_tabp->msg_statuses);

	for (i = 0, msp = bau_tabp->msg_statuses; i < DEST_Q_SIZE; i++, msp++)
		bau_cpubits_clear(&msp->seen_by, (int)
				  uv_blade_nr_possible_cpus(blade));

	uv_bau_table_bases[blade] = bau_tabp;

	return bau_tabp;
}

/*
 * finish the initialization of the per-blade control structures
 */
static void __init
uv_table_bases_finish(int blade,
		      struct bau_control *bau_tablesp,
		      struct bau_desc *adp)
{
	struct bau_control *bcp;
	int cpu;

	for_each_present_cpu(cpu) {
		if (blade != uv_cpu_to_blade_id(cpu))
			continue;

		bcp = (struct bau_control *)&per_cpu(bau_control, cpu);
		bcp->bau_msg_head = bau_tablesp->va_queue_first;
		bcp->va_queue_first = bau_tablesp->va_queue_first;
		bcp->va_queue_last = bau_tablesp->va_queue_last;
		bcp->msg_statuses = bau_tablesp->msg_statuses;
		bcp->descriptor_base = adp;
	}
}

/*
 * initialize the sending side's sending buffers
 */
static struct bau_desc * __init
uv_activation_descriptor_init(int node, int pnode)
{
	int i;
	unsigned long pa;
	unsigned long m;
	unsigned long n;
	unsigned long mmr_image;
	struct bau_desc *adp;
	struct bau_desc *ad2;

	adp = (struct bau_desc *)kmalloc_node(16384, GFP_KERNEL, node);
	BUG_ON(!adp);

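	/*
	 * The DESCRIPTOR_BASE MMR is programmed with the node (nasid)
	 * portion of the descriptors' global address (n) and the offset
	 * within that node (m), computed below from uv_gpa().
	 */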
	pa = uv_gpa(adp); /* need the real nasid */
	n = pa >> uv_nshift;
	m = pa & uv_mmask;

	mmr_image = uv_read_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE);
	if (mmr_image) {
		uv_write_global_mmr64(pnode, (unsigned long)
				      UVH_LB_BAU_SB_DESCRIPTOR_BASE,
				      (n << UV_DESC_BASE_PNODE_SHIFT | m));
	}

	for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) {
		memset(ad2, 0, sizeof(struct bau_desc));
		ad2->header.sw_ack_flag = 1;
		/*
		 * base_dest_nodeid is the first node in the partition, so
		 * the bit map will indicate partition-relative node numbers.
		 * note that base_dest_nodeid is actually a nasid.
		 */
		ad2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
		ad2->header.command = UV_NET_ENDPOINT_INTD;
		ad2->header.int_both = 1;
		/*
		 * all others need to be set to zero:
		 *   fairness chaining multilevel count replied_to
		 */
	}
	return adp;
}

/*
 * initialize the destination side's receiving buffers
 */
static struct bau_payload_queue_entry * __init
uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
{
	struct bau_payload_queue_entry *pqp;
	unsigned long pa;
	int pn;
	char *cp;

	pqp = (struct bau_payload_queue_entry *) kmalloc_node(
		(DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry),
		GFP_KERNEL, node);
	BUG_ON(!pqp);

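	/*
	 * Round the queue base up to a 32-byte boundary; the extra entry
	 * allocated above leaves room for this round-up.
	 */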
	cp = (char *)pqp + 31;
	pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
	bau_tablesp->va_queue_first = pqp;
	/*
	 * need the pnode of where the memory was really allocated
	 */
	pa = uv_gpa(pqp);
	pn = pa >> uv_nshift;
	uv_write_global_mmr64(pnode,
			      UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
			      ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
			      uv_physnodeaddr(pqp));
	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
			      uv_physnodeaddr(pqp));
	bau_tablesp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST,
			      (unsigned long)
			      uv_physnodeaddr(bau_tablesp->va_queue_last));
	memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE);

	return pqp;
}

/*
 * Initialization of each UV blade's structures
 */
static int __init uv_init_blade(int blade)
{
	int node;
	int pnode;
	unsigned long pa;
	unsigned long apicid;
	struct bau_desc *adp;
	struct bau_payload_queue_entry *pqp;
	struct bau_control *bau_tablesp;

	node = blade_to_first_node(blade);
	bau_tablesp = uv_table_bases_init(blade, node);
	pnode = uv_blade_to_pnode(blade);
	adp = uv_activation_descriptor_init(node, pnode);
	pqp = uv_payload_queue_init(node, pnode, bau_tablesp);
	uv_table_bases_finish(blade, bau_tablesp, adp);
	/*
	 * the below initialization can't be in firmware because the
	 * messaging IRQ will be determined by the OS
	 */
	apicid = blade_to_first_apicid(blade);
	pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG);
	if ((pa & 0xff) != UV_BAU_MESSAGE) {
		uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
				      ((apicid << 32) | UV_BAU_MESSAGE));
	}
	return 0;
}

/*
 * Initialization of BAU-related structures
 */
static int __init uv_bau_init(void)
{
	int blade;
	int nblades;
	int cur_cpu;

	if (!is_uv_system())
		return 0;

	uv_bau_retry_limit = 1;
	uv_nshift = uv_hub_info->n_val;
	uv_mmask = (1UL << uv_hub_info->n_val) - 1;
	nblades = uv_num_possible_blades();

	uv_bau_table_bases = (struct bau_control **)
	    kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL);
	BUG_ON(!uv_bau_table_bases);

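	/*
	 * The partition base pnode is the smallest pnode among blades that
	 * actually have cpus; distribution bit maps are built relative to it.
	 */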
	uv_partition_base_pnode = 0x7fffffff;
	for (blade = 0; blade < nblades; blade++)
		if (uv_blade_nr_possible_cpus(blade) &&
			(uv_blade_to_pnode(blade) < uv_partition_base_pnode))
			uv_partition_base_pnode = uv_blade_to_pnode(blade);
	for (blade = 0; blade < nblades; blade++)
		if (uv_blade_nr_possible_cpus(blade))
			uv_init_blade(blade);

	alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
	uv_enable_timeouts();

	return 0;
}
__initcall(uv_bau_init);
__initcall(uv_ptc_init);