/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) sn2-based functions.
 *
 *      Architecture specific implementation of common functions.
 *
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <asm/uncached.h>
#include <asm/sn/sn_sal.h>
#include "xpc.h"

struct xpc_vars *xpc_vars;
static struct xpc_vars_part_sn2 *xpc_vars_part; /* >>> Add _sn2 suffix? */

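/*
 * Initialize the SN2-specific portion of the reserved page: set up xpc_vars
 * and the xpc_vars_part array that immediately follows it, allocate (or
 * re-use) the page of cross-partition AMOs, and initialize the activate IRQ
 * and engaged-partitions AMO variables.
 */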
static enum xp_retval
xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp)
{
        AMO_t *amos_page;
        u64 nasid_array = 0;
        int i;
        int ret;

        xpc_vars = XPC_RP_VARS(rp);

        rp->sn.vars_pa = __pa(xpc_vars);

        /* vars_part array follows immediately after vars */
        xpc_vars_part = (struct xpc_vars_part_sn2 *)((u8 *)XPC_RP_VARS(rp) +
                                                     XPC_RP_VARS_SIZE);

        /*
         * Before clearing xpc_vars, see if a page of AMOs had been previously
         * allocated. If not we'll need to allocate one and set permissions
         * so that cross-partition AMOs are allowed.
         *
         * The allocated AMO page needs MCA reporting to remain disabled after
         * XPC has unloaded. To make this work, we keep a copy of the pointer
         * to this page (i.e., amos_page) in the struct xpc_vars structure,
         * which is pointed to by the reserved page, and re-use that saved copy
         * on subsequent loads of XPC. This AMO page is never freed, and its
         * memory protections are never restricted.
         */
        amos_page = xpc_vars->amos_page;
        if (amos_page == NULL) {
                amos_page = (AMO_t *)TO_AMO(uncached_alloc_page(0, 1));
                if (amos_page == NULL) {
                        dev_err(xpc_part, "can't allocate page of AMOs\n");
                        return xpNoMemory;
                }

                /*
                 * Open up AMO-R/W to cpu. This is done for Shub 1.1 systems
                 * when xpc_allow_IPI_ops() is called via xpc_hb_init().
                 */
                if (!enable_shub_wars_1_1()) {
                        ret = sn_change_memprotect(ia64_tpa((u64)amos_page),
                                                   PAGE_SIZE,
                                                   SN_MEMPROT_ACCESS_CLASS_1,
                                                   &nasid_array);
                        if (ret != 0) {
                                dev_err(xpc_part, "can't change memory "
                                        "protections\n");
                                uncached_free_page(__IA64_UNCACHED_OFFSET |
                                                   TO_PHYS((u64)amos_page), 1);
                                return xpSalError;
                        }
                }
        }

        /* clear xpc_vars */
        memset(xpc_vars, 0, sizeof(struct xpc_vars));

        xpc_vars->version = XPC_V_VERSION;
        xpc_vars->act_nasid = cpuid_to_nasid(0);
        xpc_vars->act_phys_cpuid = cpu_physical_id(0);
        xpc_vars->vars_part_pa = __pa(xpc_vars_part);
        xpc_vars->amos_page_pa = ia64_tpa((u64)amos_page);
        xpc_vars->amos_page = amos_page;        /* save for next load of XPC */

        /* clear xpc_vars_part */
        memset((u64 *)xpc_vars_part, 0, sizeof(struct xpc_vars_part_sn2) *
               xp_max_npartitions);

        /* initialize the activate IRQ related AMO variables */
        for (i = 0; i < xp_nasid_mask_words; i++)
                (void)xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i);

        /* initialize the engaged remote partitions related AMO variables */
        (void)xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO);
        (void)xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO);

        return xpSuccess;
}

/*
 * Setup the infrastructure necessary to support XPartition Communication
 * between the specified remote partition and the local one.
 */
static enum xp_retval
xpc_setup_infrastructure_sn2(struct xpc_partition *part)
{
        enum xp_retval retval;
        int ret;
        int cpuid;
        int ch_number;
        struct xpc_channel *ch;
        struct timer_list *timer;
        short partid = XPC_PARTID(part);

        /*
         * Allocate all of the channel structures as a contiguous chunk of
         * memory.
         */
        DBUG_ON(part->channels != NULL);
        part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_MAX_NCHANNELS,
                                 GFP_KERNEL);
        if (part->channels == NULL) {
                dev_err(xpc_chan, "can't get memory for channels\n");
                return xpNoMemory;
        }

        /* allocate all the required GET/PUT values */

        part->local_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
                                                        GFP_KERNEL,
                                                        &part->local_GPs_base);
        if (part->local_GPs == NULL) {
                dev_err(xpc_chan, "can't get memory for local get/put "
                        "values\n");
                retval = xpNoMemory;
                goto out_1;
        }

        part->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
                                                         GFP_KERNEL,
                                                         &part->remote_GPs_base);
        if (part->remote_GPs == NULL) {
                dev_err(xpc_chan, "can't get memory for remote get/put "
                        "values\n");
                retval = xpNoMemory;
                goto out_2;
        }

        part->remote_GPs_pa = 0;

        /* allocate all the required open and close args */

        part->local_openclose_args =
            xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
                                          &part->local_openclose_args_base);
        if (part->local_openclose_args == NULL) {
                dev_err(xpc_chan, "can't get memory for local connect args\n");
                retval = xpNoMemory;
                goto out_3;
        }

        part->remote_openclose_args =
            xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
                                          &part->remote_openclose_args_base);
        if (part->remote_openclose_args == NULL) {
                dev_err(xpc_chan, "can't get memory for remote connect args\n");
                retval = xpNoMemory;
                goto out_4;
        }

        part->remote_openclose_args_pa = 0;

        part->local_IPI_amo_va = xpc_IPI_init(partid);
        part->local_IPI_amo = 0;
        spin_lock_init(&part->IPI_lock);

        part->remote_IPI_nasid = 0;
        part->remote_IPI_phys_cpuid = 0;
        part->remote_IPI_amo_va = NULL;

        atomic_set(&part->channel_mgr_requests, 1);
        init_waitqueue_head(&part->channel_mgr_wq);

        sprintf(part->IPI_owner, "xpc%02d", partid);
        ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, IRQF_SHARED,
                          part->IPI_owner, (void *)(u64)partid);
        if (ret != 0) {
                dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
                        "errno=%d\n", -ret);
                retval = xpLackOfResources;
                goto out_5;
        }

        /* Setup a timer to check for dropped IPIs */
        timer = &part->dropped_IPI_timer;
        init_timer(timer);
        timer->function = (void (*)(unsigned long))xpc_dropped_IPI_check;
        timer->data = (unsigned long)part;
        timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT_INTERVAL;
        add_timer(timer);

        part->nchannels = XPC_MAX_NCHANNELS;

        atomic_set(&part->nchannels_active, 0);
        atomic_set(&part->nchannels_engaged, 0);

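        /* initialize every channel to its disconnected, idle default state */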
        for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
                ch = &part->channels[ch_number];

                ch->partid = partid;
                ch->number = ch_number;
                ch->flags = XPC_C_DISCONNECTED;

                ch->local_GP = &part->local_GPs[ch_number];
                ch->local_openclose_args =
                    &part->local_openclose_args[ch_number];

                atomic_set(&ch->kthreads_assigned, 0);
                atomic_set(&ch->kthreads_idle, 0);
                atomic_set(&ch->kthreads_active, 0);

                atomic_set(&ch->references, 0);
                atomic_set(&ch->n_to_notify, 0);

                spin_lock_init(&ch->lock);
                mutex_init(&ch->msg_to_pull_mutex);
                init_completion(&ch->wdisconnect_wait);

                atomic_set(&ch->n_on_msg_allocate_wq, 0);
                init_waitqueue_head(&ch->msg_allocate_wq);
                init_waitqueue_head(&ch->idle_wq);
        }

        /*
         * With the setting of the partition setup_state to XPC_P_SETUP, we're
         * declaring that this partition is ready to go.
         */
        part->setup_state = XPC_P_SETUP;

        /*
         * Setup the per partition specific variables required by the
         * remote partition to establish channel connections with us.
         *
         * The setting of the magic # indicates that these per partition
         * specific variables are ready to be used.
         */
        xpc_vars_part[partid].GPs_pa = __pa(part->local_GPs);
        xpc_vars_part[partid].openclose_args_pa =
            __pa(part->local_openclose_args);
        xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va);
        cpuid = raw_smp_processor_id();  /* any CPU in this partition will do */
        xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(cpuid);
        xpc_vars_part[partid].IPI_phys_cpuid = cpu_physical_id(cpuid);
        xpc_vars_part[partid].nchannels = part->nchannels;
        xpc_vars_part[partid].magic = XPC_VP_MAGIC1;

        return xpSuccess;

        /* setup of infrastructure failed */
out_5:
        kfree(part->remote_openclose_args_base);
        part->remote_openclose_args = NULL;
out_4:
        kfree(part->local_openclose_args_base);
        part->local_openclose_args = NULL;
out_3:
        kfree(part->remote_GPs_base);
        part->remote_GPs = NULL;
out_2:
        kfree(part->local_GPs_base);
        part->local_GPs = NULL;
out_1:
        kfree(part->channels);
        part->channels = NULL;
        return retval;
}

/*
 * Teardown the infrastructure necessary to support XPartition Communication
 * between the specified remote partition and the local one.
 */
static void
xpc_teardown_infrastructure_sn2(struct xpc_partition *part)
{
        short partid = XPC_PARTID(part);

        /*
         * We start off by making this partition inaccessible to local
         * processes by marking it as no longer setup. Then we make it
         * inaccessible to remote processes by clearing the XPC per partition
         * specific variable's magic # (which indicates that these variables
         * are no longer valid) and by ignoring all XPC notify IPIs sent to
         * this partition.
         */

        DBUG_ON(atomic_read(&part->nchannels_engaged) != 0);
        DBUG_ON(atomic_read(&part->nchannels_active) != 0);
        DBUG_ON(part->setup_state != XPC_P_SETUP);
        part->setup_state = XPC_P_WTEARDOWN;

        xpc_vars_part[partid].magic = 0;

        free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid);

        /*
         * Before proceeding with the teardown we have to wait until all
         * existing references cease.
         */
        wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));

        /* now we can begin tearing down the infrastructure */

        part->setup_state = XPC_P_TORNDOWN;

        /* in case we've still got outstanding timers registered... */
        del_timer_sync(&part->dropped_IPI_timer);

        kfree(part->remote_openclose_args_base);
        part->remote_openclose_args = NULL;
        kfree(part->local_openclose_args_base);
        part->local_openclose_args = NULL;
        kfree(part->remote_GPs_base);
        part->remote_GPs = NULL;
        kfree(part->local_GPs_base);
        part->local_GPs = NULL;
        kfree(part->channels);
        part->channels = NULL;
        part->local_IPI_amo_va = NULL;
}

/*
 * Create a wrapper that hides the underlying mechanism for pulling a cacheline
 * (or multiple cachelines) from a remote partition.
 *
 * src must be a cacheline aligned physical address on the remote partition.
 * dst must be a cacheline aligned virtual address on this partition.
 * cnt must be cacheline sized
 */
/* >>> Replace this function by call to xp_remote_memcpy() or bte_copy()? */
static enum xp_retval
xpc_pull_remote_cachelines_sn2(struct xpc_partition *part, void *dst,
                               const void *src, size_t cnt)
{
        enum xp_retval ret;

        DBUG_ON((u64)src != L1_CACHE_ALIGN((u64)src));
        DBUG_ON((u64)dst != L1_CACHE_ALIGN((u64)dst));
        DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));

        if (part->act_state == XPC_P_DEACTIVATING)
                return part->reason;

        ret = xp_remote_memcpy(dst, src, cnt);
        if (ret != xpSuccess) {
                dev_dbg(xpc_chan, "xp_remote_memcpy() from partition %d failed,"
                        " ret=%d\n", XPC_PARTID(part), ret);
        }
        return ret;
}

/*
 * Pull the remote per partition specific variables from the specified
 * partition.
 */
static enum xp_retval
xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
{
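        /*
         * A buffer twice the size of a cacheline is used so that a fully
         * cacheline aligned region is guaranteed to be available within it
         * for the pulled copy of the remote entry.
         */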
        u8 buffer[L1_CACHE_BYTES * 2];
        struct xpc_vars_part_sn2 *pulled_entry_cacheline =
            (struct xpc_vars_part_sn2 *)L1_CACHE_ALIGN((u64)buffer);
        struct xpc_vars_part_sn2 *pulled_entry;
        u64 remote_entry_cacheline_pa, remote_entry_pa;
        short partid = XPC_PARTID(part);
        enum xp_retval ret;

        /* pull the cacheline that contains the variables we're interested in */

        DBUG_ON(part->remote_vars_part_pa !=
                L1_CACHE_ALIGN(part->remote_vars_part_pa));
        DBUG_ON(sizeof(struct xpc_vars_part_sn2) != L1_CACHE_BYTES / 2);

        remote_entry_pa = part->remote_vars_part_pa +
            sn_partition_id * sizeof(struct xpc_vars_part_sn2);

        remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));

        pulled_entry = (struct xpc_vars_part_sn2 *)((u64)pulled_entry_cacheline
                                                    + (remote_entry_pa &
                                                       (L1_CACHE_BYTES - 1)));

        ret = xpc_pull_remote_cachelines_sn2(part, pulled_entry_cacheline,
                                             (void *)remote_entry_cacheline_pa,
                                             L1_CACHE_BYTES);
        if (ret != xpSuccess) {
                dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
                        "partition %d, ret=%d\n", partid, ret);
                return ret;
        }

        /* see if they've been set up yet */

        if (pulled_entry->magic != XPC_VP_MAGIC1 &&
            pulled_entry->magic != XPC_VP_MAGIC2) {

                if (pulled_entry->magic != 0) {
                        dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
                                "partition %d has bad magic value (=0x%lx)\n",
                                partid, sn_partition_id, pulled_entry->magic);
                        return xpBadMagic;
                }

                /* they've not been initialized yet */
                return xpRetry;
        }

        if (xpc_vars_part[partid].magic == XPC_VP_MAGIC1) {

                /* validate the variables */

                if (pulled_entry->GPs_pa == 0 ||
                    pulled_entry->openclose_args_pa == 0 ||
                    pulled_entry->IPI_amo_pa == 0) {

                        dev_err(xpc_chan, "partition %d's XPC vars_part for "
                                "partition %d are not valid\n", partid,
                                sn_partition_id);
                        return xpInvalidAddress;
                }

                /* the variables we imported look to be valid */

                part->remote_GPs_pa = pulled_entry->GPs_pa;
                part->remote_openclose_args_pa =
                    pulled_entry->openclose_args_pa;
                part->remote_IPI_amo_va =
                    (AMO_t *)__va(pulled_entry->IPI_amo_pa);
                part->remote_IPI_nasid = pulled_entry->IPI_nasid;
                part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid;

                if (part->nchannels > pulled_entry->nchannels)
                        part->nchannels = pulled_entry->nchannels;

                /* let the other side know that we've pulled their variables */

                xpc_vars_part[partid].magic = XPC_VP_MAGIC2;
        }

        if (pulled_entry->magic == XPC_VP_MAGIC1)
                return xpRetry;

        return xpSuccess;
}

/*
 * Establish first contact with the remote partition. This involves pulling
 * the XPC per partition variables from the remote partition and waiting for
 * the remote partition to pull ours.
 */
static enum xp_retval
xpc_make_first_contact_sn2(struct xpc_partition *part)
{
        enum xp_retval ret;

        while ((ret = xpc_pull_remote_vars_part_sn2(part)) != xpSuccess) {
                if (ret != xpRetry) {
                        XPC_DEACTIVATE_PARTITION(part, ret);
                        return ret;
                }

                dev_dbg(xpc_part, "waiting to make first contact with "
                        "partition %d\n", XPC_PARTID(part));

                /* wait a 1/4 of a second or so */
                (void)msleep_interruptible(250);

                if (part->act_state == XPC_P_DEACTIVATING)
                        return part->reason;
        }

        return xpSuccess;
}

/*
 * Get the IPI flags and pull the openclose args and/or remote GPs as needed.
 */
static u64
xpc_get_IPI_flags_sn2(struct xpc_partition *part)
{
        unsigned long irq_flags;
        u64 IPI_amo;
        enum xp_retval ret;

        /*
         * See if there are any IPI flags to be handled.
         */

        spin_lock_irqsave(&part->IPI_lock, irq_flags);
        IPI_amo = part->local_IPI_amo;
        if (IPI_amo != 0)
                part->local_IPI_amo = 0;

        spin_unlock_irqrestore(&part->IPI_lock, irq_flags);

        if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) {
                ret = xpc_pull_remote_cachelines_sn2(part,
                                        part->remote_openclose_args,
                                        (void *)part->remote_openclose_args_pa,
                                        XPC_OPENCLOSE_ARGS_SIZE);
                if (ret != xpSuccess) {
                        XPC_DEACTIVATE_PARTITION(part, ret);

                        dev_dbg(xpc_chan, "failed to pull openclose args from "
                                "partition %d, ret=%d\n", XPC_PARTID(part),
                                ret);

                        /* don't bother processing IPIs anymore */
                        IPI_amo = 0;
                }
        }

        if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) {
                ret = xpc_pull_remote_cachelines_sn2(part, part->remote_GPs,
                                                     (void *)part->remote_GPs_pa,
                                                     XPC_GP_SIZE);
                if (ret != xpSuccess) {
                        XPC_DEACTIVATE_PARTITION(part, ret);

                        dev_dbg(xpc_chan, "failed to pull GPs from partition "
                                "%d, ret=%d\n", XPC_PARTID(part), ret);

                        /* don't bother processing IPIs anymore */
                        IPI_amo = 0;
                }
        }

        return IPI_amo;
}

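/*
 * Pull messages from the remote partition's message queue into the local
 * cached copy, up to and including the message indexed by 'get', and return
 * a pointer to that message. Returns NULL if interrupted by a signal or if
 * the pull fails.
 */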
static struct xpc_msg *
xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get)
{
        struct xpc_partition *part = &xpc_partitions[ch->partid];
        struct xpc_msg *remote_msg, *msg;
        u32 msg_index, nmsgs;
        u64 msg_offset;
        enum xp_retval ret;

        if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) {
                /* we were interrupted by a signal */
                return NULL;
        }

        while (get >= ch->next_msg_to_pull) {

                /* pull as many messages as are ready and able to be pulled */

                msg_index = ch->next_msg_to_pull % ch->remote_nentries;

                DBUG_ON(ch->next_msg_to_pull >= ch->w_remote_GP.put);
                nmsgs = ch->w_remote_GP.put - ch->next_msg_to_pull;
                if (msg_index + nmsgs > ch->remote_nentries) {
                        /* ignore the ones that wrap the msg queue for now */
                        nmsgs = ch->remote_nentries - msg_index;
                }

                msg_offset = msg_index * ch->msg_size;
                msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset);
                remote_msg = (struct xpc_msg *)(ch->remote_msgqueue_pa +
                                                msg_offset);

                ret = xpc_pull_remote_cachelines_sn2(part, msg, remote_msg,
                                                     nmsgs * ch->msg_size);
                if (ret != xpSuccess) {

                        dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
                                " msg %ld from partition %d, channel=%d, "
                                "ret=%d\n", nmsgs, ch->next_msg_to_pull,
                                ch->partid, ch->number, ret);

                        XPC_DEACTIVATE_PARTITION(part, ret);

                        mutex_unlock(&ch->msg_to_pull_mutex);
                        return NULL;
                }

                ch->next_msg_to_pull += nmsgs;
        }

        mutex_unlock(&ch->msg_to_pull_mutex);

        /* return the message we were looking for */
        msg_offset = (get % ch->remote_nentries) * ch->msg_size;
        msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset);

        return msg;
}

/*
 * Get a message to be delivered.
 */
static struct xpc_msg *
xpc_get_deliverable_msg_sn2(struct xpc_channel *ch)
{
        struct xpc_msg *msg = NULL;
        s64 get;

        do {
                if (ch->flags & XPC_C_DISCONNECTING)
                        break;

                get = ch->w_local_GP.get;
                rmb();  /* guarantee that .get loads before .put */
                if (get == ch->w_remote_GP.put)
                        break;

                /*
                 * There are messages waiting to be pulled and delivered.
                 * We need to try to secure one for ourselves. We'll do this
                 * by trying to increment w_local_GP.get and hope that no one
                 * else beats us to it. If they do, we'll simply have to try
                 * again for the next one.
                 */

                if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) {
                        /* we got the entry referenced by get */

                        dev_dbg(xpc_chan, "w_local_GP.get changed to %ld, "
                                "partid=%d, channel=%d\n", get + 1,
                                ch->partid, ch->number);

                        /* pull the message from the remote partition */

                        msg = xpc_pull_remote_msg_sn2(ch, get);

                        DBUG_ON(msg != NULL && msg->number != get);
                        DBUG_ON(msg != NULL && (msg->flags & XPC_M_DONE));
                        DBUG_ON(msg != NULL && !(msg->flags & XPC_M_READY));

                        break;
                }

        } while (1);

        return msg;
}

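/*
 * Register the sn2-specific implementations of the architecture dependent
 * XPC function pointers.
 */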
void
xpc_init_sn2(void)
{
        xpc_rsvd_page_init = xpc_rsvd_page_init_sn2;
        xpc_setup_infrastructure = xpc_setup_infrastructure_sn2;
        xpc_teardown_infrastructure = xpc_teardown_infrastructure_sn2;
        xpc_make_first_contact = xpc_make_first_contact_sn2;
        xpc_get_IPI_flags = xpc_get_IPI_flags_sn2;
        xpc_get_deliverable_msg = xpc_get_deliverable_msg_sn2;
}

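/*
 * sn2-specific cleanup; currently there is nothing to undo.
 */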
void
xpc_exit_sn2(void)
{
}