/*
 * SN Platform GRU Driver
 *
 * DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/err.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"

unsigned long gru_options __read_mostly;

static struct device_driver gru_driver = {
        .name = "gru"
};

static struct device gru_device = {
        .init_name = "",
        .driver = &gru_driver,
};

struct device *grudev = &gru_device;

/*
 * Select a gru fault map to be used by the current cpu. Note that
 * multiple cpus may be using the same map.
 * ZZZ should be inline but did not work on emulator
 */
int gru_cpu_fault_map_id(void)
{
        int cpu = smp_processor_id();
        int id, core;

        core = uv_cpu_core_number(cpu);
        id = core + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
        return id;
}

/*--------- ASID Management -------------------------------------------
 *
 * Initially, assign asids sequentially from MIN_ASID .. MAX_ASID.
 * Once MAX is reached, flush the TLB & start over. However,
 * some asids may still be in use. There won't be many (percentage wise) still
 * in use. Search active contexts & determine the value of the first
 * asid in use ("x"s below). Set "limit" to this value.
 * This defines a block of assignable asids.
 *
 * When "limit" is reached, search forward from limit+1 and determine the
 * next block of assignable asids.
 *
 * Repeat until MAX_ASID is reached, then start over again.
 *
 * Each time MAX_ASID is reached, increment the asid generation. Since
 * the search for in-use asids only checks contexts with GRUs currently
 * assigned, asids in some contexts will be missed. Prior to loading
 * a context, the asid generation of the GTS asid is rechecked. If it
 * doesn't match the current generation, a new asid will be assigned.
 *
 *   0---------------x------------x---------------------x----|
 *    ^-next         ^-limit                             ^-MAX_ASID
 *
 * All asid manipulation & context loading/unloading is protected by the
 * gs_lock.
 */

/* Hit the asid limit. Start over */
static int gru_wrap_asid(struct gru_state *gru)
{
        gru_dbg(grudev, "gid %d\n", gru->gs_gid);
        STAT(asid_wrap);
        gru->gs_asid_gen++;
        return MIN_ASID;
}

/* Find the next chunk of unused asids */
static int gru_reset_asid_limit(struct gru_state *gru, int asid)
{
        int i, gid, inuse_asid, limit;

        gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
        STAT(asid_next);
        limit = MAX_ASID;
        if (asid >= limit)
                asid = gru_wrap_asid(gru);
        gru_flush_all_tlb(gru);
        gid = gru->gs_gid;
again:
        for (i = 0; i < GRU_NUM_CCH; i++) {
                if (!gru->gs_gts[i] || is_kernel_context(gru->gs_gts[i]))
                        continue;
                inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
                gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n",
                        gru->gs_gid, gru->gs_gts[i], gru->gs_gts[i]->ts_gms,
                        inuse_asid, i);
                if (inuse_asid == asid) {
                        asid += ASID_INC;
                        if (asid >= limit) {
                                /*
                                 * empty range: reset the range limit and
                                 * start over
                                 */
                                limit = MAX_ASID;
                                if (asid >= MAX_ASID)
                                        asid = gru_wrap_asid(gru);
                                goto again;
                        }
                }

                if ((inuse_asid > asid) && (inuse_asid < limit))
                        limit = inuse_asid;
        }
        gru->gs_asid_limit = limit;
        gru->gs_asid = asid;
        gru_dbg(grudev, "gid %d, new asid 0x%x, new_limit 0x%x\n", gru->gs_gid,
                asid, limit);
        return asid;
}

/* Assign a new ASID to a thread context. */
static int gru_assign_asid(struct gru_state *gru)
{
        int asid;

        gru->gs_asid += ASID_INC;
        asid = gru->gs_asid;
        if (asid >= gru->gs_asid_limit)
                asid = gru_reset_asid_limit(gru, asid);

        gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
        return asid;
}

/*
 * Clear n bits in a word. Return a word indicating the bits that were cleared.
 * Optionally, build an array of chars that contain the bit numbers allocated.
 */
static unsigned long reserve_resources(unsigned long *p, int n, int mmax,
                                       char *idx)
{
        unsigned long bits = 0;
        int i;

        while (n--) {
                i = find_first_bit(p, mmax);
                if (i == mmax)
                        BUG();
                __clear_bit(i, p);
                __set_bit(i, &bits);
                if (idx)
                        *idx++ = i;
        }
        return bits;
}

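/* Reserve "cbr_au_count" CBR allocation units from the gru's CBR map */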
unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count,
                                       char *cbmap)
{
        return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU,
                                 cbmap);
}

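/* Reserve "dsr_au_count" DSR allocation units from the gru's DSR map */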
unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count,
                                       char *dsmap)
{
        return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU,
                                 dsmap);
}

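/* Reserve a context's CBR & DSR resources & count the context as active */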
static void reserve_gru_resources(struct gru_state *gru,
                                  struct gru_thread_state *gts)
{
        gru->gs_active_contexts++;
        gts->ts_cbr_map =
                gru_reserve_cb_resources(gru, gts->ts_cbr_au_count,
                                         gts->ts_cbr_idx);
        gts->ts_dsr_map =
                gru_reserve_ds_resources(gru, gts->ts_dsr_au_count, NULL);
}

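/* Return a context's CBR & DSR resources to the gru's free maps */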
static void free_gru_resources(struct gru_state *gru,
                               struct gru_thread_state *gts)
{
        gru->gs_active_contexts--;
        gru->gs_cbr_map |= gts->ts_cbr_map;
        gru->gs_dsr_map |= gts->ts_dsr_map;
}

/*
 * Check if a GRU has sufficient free resources to satisfy an allocation
 * request. Note: GRU locks may or may not be held when this is called. If
 * not held, recheck after acquiring the appropriate locks.
 *
 * Returns 1 if sufficient resources, 0 if not
 */
static int check_gru_resources(struct gru_state *gru, int cbr_au_count,
                               int dsr_au_count, int max_active_contexts)
{
        return hweight64(gru->gs_cbr_map) >= cbr_au_count
                && hweight64(gru->gs_dsr_map) >= dsr_au_count
                && gru->gs_active_contexts < max_active_contexts;
}

/*
 * TLB management requires tracking all GRU chiplets that have loaded a GSEG
 * context.
 */
static int gru_load_mm_tracker(struct gru_state *gru,
                               struct gru_thread_state *gts)
{
        struct gru_mm_struct *gms = gts->ts_gms;
        struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid];
        unsigned short ctxbitmap = (1 << gts->ts_ctxnum);
        int asid;

        spin_lock(&gms->ms_asid_lock);
        asid = asids->mt_asid;

        spin_lock(&gru->gs_asid_lock);
        if (asid == 0 || (asids->mt_ctxbitmap == 0 && asids->mt_asid_gen !=
                          gru->gs_asid_gen)) {
                asid = gru_assign_asid(gru);
                asids->mt_asid = asid;
                asids->mt_asid_gen = gru->gs_asid_gen;
                STAT(asid_new);
        } else {
                STAT(asid_reuse);
        }
        spin_unlock(&gru->gs_asid_lock);

        BUG_ON(asids->mt_ctxbitmap & ctxbitmap);
        asids->mt_ctxbitmap |= ctxbitmap;
        if (!test_bit(gru->gs_gid, gms->ms_asidmap))
                __set_bit(gru->gs_gid, gms->ms_asidmap);
        spin_unlock(&gms->ms_asid_lock);

        gru_dbg(grudev,
                "gid %d, gts %p, gms %p, ctxnum %d, asid 0x%x, asidmap 0x%lx\n",
                gru->gs_gid, gts, gms, gts->ts_ctxnum, asid,
                gms->ms_asidmap[0]);
        return asid;
}

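/* Remove a context's ctxnum from the ASID tracker when the context is unloaded */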
static void gru_unload_mm_tracker(struct gru_state *gru,
                                  struct gru_thread_state *gts)
{
        struct gru_mm_struct *gms = gts->ts_gms;
        struct gru_mm_tracker *asids;
        unsigned short ctxbitmap;

        asids = &gms->ms_asids[gru->gs_gid];
        ctxbitmap = (1 << gts->ts_ctxnum);
        spin_lock(&gms->ms_asid_lock);
        spin_lock(&gru->gs_asid_lock);
        BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
        asids->mt_ctxbitmap ^= ctxbitmap;
        gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n",
                gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
        spin_unlock(&gru->gs_asid_lock);
        spin_unlock(&gms->ms_asid_lock);
}

/*
 * Decrement the reference count on a GTS structure. Free the structure
 * if the reference count goes to zero.
 */
void gts_drop(struct gru_thread_state *gts)
{
        if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) {
                if (gts->ts_gms)
                        gru_drop_mmu_notifier(gts->ts_gms);
                kfree(gts);
                STAT(gts_free);
        }
}

/*
 * Locate the GTS structure for the current thread.
 */
static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
                                                            *vdata, int tsid)
{
        struct gru_thread_state *gts;

        list_for_each_entry(gts, &vdata->vd_head, ts_next)
                if (gts->ts_tsid == tsid)
                        return gts;
        return NULL;
}

/*
 * Allocate a thread state structure.
 */
struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
                int cbr_au_count, int dsr_au_count,
                unsigned char tlb_preload_count, int options, int tsid)
{
        struct gru_thread_state *gts;
        struct gru_mm_struct *gms;
        int bytes;

        bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count);
        bytes += sizeof(struct gru_thread_state);
        gts = kmalloc(bytes, GFP_KERNEL);
        if (!gts)
                return ERR_PTR(-ENOMEM);

        STAT(gts_alloc);
        memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */
        atomic_set(&gts->ts_refcnt, 1);
        mutex_init(&gts->ts_ctxlock);
        gts->ts_cbr_au_count = cbr_au_count;
        gts->ts_dsr_au_count = dsr_au_count;
        gts->ts_tlb_preload_count = tlb_preload_count;
        gts->ts_user_options = options;
        gts->ts_user_blade_id = -1;
        gts->ts_user_chiplet_id = -1;
        gts->ts_tsid = tsid;
        gts->ts_ctxnum = NULLCTX;
        gts->ts_tlb_int_select = -1;
        gts->ts_cch_req_slice = -1;
        gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT);
        if (vma) {
                gts->ts_mm = current->mm;
                gts->ts_vma = vma;
                gms = gru_register_mmu_notifier();
                if (IS_ERR(gms))
                        goto err;
                gts->ts_gms = gms;
        }

        gru_dbg(grudev, "alloc gts %p\n", gts);
        return gts;

err:
        gts_drop(gts);
        return ERR_CAST(gms);
}

/*
 * Allocate a vma private data structure.
 */
struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid)
{
        struct gru_vma_data *vdata = NULL;

        vdata = kmalloc(sizeof(*vdata), GFP_KERNEL);
        if (!vdata)
                return NULL;

        STAT(vdata_alloc);
        INIT_LIST_HEAD(&vdata->vd_head);
        spin_lock_init(&vdata->vd_lock);
        gru_dbg(grudev, "alloc vdata %p\n", vdata);
        return vdata;
}

/*
 * Find the thread state structure for the current thread.
 */
struct gru_thread_state *gru_find_thread_state(struct vm_area_struct *vma,
                                               int tsid)
{
        struct gru_vma_data *vdata = vma->vm_private_data;
        struct gru_thread_state *gts;

        spin_lock(&vdata->vd_lock);
        gts = gru_find_current_gts_nolock(vdata, tsid);
        spin_unlock(&vdata->vd_lock);
        gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
        return gts;
}

/*
 * Allocate a new thread state for a GSEG. Note that racing threads may
 * each allocate a gts for the same tsid; the duplicate is detected &
 * dropped below.
 */
struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
                                                int tsid)
{
        struct gru_vma_data *vdata = vma->vm_private_data;
        struct gru_thread_state *gts, *ngts;

        gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count,
                            vdata->vd_dsr_au_count,
                            vdata->vd_tlb_preload_count,
                            vdata->vd_user_options, tsid);
        if (IS_ERR(gts))
                return gts;

        spin_lock(&vdata->vd_lock);
        ngts = gru_find_current_gts_nolock(vdata, tsid);
        if (ngts) {
                gts_drop(gts);
                gts = ngts;
                STAT(gts_double_allocate);
        } else {
                list_add(&gts->ts_next, &vdata->vd_head);
        }
        spin_unlock(&vdata->vd_lock);
        gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
        return gts;
}

/*
 * Free the GRU context assigned to the thread state.
 */
static void gru_free_gru_context(struct gru_thread_state *gts)
{
        struct gru_state *gru;

        gru = gts->ts_gru;
        gru_dbg(grudev, "gts %p, gid %d\n", gts, gru->gs_gid);

        spin_lock(&gru->gs_lock);
        gru->gs_gts[gts->ts_ctxnum] = NULL;
        free_gru_resources(gru, gts);
        BUG_ON(test_bit(gts->ts_ctxnum, &gru->gs_context_map) == 0);
        __clear_bit(gts->ts_ctxnum, &gru->gs_context_map);
        gts->ts_ctxnum = NULLCTX;
        gts->ts_gru = NULL;
        gts->ts_blade = -1;
        spin_unlock(&gru->gs_lock);

        gts_drop(gts);
        STAT(free_context);
}

/*
 * Prefetching cachelines helps hardware performance.
 * (Strictly a performance enhancement. Not functionally required).
 */
static void prefetch_data(void *p, int num, int stride)
{
        while (num-- > 0) {
                prefetchw(p);
                p += stride;
        }
}

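/* Copy one GRU handle (GRU_HANDLE_BYTES); returns bytes copied so callers can advance */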
static inline long gru_copy_handle(void *d, void *s)
{
        memcpy(d, s, GRU_HANDLE_BYTES);
        return GRU_HANDLE_BYTES;
}

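/* Prefetch a context's DSR lines & CBR/CBE handles before copying them */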
static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
                                 unsigned long cbrmap, unsigned long length)
{
        int i, scr;

        prefetch_data(gseg + GRU_DS_BASE, length / GRU_CACHE_LINE_BYTES,
                      GRU_CACHE_LINE_BYTES);

        for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
                prefetch_data(cb, 1, GRU_CACHE_LINE_BYTES);
                prefetch_data(cbe + i * GRU_HANDLE_STRIDE, 1,
                              GRU_CACHE_LINE_BYTES);
                cb += GRU_HANDLE_STRIDE;
        }
}

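/*
 * Restore CBR/CBE/DSR state from the context save area, or zero the
 * hardware state on first use (data_valid == 0).
 */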
static void gru_load_context_data(void *save, void *grubase, int ctxnum,
                                  unsigned long cbrmap, unsigned long dsrmap,
                                  int data_valid)
{
        void *gseg, *cb, *cbe;
        unsigned long length;
        int i, scr;

        gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
        cb = gseg + GRU_CB_BASE;
        cbe = grubase + GRU_CBE_BASE;
        length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
        gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

        for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
                if (data_valid) {
                        save += gru_copy_handle(cb, save);
                        save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE,
                                                save);
                } else {
                        memset(cb, 0, GRU_CACHE_LINE_BYTES);
                        memset(cbe + i * GRU_HANDLE_STRIDE, 0,
                               GRU_CACHE_LINE_BYTES);
                }
                /* Flush CBE to hide race in context restart */
                mb();
                gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
                cb += GRU_HANDLE_STRIDE;
        }

        if (data_valid)
                memcpy(gseg + GRU_DS_BASE, save, length);
        else
                memset(gseg + GRU_DS_BASE, 0, length);
}

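/* Save a context's CBR/CBE state & data segment into the context save area */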
static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
                                    unsigned long cbrmap, unsigned long dsrmap)
{
        void *gseg, *cb, *cbe;
        unsigned long length;
        int i, scr;

        gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
        cb = gseg + GRU_CB_BASE;
        cbe = grubase + GRU_CBE_BASE;
        length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;

        /* CBEs may not be coherent. Flush them from cache */
        for_each_cbr_in_allocation_map(i, &cbrmap, scr)
                gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
        mb();           /* Let the CL flush complete */

        gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

        for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
                save += gru_copy_handle(save, cb);
                save += gru_copy_handle(save, cbe + i * GRU_HANDLE_STRIDE);
                cb += GRU_HANDLE_STRIDE;
        }
        memcpy(save, gseg + GRU_DS_BASE, length);
}

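/*
 * Unload a context from the gru: stop the CCH, optionally save CB/DSR
 * state to memory, then release the gru resources held by the context.
 */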
void gru_unload_context(struct gru_thread_state *gts, int savestate)
{
        struct gru_state *gru = gts->ts_gru;
        struct gru_context_configuration_handle *cch;
        int ctxnum = gts->ts_ctxnum;

        if (!is_kernel_context(gts))
                zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
        cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

        gru_dbg(grudev, "gts %p, cbrmap 0x%lx, dsrmap 0x%lx\n",
                gts, gts->ts_cbr_map, gts->ts_dsr_map);
        lock_cch_handle(cch);
        if (cch_interrupt_sync(cch))
                BUG();

        if (!is_kernel_context(gts))
                gru_unload_mm_tracker(gru, gts);
        if (savestate) {
                gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
                                        ctxnum, gts->ts_cbr_map,
                                        gts->ts_dsr_map);
                gts->ts_data_valid = 1;
        }

        if (cch_deallocate(cch))
                BUG();
        unlock_cch_handle(cch);

        gru_free_gru_context(gts);
}

/*
 * Load a GRU context by copying it from the thread data structure in memory
 * to the GRU.
 */
void gru_load_context(struct gru_thread_state *gts)
{
        struct gru_state *gru = gts->ts_gru;
        struct gru_context_configuration_handle *cch;
        int i, err, asid, ctxnum = gts->ts_ctxnum;

        cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
        lock_cch_handle(cch);
        cch->tfm_fault_bit_enable =
                (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
                 || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
        cch->tlb_int_enable = (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
        if (cch->tlb_int_enable) {
                gts->ts_tlb_int_select = gru_cpu_fault_map_id();
                cch->tlb_int_select = gts->ts_tlb_int_select;
        }
        if (gts->ts_cch_req_slice >= 0) {
                cch->req_slice_set_enable = 1;
                cch->req_slice = gts->ts_cch_req_slice;
        } else {
                cch->req_slice_set_enable = 0;
        }
        cch->tfm_done_bit_enable = 0;
        cch->dsr_allocation_map = gts->ts_dsr_map;
        cch->cbr_allocation_map = gts->ts_cbr_map;

        if (is_kernel_context(gts)) {
                cch->unmap_enable = 1;
                cch->tfm_done_bit_enable = 1;
                cch->cb_int_enable = 1;
                cch->tlb_int_select = 0;        /* For now, ints go to cpu 0 */
        } else {
                cch->unmap_enable = 0;
                cch->tfm_done_bit_enable = 0;
                cch->cb_int_enable = 0;
                asid = gru_load_mm_tracker(gru, gts);
                for (i = 0; i < 8; i++) {
                        cch->asid[i] = asid + i;
                        cch->sizeavail[i] = gts->ts_sizeavail;
                }
        }

        err = cch_allocate(cch);
        if (err) {
                gru_dbg(grudev,
                        "err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
                        err, cch, gts, gts->ts_cbr_map, gts->ts_dsr_map);
                BUG();
        }

        gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum,
                              gts->ts_cbr_map, gts->ts_dsr_map,
                              gts->ts_data_valid);

        if (cch_start(cch))
                BUG();
        unlock_cch_handle(cch);

        gru_dbg(grudev, "gid %d, gts %p, cbrmap 0x%lx, dsrmap 0x%lx, tie %d, tis %d\n",
                gts->ts_gru->gs_gid, gts, gts->ts_cbr_map, gts->ts_dsr_map,
                (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR), gts->ts_tlb_int_select);
}

/*
 * Update fields in an active CCH:
 *  - retarget interrupts on local blade
 *  - update sizeavail mask
 */
int gru_update_cch(struct gru_thread_state *gts)
{
        struct gru_context_configuration_handle *cch;
        struct gru_state *gru = gts->ts_gru;
        int i, ctxnum = gts->ts_ctxnum, ret = 0;

        cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

        lock_cch_handle(cch);
        if (cch->state == CCHSTATE_ACTIVE) {
                if (gru->gs_gts[gts->ts_ctxnum] != gts)
                        goto exit;
                if (cch_interrupt(cch))
                        BUG();
                for (i = 0; i < 8; i++)
                        cch->sizeavail[i] = gts->ts_sizeavail;
                gts->ts_tlb_int_select = gru_cpu_fault_map_id();
                cch->tlb_int_select = gru_cpu_fault_map_id();
                cch->tfm_fault_bit_enable =
                        (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
                         || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
                if (cch_start(cch))
                        BUG();
                ret = 1;
        }
exit:
        unlock_cch_handle(cch);
        return ret;
}

/*
 * Update CCH tlb interrupt select. Required when all the following is true:
 *  - task's GRU context is loaded into a GRU
 *  - task is using interrupt notification for TLB faults
 *  - task has migrated to a different cpu on the same blade where
 *    it was previously running.
 */
static int gru_retarget_intr(struct gru_thread_state *gts)
{
        if (gts->ts_tlb_int_select < 0
            || gts->ts_tlb_int_select == gru_cpu_fault_map_id())
                return 0;

        gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
                gru_cpu_fault_map_id());
        return gru_update_cch(gts);
}

/*
 * Unload the gru context if it is not assigned to the correct blade or
 * chiplet. Misassignment can occur if the process migrates to a different
 * blade or if the user changes the selected blade/chiplet.
 */
void gru_check_context_placement(struct gru_thread_state *gts)
{
        struct gru_state *gru;
        int blade_id, chiplet_id;

        /*
         * If the current task is the context owner, verify that the
         * context is correctly placed. This test is skipped for non-owner
         * references. Pthread apps use non-owner references to the CBRs.
         */
        gru = gts->ts_gru;
        if (!gru || gts->ts_tgid_owner != current->tgid)
                return;

        blade_id = gts->ts_user_blade_id;
        if (blade_id < 0)
                blade_id = uv_numa_blade_id();

        chiplet_id = gts->ts_user_chiplet_id;
        if (gru->gs_blade_id != blade_id ||
            (chiplet_id >= 0 && chiplet_id != gru->gs_chiplet_id)) {
                STAT(check_context_unload);
                gru_unload_context(gts, 1);
        } else if (gru_retarget_intr(gts)) {
                STAT(check_context_retarget_intr);
        }
}

/*
 * Insufficient GRU resources available on the local blade. Steal a context from
 * a process. This is a hack until a _real_ resource scheduler is written....
 */
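/* Round-robin helpers used when scanning for a context to steal */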
#define next_ctxnum(n)  ((n) < GRU_NUM_CCH - 2 ? (n) + 1 : 0)
#define next_gru(b, g)  (((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ? \
                                ((g)+1) : &(b)->bs_grus[0])

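/* Try to lock a gts for stealing; kernel contexts use the blade kgts semaphore */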
static int is_gts_stealable(struct gru_thread_state *gts,
                            struct gru_blade_state *bs)
{
        if (is_kernel_context(gts))
                return down_write_trylock(&bs->bs_kgts_sema);
        else
                return mutex_trylock(&gts->ts_ctxlock);
}

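/* Drop the lock acquired by is_gts_stealable() & count the steal */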
static void gts_stolen(struct gru_thread_state *gts,
                       struct gru_blade_state *bs)
{
        if (is_kernel_context(gts)) {
                up_write(&bs->bs_kgts_sema);
                STAT(steal_kernel_context);
        } else {
                mutex_unlock(&gts->ts_ctxlock);
                STAT(steal_user_context);
        }
}

void gru_steal_context(struct gru_thread_state *gts)
{
        struct gru_blade_state *blade;
        struct gru_state *gru, *gru0;
        struct gru_thread_state *ngts = NULL;
        int ctxnum, ctxnum0, flag = 0, cbr, dsr;
        int blade_id = gts->ts_user_blade_id;
        int chiplet_id = gts->ts_user_chiplet_id;

        if (blade_id < 0)
                blade_id = uv_numa_blade_id();
        cbr = gts->ts_cbr_au_count;
        dsr = gts->ts_dsr_au_count;

        blade = gru_base[blade_id];
        spin_lock(&blade->bs_lock);

        ctxnum = next_ctxnum(blade->bs_lru_ctxnum);
        gru = blade->bs_lru_gru;
        if (ctxnum == 0)
                gru = next_gru(blade, gru);
        blade->bs_lru_gru = gru;
        blade->bs_lru_ctxnum = ctxnum;
        ctxnum0 = ctxnum;
        gru0 = gru;
        while (1) {
                if (chiplet_id < 0 || chiplet_id == gru->gs_chiplet_id) {
                        if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
                                break;
                        spin_lock(&gru->gs_lock);
                        for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
                                if (flag && gru == gru0 && ctxnum == ctxnum0)
                                        break;
                                ngts = gru->gs_gts[ctxnum];
                                /*
                                 * We are grabbing locks out of order, so trylock is
                                 * needed. GTSs are usually not locked, so the odds of
                                 * success are high. If trylock fails, try to steal a
                                 * different GSEG.
                                 */
                                if (ngts && is_gts_stealable(ngts, blade))
                                        break;
                                ngts = NULL;
                        }
                        spin_unlock(&gru->gs_lock);
                        if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
                                break;
                }
                if (flag && gru == gru0)
                        break;
                flag = 1;
                ctxnum = 0;
                gru = next_gru(blade, gru);
        }
        spin_unlock(&blade->bs_lock);

        if (ngts) {
                gts->ustats.context_stolen++;
                ngts->ts_steal_jiffies = jiffies;
                gru_unload_context(ngts, is_kernel_context(ngts) ? 0 : 1);
                gts_stolen(ngts, blade);
        } else {
                STAT(steal_context_failed);
        }
        gru_dbg(grudev,
                "stole gid %d, ctxnum %d from gts %p. Need cb %d, ds %d;"
                " avail cb %ld, ds %ld\n",
                gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map),
                hweight64(gru->gs_dsr_map));
}

/*
 * Assign a gru context.
 */
static int gru_assign_context_number(struct gru_state *gru)
{
        int ctxnum;

        ctxnum = find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
        __set_bit(ctxnum, &gru->gs_context_map);
        return ctxnum;
}

/*
 * Scan the GRUs on the local blade & assign a GRU context.
 */
struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts)
{
        struct gru_state *gru, *grux;
        int i, max_active_contexts;
        int blade_id = gts->ts_user_blade_id;
        int chiplet_id = gts->ts_user_chiplet_id;

        if (blade_id < 0)
                blade_id = uv_numa_blade_id();
again:
        gru = NULL;
        max_active_contexts = GRU_NUM_CCH;
        for_each_gru_on_blade(grux, blade_id, i) {
                if (chiplet_id >= 0 && chiplet_id != grux->gs_chiplet_id)
                        continue;
                if (check_gru_resources(grux, gts->ts_cbr_au_count,
                                        gts->ts_dsr_au_count,
                                        max_active_contexts)) {
                        gru = grux;
                        max_active_contexts = grux->gs_active_contexts;
                        if (max_active_contexts == 0)
                                break;
                }
        }

        if (gru) {
                spin_lock(&gru->gs_lock);
                if (!check_gru_resources(gru, gts->ts_cbr_au_count,
                                         gts->ts_dsr_au_count, GRU_NUM_CCH)) {
                        spin_unlock(&gru->gs_lock);
                        goto again;
                }
                reserve_gru_resources(gru, gts);
                gts->ts_gru = gru;
                gts->ts_blade = gru->gs_blade_id;
                gts->ts_ctxnum = gru_assign_context_number(gru);
                atomic_inc(&gts->ts_refcnt);
                gru->gs_gts[gts->ts_ctxnum] = gts;
                spin_unlock(&gru->gs_lock);

                STAT(assign_context);
                gru_dbg(grudev,
                        "gseg %p, gts %p, gid %d, ctx %d, cbr %d, dsr %d\n",
                        gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts,
                        gts->ts_gru->gs_gid, gts->ts_ctxnum,
                        gts->ts_cbr_au_count, gts->ts_dsr_au_count);
        } else {
                gru_dbg(grudev, "failed to allocate a GTS\n");
                STAT(assign_context_failed);
        }

        return gru;
}

/*
 * gru_fault
 *
 * Map the user's GRU segment.
 *
 * Note: gru segments are always mmapped on GRU_GSEG_PAGESIZE boundaries.
 */
int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct gru_thread_state *gts;
        unsigned long paddr, vaddr;

        vaddr = (unsigned long)vmf->virtual_address;
        gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
                vma, vaddr, GSEG_BASE(vaddr));
        STAT(nopfn);

        /* The following check ensures vaddr is a valid address in the VMA */
        gts = gru_find_thread_state(vma, TSID(vaddr, vma));
        if (!gts)
                return VM_FAULT_SIGBUS;

again:
        mutex_lock(&gts->ts_ctxlock);
        preempt_disable();

        gru_check_context_placement(gts);

        if (!gts->ts_gru) {
                STAT(load_user_context);
                if (!gru_assign_gru_context(gts)) {
                        preempt_enable();
                        mutex_unlock(&gts->ts_ctxlock);
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(GRU_ASSIGN_DELAY);  /* true hack ZZZ */
                        if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies)
                                gru_steal_context(gts);
                        goto again;
                }
                gru_load_context(gts);
                paddr = gseg_physical_address(gts->ts_gru, gts->ts_ctxnum);
                remap_pfn_range(vma, vaddr & ~(GRU_GSEG_PAGESIZE - 1),
                                paddr >> PAGE_SHIFT, GRU_GSEG_PAGESIZE,
                                vma->vm_page_prot);
        }

        preempt_enable();
        mutex_unlock(&gts->ts_ctxlock);

        return VM_FAULT_NOPAGE;
}