/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/tsb.h>
#include <asm/tlb.h>
#include <asm/oplib.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

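/* The TSB is a direct-mapped cache of TLB translations: an entry's slot
 * is a pure function of its virtual address.  For example, with 8K base
 * pages (hash_shift == PAGE_SHIFT == 13) and an 8K TSB (512 16-byte
 * entries), vaddr 0x100000 hashes to slot (0x100000 >> 13) & 511 == 128.
 */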
static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
{
	vaddr >>= hash_shift;
	return vaddr & (nentries - 1);
}

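/* TSB tags hold bits 63:22 of the virtual address (matching the
 * "v >> 22" in __flush_tsb_one_entry() below), so equality here means
 * the entry translates the address in question.
 */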
static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
	return (tag == (vaddr >> 22));
}

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */

void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, PAGE_SHIFT,
					      KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent->tag, v))
			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
	}
}

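/* Flush a single entry from one TSB.  "tsb" is the TSB base in whatever
 * address form the caller has arranged for this cpu type (physical on
 * cheetah_plus/hypervisor, virtual otherwise).  Bit 0 of the vaddr is
 * masked off first; the TLB batching code uses it as a side-band flag.
 */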
static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
				  unsigned long hash_shift,
				  unsigned long nentries)
{
	unsigned long tag, ent, hash;

	v &= ~0x1UL;
	hash = tsb_hash(v, hash_shift, nentries);
	ent = tsb + (hash * sizeof(struct tsb));
	tag = (v >> 22UL);

	tsb_flush(ent, tag);
}

static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
			    unsigned long tsb, unsigned long nentries)
{
	unsigned long i;

	for (i = 0; i < tb->tlb_nr; i++)
		__flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
}

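/* Flush every address in a TLB batch from this mm's TSB(s).
 * cheetah_plus and sun4v hypervisor chips access their TSBs by
 * physical address, hence the __pa() conversions; older chips go
 * through the locked virtual mapping set up in setup_tsb_params().
 */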
void flush_tsb_user(struct tlb_batch *tb)
{
	struct mm_struct *mm = tb->mm;
	unsigned long nentries, base, flags;

	spin_lock_irqsave(&mm->context.lock, flags);

	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		base = __pa(base);
	__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);

#ifdef CONFIG_HUGETLB_PAGE
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		__flush_tsb_one(tb, HPAGE_SHIFT, base, nentries);
	}
#endif
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

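/* Same as flush_tsb_user(), but for a caller that has a single
 * address in hand rather than a tlb_batch.
 */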
void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
{
	unsigned long nentries, base, flags;

	spin_lock_irqsave(&mm->context.lock, flags);

	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		base = __pa(base);
	__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		__flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
	}
#endif
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K
#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_64K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_64K
#else
#error Broken base page size setting...
#endif

#ifdef CONFIG_HUGETLB_PAGE
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_64K
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_64K
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_512K
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_512K
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_4MB
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_4MB
#else
#error Broken huge page size setting...
#endif
#endif

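/* Fill in the TSB register value, mapping information and (on sun4v)
 * the hypervisor TSB descriptor for a newly allocated TSB.  The low
 * three bits of tsb_reg_val encode the size (0 == 8K ... 7 == 1MB);
 * tsb_destroy_one() reads them back to find the kmem cache index.
 */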
static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_block[tsb_idx].tsb_nentries =
		tsb_bytes / sizeof(struct tsb);

	switch (tsb_idx) {
	case MM_TSB_BASE:
		base = TSBMAP_8K_BASE;
		break;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	case MM_TSB_HUGE:
		base = TSBMAP_4M_BASE;
		break;
#endif
	default:
		BUG();
	}

	tte = pgprot_val(PAGE_KERNEL_LOCKED);
	tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
		base += (tsb_paddr & 8192);
#endif
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
		page_sz = 4 * 1024 * 1024;
		break;

	default:
		printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n",
		       current->comm, current->pid, tsb_bytes);
		do_exit(SIGSEGV);
	}
	tte |= pte_sz_bits(page_sz);

	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
		/* Physical mapping, no locked TLB entry for TSB. */
		tsb_reg |= tsb_paddr;

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
	} else {
		tsb_reg |= base;
		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
		tte |= (tsb_paddr & ~(page_sz - 1UL));

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
	}

	/* Set up the Hypervisor TSB descriptor. */
	if (tlb_type == hypervisor) {
		struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];

		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_idx = HV_PGSZ_IDX_BASE;
			break;
#ifdef CONFIG_HUGETLB_PAGE
		case MM_TSB_HUGE:
			hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->assoc = 1;
		hp->num_ttes = tsb_bytes / 16;
		hp->ctx_idx = 0;
		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_mask = HV_PGSZ_MASK_BASE;
			break;
#ifdef CONFIG_HUGETLB_PAGE
		case MM_TSB_HUGE:
			hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->tsb_base = tsb_paddr;
		hp->resv = 0;
	}
}

struct kmem_cache *pgtable_cache __read_mostly;

static struct kmem_cache *tsb_caches[8] __read_mostly;

static const char *tsb_cache_names[8] = {
	"tsb_8KB",
	"tsb_16KB",
	"tsb_32KB",
	"tsb_64KB",
	"tsb_128KB",
	"tsb_256KB",
	"tsb_512KB",
	"tsb_1MB",
};

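/* tsb_caches[i] backs TSBs of size 8192 << i.  Passing the size as the
 * alignment to kmem_cache_create() gives us the size-aligned,
 * physically contiguous memory a TSB requires.
 */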
void __init pgtable_cache_init(void)
{
	unsigned long i;

	pgtable_cache = kmem_cache_create("pgtable_cache",
					  PAGE_SIZE, PAGE_SIZE,
					  0,
					  _clear_page);
	if (!pgtable_cache) {
		prom_printf("pgtable_cache_init(): Could not create!\n");
		prom_halt();
	}

	for (i = 0; i < 8; i++) {
		unsigned long size = 8192 << i;
		const char *name = tsb_cache_names[i];

		tsb_caches[i] = kmem_cache_create(name,
						  size, size,
						  0, NULL);
		if (!tsb_caches[i]) {
			prom_printf("Could not create %s cache\n", name);
			prom_halt();
		}
	}
}

int sysctl_tsb_ratio = -2;

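/* Convert a TSB size to the RSS at which we should grow it.  With the
 * default sysctl_tsb_ratio of -2 this is num_ents - num_ents / 4,
 * i.e. we grow once RSS reaches 3/4 of the TSB's capacity.
 */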
static unsigned long tsb_size_to_rss_limit(unsigned long new_size)
{
	unsigned long num_ents = (new_size / sizeof(struct tsb));

	if (sysctl_tsb_ratio < 0)
		return num_ents - (num_ents >> -sysctl_tsb_ratio);
	else
		return num_ents + (num_ents >> sysctl_tsb_ratio);
}

/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
 * do_sparc64_fault() invokes this routine to try to grow it.
 *
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.  It also must be physically contiguous, so we
 * cannot use vmalloc().
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
{
	unsigned long max_tsb_size = 1 * 1024 * 1024;
	unsigned long new_size, old_size, flags;
	struct tsb *old_tsb, *new_tsb;
	unsigned long new_cache_index, old_cache_index;
	unsigned long new_rss_limit;
	gfp_t gfp_flags;

	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
		max_tsb_size = (PAGE_SIZE << MAX_ORDER);

	new_cache_index = 0;
	for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
		new_rss_limit = tsb_size_to_rss_limit(new_size);
		if (new_rss_limit > rss)
			break;
		new_cache_index++;
	}

	if (new_size == max_tsb_size)
		new_rss_limit = ~0UL;

retry_tsb_alloc:
	gfp_flags = GFP_KERNEL;
	if (new_size > (PAGE_SIZE * 2))
		gfp_flags = __GFP_NOWARN | __GFP_NORETRY;

	new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index],
					gfp_flags, numa_node_id());
	if (unlikely(!new_tsb)) {
		/* Not being able to fork due to a high-order TSB
		 * allocation failure is very bad behavior.  Just back
		 * down to a 0-order allocation and force no TSB
		 * growing for this address space.
		 */
		if (mm->context.tsb_block[tsb_index].tsb == NULL &&
		    new_cache_index > 0) {
			new_cache_index = 0;
			new_size = 8192;
			new_rss_limit = ~0UL;
			goto retry_tsb_alloc;
		}

		/* If we failed on a TSB grow, we are under serious
		 * memory pressure so don't try to grow any more.
		 */
		if (mm->context.tsb_block[tsb_index].tsb != NULL)
			mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
		return;
	}

	/* Mark all tags as invalid. */
	tsb_init(new_tsb, new_size);

	/* Ok, we are about to commit the changes.  If we are
	 * growing an existing TSB the locking is very tricky,
	 * so WATCH OUT!
	 *
	 * We have to hold mm->context.lock while committing to the
	 * new TSB, this synchronizes us with processors in
	 * flush_tsb_user() and switch_mm() for this address space.
	 *
	 * But even with that lock held, processors run asynchronously
	 * accessing the old TSB via TLB miss handling.  This is OK
	 * because those actions are just propagating state from the
	 * Linux page tables into the TSB, page table mappings are not
	 * being changed.  If a real fault occurs, the processor will
	 * synchronize with us when it hits flush_tsb_user(), this is
	 * also true for the case where vmscan is modifying the page
	 * tables.  The only thing we need to be careful with is to
	 * skip any locked TSB entries during copy_tsb().
	 *
	 * When we finish committing to the new TSB, we have to drop
	 * the lock and ask all other cpus running this address space
	 * to run tsb_context_switch() to see the new TSB table.
	 */
	spin_lock_irqsave(&mm->context.lock, flags);

	old_tsb = mm->context.tsb_block[tsb_index].tsb;
	old_cache_index =
		(mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
	old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
		    sizeof(struct tsb));

	/* Handle multiple threads trying to grow the TSB at the same time.
	 * One will get in here first, and bump the size and the RSS limit.
	 * The others will get in here next and hit this check.
	 */
	if (unlikely(old_tsb &&
		     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
		spin_unlock_irqrestore(&mm->context.lock, flags);

		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
		return;
	}

	mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;

	if (old_tsb) {
		extern void copy_tsb(unsigned long old_tsb_base,
				     unsigned long old_tsb_size,
				     unsigned long new_tsb_base,
				     unsigned long new_tsb_size);
		unsigned long old_tsb_base = (unsigned long) old_tsb;
		unsigned long new_tsb_base = (unsigned long) new_tsb;

		if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
			old_tsb_base = __pa(old_tsb_base);
			new_tsb_base = __pa(new_tsb_base);
		}
		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
	}

	mm->context.tsb_block[tsb_index].tsb = new_tsb;
	setup_tsb_params(mm, tsb_index, new_size);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	/* If old_tsb is NULL, we're being invoked for the first time
	 * from init_new_context().
	 */
	if (old_tsb) {
		/* Reload it on the local cpu. */
		tsb_context_switch(mm);

		/* Now force other processors to do the same. */
		preempt_disable();
		smp_tsb_sync(mm);
		preempt_enable();

		/* Now it is safe to free the old tsb. */
		kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
	}
}

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned long huge_pte_count;
#endif
	unsigned int i;

	spin_lock_init(&mm->context.lock);

	mm->context.sparc64_ctx_val = 0UL;

#ifdef CONFIG_HUGETLB_PAGE
	/* We reset it to zero because the fork() page copying
	 * will re-increment the counters as the parent PTEs are
	 * copied into the child address space.
	 */
	huge_pte_count = mm->context.huge_pte_count;
	mm->context.huge_pte_count = 0;
#endif

	/* copy_mm() copies over the parent's mm_struct before calling
	 * us, so we need to zero out the TSB pointer or else tsb_grow()
	 * will be confused and think there is an older TSB to free up.
	 */
	for (i = 0; i < MM_NUM_TSBS; i++)
		mm->context.tsb_block[i].tsb = NULL;

	/* If this is fork, inherit the parent's TSB size.  We would
	 * grow it to that size on the first page fault anyway.
	 */
	tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));

#ifdef CONFIG_HUGETLB_PAGE
	if (unlikely(huge_pte_count))
		tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
#endif

	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
		return -ENOMEM;

	return 0;
}

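/* Free one TSB back to its kmem cache.  The cache index is recovered
 * from the size field in the low three bits of tsb_reg_val, which
 * setup_tsb_params() stored there.
 */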
static void tsb_destroy_one(struct tsb_config *tp)
{
	unsigned long cache_index;

	if (!tp->tsb)
		return;
	cache_index = tp->tsb_reg_val & 0x7UL;
	kmem_cache_free(tsb_caches[cache_index], tp->tsb);
	tp->tsb = NULL;
	tp->tsb_reg_val = 0UL;
}

void destroy_context(struct mm_struct *mm)
{
	unsigned long flags, i;

	for (i = 0; i < MM_NUM_TSBS; i++)
		tsb_destroy_one(&mm->context.tsb_block[i]);

	spin_lock_irqsave(&ctx_alloc_lock, flags);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);
		mmu_context_bmap[nr >> 6] &= ~(1UL << (nr & 63));
	}

	spin_unlock_irqrestore(&ctx_alloc_lock, flags);
}