blob: d15b9946650863a0228c6ea9094b2da0d825ff3a [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
Paul Mundt811d50c2007-11-20 17:01:55 +09002 * The SH64 TLB miss.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003 *
4 * Original code from fault.c
5 * Copyright (C) 2000, 2001 Paolo Alberelli
6 *
7 * Fast PTE->TLB refill path
8 * Copyright (C) 2003 Richard.Curnow@superh.com
9 *
10 * IMPORTANT NOTES :
Paul Mundt811d50c2007-11-20 17:01:55 +090011 * The do_fast_page_fault function is called from a context in entry.S
12 * where very few registers have been saved. In particular, the code in
13 * this file must be compiled not to use ANY caller-save registers that
14 * are not part of the restricted save set. Also, it means that code in
15 * this file must not make calls to functions elsewhere in the kernel, or
16 * else the excepting context will see corruption in its caller-save
17 * registers. Plus, the entry.S save area is non-reentrant, so this code
18 * has to run with SR.BL==1, i.e. no interrupts taken inside it and panic
19 * on any exception.
Linus Torvalds1da177e2005-04-16 15:20:36 -070020 *
Paul Mundt811d50c2007-11-20 17:01:55 +090021 * This file is subject to the terms and conditions of the GNU General Public
22 * License. See the file "COPYING" in the main directory of this archive
23 * for more details.
Linus Torvalds1da177e2005-04-16 15:20:36 -070024 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070025#include <linux/signal.h>
26#include <linux/sched.h>
27#include <linux/kernel.h>
28#include <linux/errno.h>
29#include <linux/string.h>
30#include <linux/types.h>
31#include <linux/ptrace.h>
32#include <linux/mman.h>
33#include <linux/mm.h>
34#include <linux/smp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/interrupt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include <asm/tlb.h>
37#include <asm/io.h>
38#include <asm/uaccess.h>
39#include <asm/pgalloc.h>
40#include <asm/mmu_context.h>
Paul Mundtf15cbe62008-07-29 08:09:44 +090041#include <cpu/registers.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042
/*
 * Refill one TLB slot from an already-valid PTE.
 *
 * Callable from fault.c, so not static.
 *
 * @address:           faulting virtual address (used for the VPN half)
 * @is_text_not_data:  non-zero selects the ITLB, zero the DTLB
 * @pte:               page table entry to load into the TLB
 *
 * Per the notes at the top of this file, this runs in a severely
 * restricted context (SR.BL==1, minimal register save set), so it must
 * not call out to the rest of the kernel.
 */
inline void __do_tlb_refill(unsigned long address,
			    unsigned long long is_text_not_data, pte_t *pte)
{
	unsigned long long ptel;
	unsigned long long pteh=0;
	struct tlb_info *tlbp;
	unsigned long long next;

	/* Get PTEL first */
	ptel = pte_val(*pte);

	/*
	 * Set PTEH register: sign-extended VPN of the faulting address...
	 */
	pteh = neff_sign_extend(address & MMU_VPN_MASK);

	/* Set the ASID. */
	pteh |= get_asid() << PTEH_ASID_SHIFT;
	pteh |= PTEH_VALID;

	/* Set PTEL register, set_pte has performed the sign extension */
	ptel &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */

	/* Victim selection is a simple round-robin through the chosen TLB. */
	tlbp = is_text_not_data ? &(cpu_data->itlb) : &(cpu_data->dtlb);
	next = tlbp->next;
	__flush_tlb_slot(next);
	/*
	 * Write the PTEL config word (index 1) before PTEH (index 0); pteh
	 * carries PTEH_VALID, so the slot only becomes live on the second
	 * store.
	 */
	asm volatile ("putcfg %0,1,%2\n\n\t"
		      "putcfg %0,0,%1\n"
		      : : "r" (next), "r" (pteh), "r" (ptel) );

	/* Advance the round-robin pointer, wrapping at the end of the TLB. */
	next += TLB_STEP;
	if (next > tlbp->last) next = tlbp->first;
	tlbp->next = next;

}
79
Paul Mundt811d50c2007-11-20 17:01:55 +090080static int handle_vmalloc_fault(struct mm_struct *mm,
81 unsigned long protection_flags,
Linus Torvalds1da177e2005-04-16 15:20:36 -070082 unsigned long long textaccess,
83 unsigned long address)
84{
85 pgd_t *dir;
Paul Mundt811d50c2007-11-20 17:01:55 +090086 pud_t *pud;
Linus Torvalds1da177e2005-04-16 15:20:36 -070087 pmd_t *pmd;
88 static pte_t *pte;
89 pte_t entry;
90
91 dir = pgd_offset_k(address);
Linus Torvalds1da177e2005-04-16 15:20:36 -070092
Paul Mundt811d50c2007-11-20 17:01:55 +090093 pud = pud_offset(dir, address);
94 if (pud_none_or_clear_bad(pud))
Linus Torvalds1da177e2005-04-16 15:20:36 -070095 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -070096
Paul Mundt811d50c2007-11-20 17:01:55 +090097 pmd = pmd_offset(pud, address);
98 if (pmd_none_or_clear_bad(pmd))
Linus Torvalds1da177e2005-04-16 15:20:36 -070099 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700100
101 pte = pte_offset_kernel(pmd, address);
102 entry = *pte;
103
Paul Mundt811d50c2007-11-20 17:01:55 +0900104 if (pte_none(entry) || !pte_present(entry))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700105 return 0;
Paul Mundt811d50c2007-11-20 17:01:55 +0900106 if ((pte_val(entry) & protection_flags) != protection_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700108
109 __do_tlb_refill(address, textaccess, pte);
110
111 return 1;
112}
113
Paul Mundt811d50c2007-11-20 17:01:55 +0900114static int handle_tlbmiss(struct mm_struct *mm,
115 unsigned long long protection_flags,
116 unsigned long long textaccess,
117 unsigned long address)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700118{
119 pgd_t *dir;
Paul Mundt811d50c2007-11-20 17:01:55 +0900120 pud_t *pud;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700121 pmd_t *pmd;
122 pte_t *pte;
123 pte_t entry;
124
125 /* NB. The PGD currently only contains a single entry - there is no
126 page table tree stored for the top half of the address space since
127 virtual pages in that region should never be mapped in user mode.
128 (In kernel mode, the only things in that region are the 512Mb super
129 page (locked in), and vmalloc (modules) + I/O device pages (handled
130 by handle_vmalloc_fault), so no PGD for the upper half is required
131 by kernel mode either).
132
133 See how mm->pgd is allocated and initialised in pgd_alloc to see why
134 the next test is necessary. - RPC */
Paul Mundt811d50c2007-11-20 17:01:55 +0900135 if (address >= (unsigned long) TASK_SIZE)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700136 /* upper half - never has page table entries. */
137 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700138
Paul Mundt811d50c2007-11-20 17:01:55 +0900139 dir = pgd_offset(mm, address);
140 if (pgd_none(*dir) || !pgd_present(*dir))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700141 return 0;
Paul Mundt811d50c2007-11-20 17:01:55 +0900142 if (!pgd_present(*dir))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700143 return 0;
Paul Mundt811d50c2007-11-20 17:01:55 +0900144
145 pud = pud_offset(dir, address);
146 if (pud_none(*pud) || !pud_present(*pud))
147 return 0;
148
149 pmd = pmd_offset(pud, address);
150 if (pmd_none(*pmd) || !pmd_present(*pmd))
151 return 0;
152
Linus Torvalds1da177e2005-04-16 15:20:36 -0700153 pte = pte_offset_kernel(pmd, address);
154 entry = *pte;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700155
Paul Mundt811d50c2007-11-20 17:01:55 +0900156 if (pte_none(entry) || !pte_present(entry))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700157 return 0;
Paul Mundt811d50c2007-11-20 17:01:55 +0900158
159 /*
160 * If the page doesn't have sufficient protection bits set to
161 * service the kind of fault being handled, there's not much
162 * point doing the TLB refill. Punt the fault to the general
163 * handler.
164 */
165 if ((pte_val(entry) & protection_flags) != protection_flags)
166 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700167
168 __do_tlb_refill(address, textaccess, pte);
169
170 return 1;
171}
172
/*
 * Put all this information into one structure so that everything is just
 * arithmetic relative to a single base address. This reduces the number
 * of movi/shori pairs needed just to load addresses of static data.
 */
struct expevt_lookup {
	unsigned short protection_flags[8];
	unsigned char is_text_access[8];
	/*
	 * NOTE(review): is_write_access is never initialised or read
	 * anywhere in this file - possibly used from assembly via field
	 * offsets; confirm before removing.
	 */
	unsigned char is_write_access[8];
};

/*
 * Protection bits checked against pte_val() in the miss handlers;
 * presumably these mirror the hardware PTEL protection bits - TODO
 * confirm against the SH-5 register layout.
 */
#define PRU (1<<9)
#define PRW (1<<8)
#define PRX (1<<7)
#define PRR (1<<6)

/* NOTE(review): DIRTY and YOUNG appear unused in this file. */
#define DIRTY (_PAGE_DIRTY | _PAGE_ACCESSED)
#define YOUNG (_PAGE_ACCESSED)

/* Sized as 8 rather than 4 to allow checking the PTE's PRU bit against whether
   the fault happened in user mode or privileged mode. */
static struct expevt_lookup expevt_lookup_table = {
	.protection_flags = {PRX, PRX, 0, 0, PRR, PRR, PRW, PRW},
	.is_text_access   = {1, 1, 0, 0, 0, 0, 0, 0}
};
198
199/*
200 This routine handles page faults that can be serviced just by refilling a
201 TLB entry from an existing page table entry. (This case represents a very
202 large majority of page faults.) Return 1 if the fault was successfully
203 handled. Return 0 if the fault could not be handled. (This leads into the
204 general fault handling in fault.c which deals with mapping file-backed
205 pages, stack growth, segmentation faults, swapping etc etc)
206 */
Paul Mundt811d50c2007-11-20 17:01:55 +0900207asmlinkage int do_fast_page_fault(unsigned long long ssr_md,
208 unsigned long long expevt,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700209 unsigned long address)
210{
211 struct task_struct *tsk;
212 struct mm_struct *mm;
213 unsigned long long textaccess;
214 unsigned long long protection_flags;
215 unsigned long long index;
216 unsigned long long expevt4;
217
Paul Mundt811d50c2007-11-20 17:01:55 +0900218 /* The next few lines implement a way of hashing EXPEVT into a
219 * small array index which can be used to lookup parameters
220 * specific to the type of TLBMISS being handled.
221 *
222 * Note:
223 * ITLBMISS has EXPEVT==0xa40
224 * RTLBMISS has EXPEVT==0x040
225 * WTLBMISS has EXPEVT==0x060
226 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700227 expevt4 = (expevt >> 4);
Paul Mundt811d50c2007-11-20 17:01:55 +0900228 /* TODO : xor ssr_md into this expression too. Then we can check
229 * that PRU is set when it needs to be. */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700230 index = expevt4 ^ (expevt4 >> 5);
231 index &= 7;
232 protection_flags = expevt_lookup_table.protection_flags[index];
233 textaccess = expevt_lookup_table.is_text_access[index];
234
Linus Torvalds1da177e2005-04-16 15:20:36 -0700235 /* SIM
236 * Note this is now called with interrupts still disabled
237 * This is to cope with being called for a missing IO port
Simon Arlott0a354772007-05-14 08:25:48 +0900238 * address with interrupts disabled. This should be fixed as
Linus Torvalds1da177e2005-04-16 15:20:36 -0700239 * soon as we have a better 'fast path' miss handler.
240 *
241 * Plus take care how you try and debug this stuff.
242 * For example, writing debug data to a port which you
243 * have just faulted on is not going to work.
244 */
245
246 tsk = current;
247 mm = tsk->mm;
248
Paul Mundt28080322012-05-14 15:33:28 +0900249 if (is_vmalloc_addr((void *)address)) {
Paul Mundt811d50c2007-11-20 17:01:55 +0900250 if (ssr_md)
251 /*
252 * Process-contexts can never have this address
253 * range mapped
254 */
255 if (handle_vmalloc_fault(mm, protection_flags,
256 textaccess, address))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700257 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700258 } else if (!in_interrupt() && mm) {
Paul Mundt811d50c2007-11-20 17:01:55 +0900259 if (handle_tlbmiss(mm, protection_flags, textaccess, address))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700260 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700261 }
262
263 return 0;
264}