/*
 * unaligned.c: Unaligned load/store trap handling with special
 *              cases for the kernel to do them more quickly.
 *
 * Copyright (C) 1996,2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/asi.h>
#include <asm/ptrace.h>
#include <asm/pstate.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
#include <asm/fpumacro.h>
#include <asm/cacheflush.h>

enum direction {
	load,    /* ld, ldd, ldh, ldsh */
	store,   /* st, std, sth, stsh */
	both,    /* Swap, ldstub, cas, ... */
	fpld,
	fpst,
	invalid,
};

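/* Bit 21 of the instruction is op3 bit 2, which is clear for the
 * integer loads and set for the stores; within the store group an
 * op3 low nibble of 0xf is the swap family, which both reads and
 * writes memory.
 */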
static inline enum direction decode_direction(unsigned int insn)
{
	unsigned long tmp = (insn >> 21) & 1;

	if (!tmp)
		return load;
	else {
		switch ((insn>>19)&0xf) {
		case 15: /* swap* */
			return both;
		default:
			return store;
		}
	}
}

/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
static inline int decode_access_size(struct pt_regs *regs, unsigned int insn)
{
	unsigned int tmp;

	tmp = ((insn >> 19) & 0xf);
	if (tmp == 11 || tmp == 14) /* ldx/stx */
		return 8;
	tmp &= 3;
	if (!tmp)
		return 4;
	else if (tmp == 3)
		return 16;	/* ldd/std - Although it is actually 8 */
	else if (tmp == 2)
		return 2;
	else {
		printk("Impossible unaligned trap. insn=%08x\n", insn);
		die_if_kernel("Byte sized unaligned access?!?!", regs);

		/* GCC should never warn that control reaches the end
		 * of this function without returning a value because
		 * die_if_kernel() is marked with attribute 'noreturn'.
		 * Alas, some versions do...
		 */

		return 0;
	}
}

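/* For alternate-space accesses the ASI comes either from the %asi
 * register saved in TSTATE (immediate form) or from the instruction's
 * imm_asi field; ordinary accesses use the default primary ASI.
 */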
static inline int decode_asi(unsigned int insn, struct pt_regs *regs)
{
	if (insn & 0x800000) {
		if (insn & 0x2000)
			return (unsigned char)(regs->tstate >> 24);	/* %asi */
		else
			return (unsigned char)(insn >> 5);	/* imm_asi */
	} else
		return ASI_P;
}

/* 0x400000 = signed, 0 = unsigned */
static inline int decode_signedness(unsigned int insn)
{
	return (insn & 0x400000);
}

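/* Registers %l0-%i7 (numbers 16-31) live in the register window on
 * the stack, so the windows must be flushed to memory before any of
 * them can be read or written through the saved frame pointer.
 */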
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
					unsigned int rd, int from_kernel)
{
	if (rs2 >= 16 || rs1 >= 16 || rd >= 16) {
		if (from_kernel != 0)
			__asm__ __volatile__("flushw");
		else
			flushw_user();
	}
}

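/* Sign-extend the 13-bit immediate field by shifting it up to the top
 * of a 64-bit long and back down again.
 */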
static inline long sign_extend_imm13(long imm)
{
	return imm << 51 >> 51;
}

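/* Read the current value of integer register 'reg'.  Globals and outs
 * are taken straight from pt_regs (%g0 always reads as zero); locals
 * and ins are fetched from the register window saved on the kernel or
 * user stack, using the 32-bit window layout for compat tasks.
 */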
static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
	unsigned long value;

	if (reg < 16)
		return (!reg ? 0 : regs->u_regs[reg]);
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *win;
		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		value = win->locals[reg - 16];
	} else if (test_thread_flag(TIF_32BIT)) {
		struct reg_window32 __user *win32;
		win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
		get_user(value, &win32->locals[reg - 16]);
	} else {
		struct reg_window __user *win;
		win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		get_user(value, &win->locals[reg - 16]);
	}
	return value;
}

static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
	if (reg < 16)
		return &regs->u_regs[reg];
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *win;
		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		return &win->locals[reg - 16];
	} else if (test_thread_flag(TIF_32BIT)) {
		struct reg_window32 *win32;
		win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
		return (unsigned long *)&win32->locals[reg - 16];
	} else {
		struct reg_window *win;
		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		return &win->locals[reg - 16];
	}
}

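/* Recompute the effective address of the trapping load/store:
 * rs1 + simm13 for the immediate form, rs1 + rs2 otherwise, truncated
 * to 32 bits for 32-bit user tasks.
 */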
unsigned long compute_effective_address(struct pt_regs *regs,
					unsigned int insn, unsigned int rd)
{
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	unsigned int rs1 = (insn >> 14) & 0x1f;
	unsigned int rs2 = insn & 0x1f;
	unsigned long addr;

	if (insn & 0x2000) {
		maybe_flush_windows(rs1, 0, rd, from_kernel);
		addr = (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
	} else {
		maybe_flush_windows(rs1, rs2, rd, from_kernel);
		addr = (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
	}

	if (!from_kernel && test_thread_flag(TIF_32BIT))
		addr &= 0xffffffff;

	return addr;
}

/* This is just to make gcc think die_if_kernel does return... */
static void __used unaligned_panic(char *str, struct pt_regs *regs)
{
	die_if_kernel(str, regs);
}

extern int do_int_load(unsigned long *dest_reg, int size,
		       unsigned long *saddr, int is_signed, int asi);

extern int __do_int_store(unsigned long *dst_addr, int size,
			  unsigned long src_val, int asi);

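/* Emulate an integer store.  A "std" (size 16) access is converted
 * into a single 64-bit store built from the even/odd register pair;
 * if the little-endian bit was stripped from the ASI by the caller,
 * the value is byte swapped here before __do_int_store() performs
 * the access.
 */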
static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
			       struct pt_regs *regs, int asi, int orig_asi)
{
	unsigned long zero = 0;
	unsigned long *src_val_p = &zero;
	unsigned long src_val;

	if (size == 16) {
		size = 8;
		zero = (((long)(reg_num ?
			(unsigned)fetch_reg(reg_num, regs) : 0)) << 32) |
			(unsigned)fetch_reg(reg_num + 1, regs);
	} else if (reg_num) {
		src_val_p = fetch_reg_addr(reg_num, regs);
	}
	src_val = *src_val_p;
	if (unlikely(asi != orig_asi)) {
		switch (size) {
		case 2:
			src_val = swab16(src_val);
			break;
		case 4:
			src_val = swab32(src_val);
			break;
		case 8:
			src_val = swab64(src_val);
			break;
		case 16:
		default:
			BUG();
			break;
		}
	}
	return __do_int_store(dst_addr, size, src_val, asi);
}

static inline void advance(struct pt_regs *regs)
{
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
}

static inline int floating_point_load_or_store_p(unsigned int insn)
{
	return (insn >> 24) & 1;
}

static inline int ok_for_kernel(unsigned int insn)
{
	return !floating_point_load_or_store_p(insn);
}

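/* The emulated kernel access itself faulted.  If an exception table
 * fixup exists for the trapping PC, branch to it (optionally resetting
 * the saved %asi field in TSTATE to ASI_AIUS); otherwise report the
 * bad access and die.
 */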
static void kernel_mna_trap_fault(int fixup_tstate_asi)
{
	struct pt_regs *regs = current_thread_info()->kern_una_regs;
	unsigned int insn = current_thread_info()->kern_una_insn;
	const struct exception_table_entry *entry;

	entry = search_exception_tables(regs->tpc);
	if (!entry) {
		unsigned long address;

		address = compute_effective_address(regs, insn,
						    ((insn >> 25) & 0x1f));
		if (address < PAGE_SIZE) {
			printk(KERN_ALERT "Unable to handle kernel NULL "
			       "pointer dereference in mna handler");
		} else
			printk(KERN_ALERT "Unable to handle kernel paging "
			       "request in mna handler");
		printk(KERN_ALERT " at virtual address %016lx\n",address);
		printk(KERN_ALERT "current->{active_,}mm->context = %016lx\n",
			(current->mm ? CTX_HWBITS(current->mm->context) :
			CTX_HWBITS(current->active_mm->context)));
		printk(KERN_ALERT "current->{active_,}mm->pgd = %016lx\n",
			(current->mm ? (unsigned long) current->mm->pgd :
			(unsigned long) current->active_mm->pgd));
		die_if_kernel("Oops", regs);
		/* Not reached */
	}
	regs->tpc = entry->fixup;
	regs->tnpc = regs->tpc + 4;

	if (fixup_tstate_asi) {
		regs->tstate &= ~TSTATE_ASI;
		regs->tstate |= (ASI_AIUS << 24UL);
	}
}

static void log_unaligned(struct pt_regs *regs)
{
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

	if (__ratelimit(&ratelimit)) {
		printk("Kernel unaligned access at TPC[%lx] %pS\n",
		       regs->tpc, (void *) regs->tpc);
	}
}

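/* Entry point for unaligned accesses taken in kernel mode.  Unaligned
 * {get,put}_user() accesses just take the exception fixup quietly;
 * everything else is logged (ratelimited), little-endian ASIs are
 * normalized and handled by byte swapping, and the load or store is
 * emulated before the trapping instruction is skipped.
 */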
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
	enum direction dir = decode_direction(insn);
	int size = decode_access_size(regs, insn);
	int orig_asi, asi;

	current_thread_info()->kern_una_regs = regs;
	current_thread_info()->kern_una_insn = insn;

	orig_asi = asi = decode_asi(insn, regs);

	/* If this is a {get,put}_user() on an unaligned userspace pointer,
	 * just signal a fault and do not log the event.
	 */
	if (asi == ASI_AIUS) {
		kernel_mna_trap_fault(0);
		return;
	}

	log_unaligned(regs);

	if (!ok_for_kernel(insn) || dir == both) {
		printk("Unsupported unaligned load/store trap for kernel "
		       "at <%016lx>.\n", regs->tpc);
		unaligned_panic("Kernel does fpu/atomic "
				"unaligned load/store.", regs);

		kernel_mna_trap_fault(0);
	} else {
		unsigned long addr, *reg_addr;
		int err;

		addr = compute_effective_address(regs, insn,
						 ((insn >> 25) & 0x1f));
		perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
		switch (asi) {
		case ASI_NL:
		case ASI_AIUPL:
		case ASI_AIUSL:
		case ASI_PL:
		case ASI_SL:
		case ASI_PNFL:
		case ASI_SNFL:
			asi &= ~0x08;
			break;
		}
		switch (dir) {
		case load:
			reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs);
			err = do_int_load(reg_addr, size,
					  (unsigned long *) addr,
					  decode_signedness(insn), asi);
			if (likely(!err) && unlikely(asi != orig_asi)) {
				unsigned long val_in = *reg_addr;
				switch (size) {
				case 2:
					val_in = swab16(val_in);
					break;
				case 4:
					val_in = swab32(val_in);
					break;
				case 8:
					val_in = swab64(val_in);
					break;
				case 16:
				default:
					BUG();
					break;
				}
				*reg_addr = val_in;
			}
			break;

		case store:
			err = do_int_store(((insn>>25)&0x1f), size,
					   (unsigned long *) addr, regs,
					   asi, orig_asi);
			break;

		default:
			panic("Impossible kernel unaligned trap.");
			/* Not reached... */
		}
		if (unlikely(err))
			kernel_mna_trap_fault(1);
		else
			advance(regs);
	}
}

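/* Emulate the popc instruction for CPUs that trap on it, using the
 * generic hweight64().  The result is written either directly into
 * pt_regs or into the saved register window for %l/%i destinations.
 */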
int handle_popc(u32 insn, struct pt_regs *regs)
{
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	int ret, rd = ((insn >> 25) & 0x1f);
	u64 value;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
	if (insn & 0x2000) {
		maybe_flush_windows(0, 0, rd, from_kernel);
		value = sign_extend_imm13(insn);
	} else {
		maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
		value = fetch_reg(insn & 0x1f, regs);
	}
	ret = hweight64(value);
	if (rd < 16) {
		if (rd)
			regs->u_regs[rd] = ret;
	} else {
		if (test_thread_flag(TIF_32BIT)) {
			struct reg_window32 __user *win32;
			win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
			put_user(ret, &win32->locals[rd - 16]);
		} else {
			struct reg_window __user *win;
			win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
			put_user(ret, &win->locals[rd - 16]);
		}
	}
	advance(regs);
	return 1;
}

extern void do_fpother(struct pt_regs *regs);
extern void do_privact(struct pt_regs *regs);
extern void spitfire_data_access_exception(struct pt_regs *regs,
					   unsigned long sfsr,
					   unsigned long sfar);
extern void sun4v_data_access_exception(struct pt_regs *regs,
					unsigned long addr,
					unsigned long type_ctx);

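/* Emulate floating-point loads (LDF/LDDF/LDQF) and quad stores (STQ)
 * that trapped, doing the memory access 32 bits at a time, honoring
 * little-endian and no-fault ASIs, and updating the software-saved
 * FPU state of the current thread.
 */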
int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{
	unsigned long addr = compute_effective_address(regs, insn, 0);
	int freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
	struct fpustate *f = FPUSTATE;
	int asi = decode_asi(insn, regs);
	int flag = (freg < 32) ? FPRS_DL : FPRS_DU;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

	save_and_clear_fpu();
	current_thread_info()->xfsr[0] &= ~0x1c000;
	if (freg & 3) {
		current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
		do_fpother(regs);
		return 0;
	}
	if (insn & 0x200000) {
		/* STQ */
		u64 first = 0, second = 0;

		if (current_thread_info()->fpsaved[0] & flag) {
			first = *(u64 *)&f->regs[freg];
			second = *(u64 *)&f->regs[freg+2];
		}
		if (asi < 0x80) {
			do_privact(regs);
			return 1;
		}
		switch (asi) {
		case ASI_P:
		case ASI_S: break;
		case ASI_PL:
		case ASI_SL:
		{
			/* Need to convert endians */
			u64 tmp = __swab64p(&first);

			first = __swab64p(&second);
			second = tmp;
			break;
		}
		default:
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		if (put_user (first >> 32, (u32 __user *)addr) ||
		    __put_user ((u32)first, (u32 __user *)(addr + 4)) ||
		    __put_user (second >> 32, (u32 __user *)(addr + 8)) ||
		    __put_user ((u32)second, (u32 __user *)(addr + 12))) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
	} else {
		/* LDF, LDDF, LDQF */
		u32 data[4] __attribute__ ((aligned(8)));
		int size, i;
		int err;

		if (asi < 0x80) {
			do_privact(regs);
			return 1;
		} else if (asi > ASI_SNFL) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		switch (insn & 0x180000) {
		case 0x000000: size = 1; break;
		case 0x100000: size = 4; break;
		default: size = 2; break;
		}
		for (i = 0; i < size; i++)
			data[i] = 0;

		err = get_user (data[0], (u32 __user *) addr);
		if (!err) {
			for (i = 1; i < size; i++)
				err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
		}
		if (err && !(asi & 0x2 /* NF */)) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		if (asi & 0x8) /* Little */ {
			u64 tmp;

			switch (size) {
			case 1: data[0] = le32_to_cpup(data + 0); break;
			default:*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 0));
				break;
			case 4: tmp = le64_to_cpup((u64 *)(data + 0));
				*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 2));
				*(u64 *)(data + 2) = tmp;
				break;
			}
		}
		if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
			current_thread_info()->fpsaved[0] = FPRS_FEF;
			current_thread_info()->gsr[0] = 0;
		}
		if (!(current_thread_info()->fpsaved[0] & flag)) {
			if (freg < 32)
				memset(f->regs, 0, 32*sizeof(u32));
			else
				memset(f->regs+32, 0, 32*sizeof(u32));
		}
		memcpy(f->regs + freg, data, size * 4);
		current_thread_info()->fpsaved[0] |= flag;
	}
	advance(regs);
	return 1;
}

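/* A non-faulting load (no-fault ASI) that faulted anyway: the
 * architecture defines the result as zero, so clear the destination
 * register (both registers for a load-double) and move on.
 */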
void handle_ld_nf(u32 insn, struct pt_regs *regs)
{
	int rd = ((insn >> 25) & 0x1f);
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	unsigned long *reg;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

	maybe_flush_windows(0, 0, rd, from_kernel);
	reg = fetch_reg_addr(rd, regs);
	if (from_kernel || rd < 16) {
		reg[0] = 0;
		if ((insn & 0x780000) == 0x180000)
			reg[1] = 0;
	} else if (test_thread_flag(TIF_32BIT)) {
		put_user(0, (int __user *) reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, ((int __user *) reg) + 1);
	} else {
		put_user(0, (unsigned long __user *) reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, (unsigned long __user *) reg + 1);
	}
	advance(regs);
}

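/* Misaligned LDDF from user space: fetch the doubleword as two 32-bit
 * user reads, reassemble it (byte swapping for little-endian ASIs) and
 * store it into the thread's saved FPU register image.
 */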
void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	u64 value;
	u8 freg;
	int flag;
	struct fpustate *f = FPUSTATE;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("lddfmna from kernel", regs);
	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		int asi = decode_asi(insn, regs);
		u32 first, second;
		int err;

		if ((asi > ASI_SNFL) ||
		    (asi < ASI_P))
			goto daex;
		first = second = 0;
		err = get_user(first, (u32 __user *)sfar);
		if (!err)
			err = get_user(second, (u32 __user *)(sfar + 4));
		if (err) {
			if (!(asi & 0x2))
				goto daex;
			first = second = 0;
		}
		save_and_clear_fpu();
		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		value = (((u64)first) << 32) | second;
		if (asi & 0x8) /* Little */
			value = __swab64p(&value);
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
			current_thread_info()->fpsaved[0] = FPRS_FEF;
			current_thread_info()->gsr[0] = 0;
		}
		if (!(current_thread_info()->fpsaved[0] & flag)) {
			if (freg < 32)
				memset(f->regs, 0, 32*sizeof(u32));
			else
				memset(f->regs+32, 0, 32*sizeof(u32));
		}
		*(u64 *)(f->regs + freg) = value;
		current_thread_info()->fpsaved[0] |= flag;
	} else {
daex:
		if (tlb_type == hypervisor)
			sun4v_data_access_exception(regs, sfar, sfsr);
		else
			spitfire_data_access_exception(regs, sfsr, sfar);
		return;
	}
	advance(regs);
}

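/* Misaligned STDF from user space: read the source FPU register from
 * the thread's saved FPU state and write it out as two 32-bit user
 * stores, byte swapping first for little-endian ASIs.
 */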
void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	u64 value;
	u8 freg;
	int flag;
	struct fpustate *f = FPUSTATE;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("stdfmna from kernel", regs);
	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		int asi = decode_asi(insn, regs);
		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		value = 0;
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		if ((asi > ASI_SNFL) ||
		    (asi < ASI_P))
			goto daex;
		save_and_clear_fpu();
		if (current_thread_info()->fpsaved[0] & flag)
			value = *(u64 *)&f->regs[freg];
		switch (asi) {
		case ASI_P:
		case ASI_S: break;
		case ASI_PL:
		case ASI_SL:
			value = __swab64p(&value); break;
		default: goto daex;
		}
		if (put_user (value >> 32, (u32 __user *) sfar) ||
		    __put_user ((u32)value, (u32 __user *)(sfar + 4)))
			goto daex;
	} else {
daex:
		if (tlb_type == hypervisor)
			sun4v_data_access_exception(regs, sfar, sfsr);
		else
			spitfire_data_access_exception(regs, sfsr, sfar);
		return;
	}
	advance(regs);
}