/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/ipc.h>
#include <asm/page.h>

/*
 * sys_pipe() uses the normal C calling convention to create a pipe:
 * the two descriptors are returned through a user-supplied array
 * rather than in two registers, as traditional Unix did it.
 */
asmlinkage int sys_pipe(unsigned long * fildes)
{
	int fd[2];
	int error;

	error = do_pipe(fd);
	if (!error) {
		if (copy_to_user(fildes, fd, 2*sizeof(int)))
			error = -EFAULT;
	}
	return error;
}
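
/*
 * A minimal userspace sketch of the convention above (hypothetical
 * snippet; assumes the libc pipe() wrapper passes the array address
 * straight through to this call):
 *
 *	int fds[2];
 *	if (pipe(fds) == 0)
 *		write(fds[1], "x", 1);	(fds[0] is the matching read end)
 */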

/* common code for old and new mmaps */
static inline long do_mmap2(
	unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	int error = -EBADF;
	struct file * file = NULL;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	return do_mmap2(addr, len, prot, flags, fd, pgoff);
}

/*
 * Perform the select(nd, in, out, ex, tv) and mmap() system
 * calls.  Linux/m68k cloned Linux/i386, which historically could not
 * handle more than 4 system call parameters, so these system calls
 * used a memory block for parameter passing.
 */

struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

asmlinkage int old_mmap(struct mmap_arg_struct *arg)
{
	struct mmap_arg_struct a;
	int error = -EFAULT;

	if (copy_from_user(&a, arg, sizeof(a)))
		goto out;

	error = -EINVAL;
	if (a.offset & ~PAGE_MASK)
		goto out;

	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
out:
	return error;
}
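
/*
 * A hedged illustration of the memory-block convention (hypothetical
 * userspace snippet; old m68k libcs issued the mmap trap roughly like
 * this, with the old_mmap syscall number assumed to be SYS_mmap):
 *
 *	struct mmap_arg_struct a = {
 *		.addr = 0, .len = 8192,
 *		.prot = PROT_READ | PROT_WRITE,
 *		.flags = MAP_PRIVATE | MAP_ANONYMOUS,
 *		.fd = (unsigned long)-1, .offset = 0,
 *	};
 *	void *p = (void *)syscall(SYS_mmap, &a);
 */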

#if 0
struct mmap_arg_struct64 {
	__u32 addr;
	__u32 len;
	__u32 prot;
	__u32 flags;
	__u64 offset; /* 64 bits */
	__u32 fd;
};

asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
{
	int error = -EFAULT;
	struct file * file = NULL;
	struct mmap_arg_struct64 a;
	unsigned long pgoff;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;

	if ((long)a.offset & ~PAGE_MASK)
		return -EINVAL;

	pgoff = a.offset >> PAGE_SHIFT;
	if ((a.offset >> PAGE_SHIFT) != pgoff)
		return -EINVAL;

	if (!(a.flags & MAP_ANONYMOUS)) {
		error = -EBADF;
		file = fget(a.fd);
		if (!file)
			goto out;
	}
	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
	up_write(&current->mm->mmap_sem);
	if (file)
		fput(file);
out:
	return error;
}
#endif

struct sel_arg_struct {
	unsigned long n;
	fd_set *inp, *outp, *exp;
	struct timeval *tvp;
};

asmlinkage int old_select(struct sel_arg_struct *arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	/* sys_select() does the appropriate kernel locking */
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
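
/*
 * The same memory-block trick applies to old_select(); a hypothetical
 * caller would pack the five select() arguments like this (syscall
 * number assumed to be SYS_select):
 *
 *	struct sel_arg_struct s = {
 *		.n = maxfd + 1,
 *		.inp = &rfds, .outp = NULL, .exp = NULL,
 *		.tvp = &timeout,
 *	};
 *	syscall(SYS_select, &s);
 */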

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc (uint call, int first, int second,
			int third, void *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	if (call <= SEMCTL)
		switch (call) {
		case SEMOP:
			return sys_semop (first, (struct sembuf *)ptr, second);
		case SEMGET:
			return sys_semget (first, second, third);
		case SEMCTL: {
			union semun fourth;
			if (!ptr)
				return -EINVAL;
			if (get_user(fourth.__pad, (void **) ptr))
				return -EFAULT;
			return sys_semctl (first, second, third, fourth);
		}
		default:
			return -ENOSYS;
		}
	if (call <= MSGCTL)
		switch (call) {
		case MSGSND:
			return sys_msgsnd (first, (struct msgbuf *) ptr,
					   second, third);
		case MSGRCV:
			switch (version) {
			case 0: {
				struct ipc_kludge tmp;
				if (!ptr)
					return -EINVAL;
				if (copy_from_user (&tmp,
						    (struct ipc_kludge *)ptr,
						    sizeof (tmp)))
					return -EFAULT;
				return sys_msgrcv (first, tmp.msgp, second,
						   tmp.msgtyp, third);
			}
			default:
				return sys_msgrcv (first,
						   (struct msgbuf *) ptr,
						   second, fifth, third);
			}
		case MSGGET:
			return sys_msgget ((key_t) first, second);
		case MSGCTL:
			return sys_msgctl (first, second,
					   (struct msqid_ds *) ptr);
		default:
			return -ENOSYS;
		}
	if (call <= SHMCTL)
		switch (call) {
		case SHMAT:
			switch (version) {
			default: {
				ulong raddr;
				ret = do_shmat (first, (char *) ptr,
						second, &raddr);
				if (ret)
					return ret;
				return put_user (raddr, (ulong *) third);
			}
			}
		case SHMDT:
			return sys_shmdt ((char *)ptr);
		case SHMGET:
			return sys_shmget (first, second, third);
		case SHMCTL:
			return sys_shmctl (first, second,
					   (struct shmid_ds *) ptr);
		default:
			return -ENOSYS;
		}

	return -EINVAL;
}
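
/*
 * Sketch of how a libc might funnel semop() through this multiplexer
 * (hypothetical snippet; the version hack in the high 16 bits of
 * 'call' is left at zero here, and first/second/ptr map onto
 * sys_semop(semid, sops, nsops)):
 *
 *	struct sembuf sop = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
 *	syscall(SYS_ipc, SEMOP, semid, 1, 0, &sop, 0);
 */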

/*
 * Convert the virtual (user) address VADDR to a physical address,
 * yielding 0 if the page is not resident.
 */
#define virt_to_phys_040(vaddr)					\
({								\
	unsigned long _mmusr, _paddr;				\
								\
	__asm__ __volatile__ (".chip 68040\n\t"			\
			      "ptestr (%1)\n\t"			\
			      "movec %%mmusr,%0\n\t"		\
			      ".chip 68k"			\
			      : "=r" (_mmusr)			\
			      : "a" (vaddr));			\
	_paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0; \
	_paddr;							\
})
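
/*
 * Mechanism note on the macro above (per the 68040 manual): ptestr
 * walks the translation tables for a read access to the given user
 * address and latches the result in %mmusr.  If the resident bit
 * (MMU_R_040) is set, the page-frame bits of %mmusr hold the physical
 * address of the page.
 */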

static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	switch (scope)
	{
	case FLUSH_SCOPE_ALL:
		switch (cache)
		{
		case FLUSH_CACHE_DATA:
			/* This nop is needed for some broken versions of the 68040.  */
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range.  */
		if ((paddr = virt_to_phys_040(addr))) {
			paddr += addr & ~(PAGE_MASK | 15);
			len = (len + (addr & 15) + 15) >> 4;
		} else {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;)
			{
				if ((paddr = virt_to_phys_040(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
			len = (len + 15) >> 4;
		}
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--)
		{
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len)
			{
				/*
				 * No need to page align here since it is done by
				 * virt_to_phys_040().
				 */
				addr += PAGE_SIZE;
				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary.  */
				for (;;)
				{
					if ((paddr = virt_to_phys_040(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			}
			else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
		{
			if (!(paddr = virt_to_phys_040(addr)))
				continue;
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}
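
/*
 * Note on the FLUSH_SCOPE_LINE walk above: cache lines are 16 bytes,
 * so 'len' is converted to a line count and 'i' tracks the lines left
 * in the current page; the MMU is re-probed only when the walk crosses
 * into a new page, and unmapped pages end or skip the range.
 */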

#define virt_to_phys_060(vaddr)				\
({							\
	unsigned long paddr;				\
	__asm__ __volatile__ (".chip 68060\n\t"		\
			      "plpar (%0)\n\t"		\
			      ".chip 68k"		\
			      : "=a" (paddr)		\
			      : "0" (vaddr));		\
	(paddr); /* XXX */				\
})
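
/*
 * plpar ("load physical address") has the 68060 MMU translate the
 * logical address in the register in place.  The XXX above presumably
 * flags that a failed translation is not explicitly checked here; the
 * callers below only test the result for zero.
 */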

static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	/*
	 * 68060 manual says:
	 *  cpush %dc : flush DC, remains valid (with our %cacr setup)
	 *  cpush %ic : invalidate IC
	 *  cpush %bc : flush DC + invalidate IC
	 */
	switch (scope)
	{
	case FLUSH_SCOPE_ALL:
		switch (cache)
		{
		case FLUSH_CACHE_DATA:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range.  */
		len += addr & 15;
		addr &= -16;
		if (!(paddr = virt_to_phys_060(addr))) {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;)
			{
				if ((paddr = virt_to_phys_060(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
		}
		len = (len + 15) >> 4;
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--)
		{
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len)
			{
				/*
				 * We just want to jump to the first cache line
				 * in the next page.
				 */
				addr += PAGE_SIZE;
				addr &= PAGE_MASK;

				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary.  */
				for (;;)
				{
					if ((paddr = virt_to_phys_060(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			}
			else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		addr &= PAGE_MASK;	/* Workaround for bug in some
					   revisions of the 68060 */
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
		{
			if (!(paddr = virt_to_phys_060(addr)))
				continue;
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}

/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	lock_kernel();
	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
	    cache & ~FLUSH_CACHE_BOTH)
		goto out;

	if (scope == FLUSH_SCOPE_ALL) {
		/* Only the superuser may explicitly flush the whole cache. */
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	} else {
		/*
		 * Verify that the specified address region actually belongs
		 * to this process.
		 */
		vma = find_vma (current->mm, addr);
		ret = -EINVAL;
		/* Check for overflow.  */
		if (addr + len < addr)
			goto out;
		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
			goto out;
	}

	if (CPU_IS_020_OR_030) {
		if (scope == FLUSH_SCOPE_LINE && len < 256) {
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 4;	/* CACR bit 2: clear I-cache entry at %caar */
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x400;	/* CACR bit 10: clear D-cache entry (68030) */
			len >>= 2;
			while (len--) {
				__asm__ __volatile__ ("movec %1, %%caar\n\t"
						      "movec %0, %%cacr"
						      : /* no outputs */
						      : "r" (cacr), "r" (addr));
				addr += 4;
			}
		} else {
			/* Flush the whole cache, even if page granularity requested. */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 8;	/* CACR bit 3: clear entire I-cache */
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x800;	/* CACR bit 11: clear entire D-cache (68030) */
			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
		}
		ret = 0;
		goto out;
	} else {
		/*
		 * 040 or 060: don't blindly trust 'scope', someone could
		 * try to flush a few megs of memory.
		 */

		if (len >= 3*PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
			scope = FLUSH_SCOPE_PAGE;
		if (len >= 10*PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
			scope = FLUSH_SCOPE_ALL;
		if (CPU_IS_040) {
			ret = cache_flush_040 (addr, scope, cache, len);
		} else if (CPU_IS_060) {
			ret = cache_flush_060 (addr, scope, cache, len);
		}
	}
out:
	unlock_kernel();
	return ret;
}
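
/*
 * Hedged usage sketch (hypothetical userspace snippet): flushing a
 * freshly written code buffer before jumping to it, as a JIT would.
 * The FLUSH_* constants come from <asm/cachectl.h>; the syscall
 * number is assumed to be SYS_cacheflush.
 *
 *	syscall(SYS_cacheflush, (unsigned long)code,
 *		FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH, code_len);
 */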

asmlinkage int sys_getpagesize(void)
{
	return PAGE_SIZE;
}