/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/ipc.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_MMU

#include <asm/tlb.h>

asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
			     unsigned long error_code);

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	/*
	 * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
	 * so we need to shift the argument down by 1; m68k mmap64(3)
	 * (in libc) expects the last argument of mmap2 in 4Kb units.
	 */
	return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}

/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr)						\
({									\
  unsigned long _mmusr, _paddr;						\
									\
  __asm__ __volatile__ (".chip 68040\n\t"				\
			"ptestr (%1)\n\t"				\
			"movec %%mmusr,%0\n\t"				\
			".chip 68k"					\
			: "=r" (_mmusr)					\
			: "a" (vaddr));					\
  _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;		\
  _paddr;								\
})

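/*
 * Flush/invalidate the 68040 caches over the requested scope:
 *   FLUSH_SCOPE_ALL  - cpusha on the selected cache(s)
 *   FLUSH_SCOPE_LINE - cpushl per 16-byte cache line, walking the
 *                      physical pages that back [addr, addr + len)
 *   FLUSH_SCOPE_PAGE - cpushp per page
 * Unmapped pages in the range are simply skipped.
 */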
static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
	{
	case FLUSH_CACHE_DATA:
	  /* This nop is needed for some broken versions of the 68040.  */
	  __asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %dc\n\t"
				".chip 68k");
	  break;
	case FLUSH_CACHE_INSN:
	  __asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %ic\n\t"
				".chip 68k");
	  break;
	default:
	case FLUSH_CACHE_BOTH:
	  __asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %bc\n\t"
				".chip 68k");
	  break;
	}
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
	 address range.  */
      if ((paddr = virt_to_phys_040(addr))) {
        paddr += addr & ~(PAGE_MASK | 15);
        len = (len + (addr & 15) + 15) >> 4;
      } else {
	unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

	if (len <= tmp)
	  return 0;
	addr += tmp;
	len -= tmp;
	tmp = PAGE_SIZE;
	for (;;)
	  {
	    if ((paddr = virt_to_phys_040(addr)))
	      break;
	    if (len <= tmp)
	      return 0;
	    addr += tmp;
	    len -= tmp;
	  }
	len = (len + 15) >> 4;
      }
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
	{
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushl %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushl %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushl %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	  if (!--i && len)
	    {
	      /*
	       * No need to page align here since it is done by
	       * virt_to_phys_040().
	       */
	      addr += PAGE_SIZE;
	      i = PAGE_SIZE / 16;
	      /* Recompute physical address when crossing a page
	         boundary. */
	      for (;;)
		{
		  if ((paddr = virt_to_phys_040(addr)))
		    break;
		  if (len <= i)
		    return 0;
		  len -= i;
		  addr += PAGE_SIZE;
		}
	    }
	  else
	    paddr += 16;
	}
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
	{
	  if (!(paddr = virt_to_phys_040(addr)))
	    continue;
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushp %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushp %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ ("nop\n\t"
				    ".chip 68040\n\t"
				    "cpushp %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	}
      break;
    }
  return 0;
}

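/*
 * Convert virtual (user) address VADDR to physical address PADDR,
 * using the 68060 plpar (load physical address) instruction.
 */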
#define virt_to_phys_060(vaddr)				\
({							\
  unsigned long paddr;					\
  __asm__ __volatile__ (".chip 68060\n\t"		\
			"plpar (%0)\n\t"		\
			".chip 68k"			\
			: "=a" (paddr)			\
			: "0" (vaddr));			\
  (paddr); /* XXX */					\
})

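/*
 * As cache_flush_040() above, but for the 68060; the per-line and
 * per-page variants again only touch the mapped physical pages in
 * the requested range.
 */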
static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  /*
   * 68060 manual says:
   *  cpush %dc : flush DC, remains valid (with our %cacr setup)
   *  cpush %ic : invalidate IC
   *  cpush %bc : flush DC + invalidate IC
   */
  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
	{
	case FLUSH_CACHE_DATA:
	  __asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %dc\n\t"
				".chip 68k");
	  break;
	case FLUSH_CACHE_INSN:
	  __asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %ic\n\t"
				".chip 68k");
	  break;
	default:
	case FLUSH_CACHE_BOTH:
	  __asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %bc\n\t"
				".chip 68k");
	  break;
	}
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
	 address range.  */
      len += addr & 15;
      addr &= -16;
      if (!(paddr = virt_to_phys_060(addr))) {
	unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

	if (len <= tmp)
	  return 0;
	addr += tmp;
	len -= tmp;
	tmp = PAGE_SIZE;
	for (;;)
	  {
	    if ((paddr = virt_to_phys_060(addr)))
	      break;
	    if (len <= tmp)
	      return 0;
	    addr += tmp;
	    len -= tmp;
	  }
      }
      len = (len + 15) >> 4;
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
	{
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushl %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushl %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushl %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	  if (!--i && len)
	    {
	      /*
	       * We just want to jump to the first cache line
	       * in the next page.
	       */
	      addr += PAGE_SIZE;
	      addr &= PAGE_MASK;

	      i = PAGE_SIZE / 16;
	      /* Recompute physical address when crossing a page
	         boundary. */
	      for (;;)
	        {
	          if ((paddr = virt_to_phys_060(addr)))
	            break;
	          if (len <= i)
	            return 0;
	          len -= i;
	          addr += PAGE_SIZE;
	        }
	    }
	  else
	    paddr += 16;
	}
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      addr &= PAGE_MASK;	/* Workaround for bug in some
				   revisions of the 68060 */
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
	{
	  if (!(paddr = virt_to_phys_060(addr)))
	    continue;
	  switch (cache)
	    {
	    case FLUSH_CACHE_DATA:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushp %%dc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    case FLUSH_CACHE_INSN:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushp %%ic,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    default:
	    case FLUSH_CACHE_BOTH:
	      __asm__ __volatile__ (".chip 68060\n\t"
				    "cpushp %%bc,(%0)\n\t"
				    ".chip 68k"
				    : : "a" (paddr));
	      break;
	    }
	}
      break;
    }
  return 0;
}

/* sys_cacheflush -- flush (part of) the processor cache.  */
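/*
 * addr/len give the user address range to operate on, scope selects
 * the granularity (FLUSH_SCOPE_LINE/PAGE/ALL) and cache selects the
 * instruction cache, data cache or both (FLUSH_CACHE_*).
 */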
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
	    cache & ~FLUSH_CACHE_BOTH)
		goto out;

	if (scope == FLUSH_SCOPE_ALL) {
		/* Only the superuser may explicitly flush the whole cache. */
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	} else {
		/*
		 * Verify that the specified address region actually belongs
		 * to this process.
		 */
		vma = find_vma (current->mm, addr);
		ret = -EINVAL;
		/* Check for overflow.  */
		if (addr + len < addr)
			goto out;
		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
			goto out;
	}

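	/*
	 * On the 68020/030 the caches are controlled through the CACR:
	 * bits 2/10 clear the single instruction/data cache entry
	 * addressed via the CAAR, bits 3/11 clear the entire
	 * instruction/data cache.
	 */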
	if (CPU_IS_020_OR_030) {
		if (scope == FLUSH_SCOPE_LINE && len < 256) {
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 4;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x400;
			len >>= 2;
			while (len--) {
				__asm__ __volatile__ ("movec %1, %%caar\n\t"
						      "movec %0, %%cacr"
						      : /* no outputs */
						      : "r" (cacr), "r" (addr));
				addr += 4;
			}
		} else {
			/* Flush the whole cache, even if page granularity requested. */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 8;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x800;
			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
		}
		ret = 0;
		goto out;
	} else {
	    /*
	     * 040 or 060: don't blindly trust 'scope', someone could
	     * try to flush a few megs of memory.
	     */

	    if (len >= 3*PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
	        scope = FLUSH_SCOPE_PAGE;
	    if (len >= 10*PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
	        scope = FLUSH_SCOPE_ALL;
	    if (CPU_IS_040) {
		ret = cache_flush_040 (addr, scope, cache, len);
	    } else if (CPU_IS_060) {
		ret = cache_flush_060 (addr, scope, cache, len);
	    }
	}
out:
	return ret;
}

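/*
 * Emulated compare-and-exchange on a user-space word.  The page tables
 * are walked by hand so the access can be done under the pte lock:
 * the target PTE must be present, writable and dirty, otherwise we
 * simulate a write fault so the kernel fixes things up, then retry.
 */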
/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
   D1 (newval).  */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
		      unsigned long __user * mem)
{
	/* This was borrowed from ARM's implementation.  */
	for (;;) {
		struct mm_struct *mm = current->mm;
		pgd_t *pgd;
		pmd_t *pmd;
		pte_t *pte;
		spinlock_t *ptl;
		unsigned long mem_value;

		down_read(&mm->mmap_sem);
		pgd = pgd_offset(mm, (unsigned long)mem);
		if (!pgd_present(*pgd))
			goto bad_access;
		pmd = pmd_offset(pgd, (unsigned long)mem);
		if (!pmd_present(*pmd))
			goto bad_access;
		pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
		if (!pte_present(*pte) || !pte_dirty(*pte)
		    || !pte_write(*pte)) {
			pte_unmap_unlock(pte, ptl);
			goto bad_access;
		}

		mem_value = *mem;
		if (mem_value == oldval)
			*mem = newval;

		pte_unmap_unlock(pte, ptl);
		up_read(&mm->mmap_sem);
		return mem_value;

	      bad_access:
		up_read(&mm->mmap_sem);
		/* This is not necessarily a bad access: we can get here if
		   the memory we're trying to write to needs to be copied
		   on write.  Make the kernel do the necessary page handling,
		   then retry.  Simulate a write access fault to do that.  */
		{
			/* The first argument of the function corresponds to
			   D1, which is the first field of struct pt_regs.  */
			struct pt_regs *fp = (struct pt_regs *)&newval;

			/* '3' is an RMW flag.  */
			if (do_page_fault(fp, (unsigned long)mem, 3))
				/* If do_page_fault() failed, we don't
				   have anything meaningful to return.
				   There should be a SIGSEGV pending for
				   the process.  */
				return 0xdeadbeef;
		}
	}
}

#else

/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	flush_cache_all();
	return 0;
}

/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
   D1 (newval).  */
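/* Without an MMU, user memory is directly addressable, so the
   compare-and-exchange can simply be done in place.  */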
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
		      unsigned long __user * mem)
{
	struct mm_struct *mm = current->mm;
	unsigned long mem_value;

	down_read(&mm->mmap_sem);

	mem_value = *mem;
	if (mem_value == oldval)
		*mem = newval;

	up_read(&mm->mmap_sem);
	return mem_value;
}

#endif /* CONFIG_MMU */

asmlinkage int sys_getpagesize(void)
{
	return PAGE_SIZE;
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
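/* The syscall number goes in %d0 and the arguments in %d1-%d3;
   the trap returns its result in %d0.  */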
int kernel_execve(const char *filename,
		  const char *const argv[],
		  const char *const envp[])
{
	register long __res asm ("%d0") = __NR_execve;
	register long __a asm ("%d1") = (long)(filename);
	register long __b asm ("%d2") = (long)(argv);
	register long __c asm ("%d3") = (long)(envp);
	asm volatile ("trap  #0" : "+d" (__res)
			: "d" (__a), "d" (__b), "d" (__c));
	return __res;
}

asmlinkage unsigned long sys_get_thread_area(void)
{
	return current_thread_info()->tp_value;
}

asmlinkage int sys_set_thread_area(unsigned long tp)
{
	current_thread_info()->tp_value = tp;
	return 0;
}

asmlinkage int sys_atomic_barrier(void)
{
	/* no code needed for uniprocs */
	return 0;
}