/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return newprot;
}
#endif

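/*
 * Walk the ptes of one pmd and apply newprot to every present entry.
 * Runs under the pte lock; write migration entries are downgraded to
 * read so the new protection cannot be bypassed while a page is in
 * flight.
 */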
static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pte_t *pte, oldpte;
	spinlock_t *ptl;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;

			ptent = ptep_modify_prot_start(mm, addr, pte);
			ptent = pte_modify(ptent, newprot);

			/*
			 * Avoid taking write faults for pages we know to be
			 * dirty.
			 */
			if (dirty_accountable && pte_dirty(ptent))
				ptent = pte_mkwrite(ptent);

			ptep_modify_prot_commit(mm, addr, pte, ptent);
		} else if (PAGE_MIGRATION && !pte_file(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				set_pte_at(mm, addr, pte,
					swp_entry_to_pte(entry));
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
}

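/*
 * Walk the pmds of one pud, descending into the pte range of every
 * pmd that is actually populated.
 */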
static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable);
	} while (pmd++, addr = next, addr != end);
}

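/*
 * Walk the puds of one pgd, descending into the pmd range of every
 * pud that is actually populated.
 */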
static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		change_pmd_range(mm, pud, addr, next, newprot, dirty_accountable);
	} while (pud++, addr = next, addr != end);
}

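/*
 * Top of the page-table walk: flush caches for the range, rewrite
 * every pte in [addr, end) with newprot, then flush the TLB.
 */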
static void change_protection(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		change_pud_range(mm, pgd, addr, next, newprot, dirty_accountable);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_range(vma, start, end);
}

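/*
 * Apply newflags to the range [start, end) of vma: account for any
 * newly writable private pages, merge or split vmas so the range is
 * exactly covered, then rewrite the page-table protections.
 */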
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory(charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
					  vm_get_page_prot(newflags));

	if (vma_wants_writenotify(vma)) {
		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
		dirty_accountable = 1;
	}

	mmu_notifier_invalidate_range_start(mm, start, end);
	if (is_vm_hugetlb_page(vma))
		hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
	else
		change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
	mmu_notifier_invalidate_range_end(mm, start, end);
	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

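/*
 * mprotect(2): change the protection of [start, start+len) to prot.
 * The range may span several vmas; each is fixed up in turn, and a
 * hole anywhere in the range fails the call with -ENOMEM.
 */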
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC:
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma_prev(current->mm, start, &prev);
	error = -ENOMEM;
	if (!vma)
		goto out;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	}
	else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

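	/*
	 * Fix up each vma covering [start, end) in turn; the loop exits
	 * through "out" on success, on error, or at a hole in the range.
	 */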
	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shifts VM_MAY% into the place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		perf_event_mmap(vma);
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}