#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

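/*
 * Walk the PTEs mapped by one pmd entry, invoking ->pte_entry once per
 * page in [addr, end).  The walk stops early if a callback returns
 * non-zero, and that value is passed back to the caller.
 */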
static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	int err = 0;

	pte = pte_offset_map(pmd, addr);
	for (;;) {
		err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
		if (err)
			break;
		addr += PAGE_SIZE;
		if (addr == end)
			break;
		pte++;
	}

	pte_unmap(pte);
	return err;
}

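/*
 * Walk the pmd entries under one pud, reporting holes through ->pte_hole
 * and descending into walk_pte_range() when a ->pte_entry callback is
 * set.  A transparent huge pmd is split before the PTE-level walk, and
 * the entry is then re-checked ("again:") in case it changed under us.
 */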
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	int err = 0;

	pmd = pmd_offset(pud, addr);
	do {
again:
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		/*
		 * This implies that each ->pmd_entry() handler
		 * needs to know about pmd_trans_huge() pmds
		 */
		if (walk->pmd_entry)
			err = walk->pmd_entry(pmd, addr, next, walk);
		if (err)
			break;

		/*
		 * Check this here so we only break down trans_huge
		 * pages when we _need_ to
		 */
		if (!walk->pte_entry)
			continue;

		split_huge_page_pmd(walk->mm, pmd);
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			goto again;
		err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}

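/*
 * Walk the pud entries under one pgd, reporting holes through ->pte_hole
 * and descending into walk_pmd_range() when a lower-level callback is set.
 */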
static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	int err = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pud_entry)
			err = walk->pud_entry(pud, addr, next, walk);
		if (!err && (walk->pmd_entry || walk->pte_entry))
			err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
	return boundary < end ? boundary : end;
}

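/*
 * Walk a hugetlb vma in huge-page-sized steps, calling ->hugetlb_entry
 * for each huge pte that is populated.
 */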
static int walk_hugetlb_range(struct vm_area_struct *vma,
			      unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	struct hstate *h = hstate_vma(vma);
	unsigned long next;
	unsigned long hmask = huge_page_mask(h);
	pte_t *pte;
	int err = 0;

	do {
		next = hugetlb_entry_end(h, addr, end);
		pte = huge_pte_offset(walk->mm, addr & hmask);
		if (pte && walk->hugetlb_entry)
			err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
		if (err)
			return err;
	} while (addr = next, addr != end);

	return 0;
}

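/*
 * Return the hugetlb vma covering @addr, or NULL if there is none or if
 * the caller supplied no ->hugetlb_entry callback.
 */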
static struct vm_area_struct *hugetlb_vma(unsigned long addr,
					  struct mm_walk *walk)
{
	struct vm_area_struct *vma;

	/* We don't need vma lookup at all. */
	if (!walk->hugetlb_entry)
		return NULL;

	VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
	vma = find_vma(walk->mm, addr);
	if (vma && vma->vm_start <= addr && is_vm_hugetlb_page(vma))
		return vma;

	return NULL;
}

#else /* CONFIG_HUGETLB_PAGE */
static struct vm_area_struct *hugetlb_vma(unsigned long addr,
					  struct mm_walk *walk)
{
	return NULL;
}

static int walk_hugetlb_range(struct vm_area_struct *vma,
			      unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */

/**
 * walk_page_range - walk a memory map's page tables with a callback
 * @addr: starting address
 * @end: ending address
 * @walk: set of callbacks to invoke for each level of the tree
 *
 * Recursively walk the page table for the memory area in a VMA,
 * calling supplied callbacks. Callbacks are called in-order (first
 * PGD, first PUD, first PMD, first PTE, second PTE... second PMD,
 * etc.). If lower-level callbacks are omitted, walking depth is reduced.
 *
 * Each callback receives an entry pointer, the start and end of the
 * associated range, and a copy of the original mm_walk for access to
 * the ->private or ->mm fields.
 *
 * Usually no locks are taken, but splitting a transparent huge page may
 * take the page table lock.  The bottom-level iterator will map PTE
 * directories from highmem if necessary.
 *
 * If any callback returns a non-zero value, the walk is aborted and
 * the return value is propagated back to the caller. Otherwise 0 is
 * returned.
 *
 * walk->mm->mmap_sem must be held for at least read if walk->hugetlb_entry
 * is !NULL.
 */
int walk_page_range(unsigned long addr, unsigned long end,
		    struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	int err = 0;

	if (addr >= end)
		return err;

	if (!walk->mm)
		return -EINVAL;

	pgd = pgd_offset(walk->mm, addr);
	do {
		struct vm_area_struct *vma;

		next = pgd_addr_end(addr, end);

		/*
		 * Handle hugetlb vmas individually, because the page table
		 * walk for a hugetlb page is architecture-dependent and
		 * can't be handled in the same manner as non-huge pages.
		 */
		vma = hugetlb_vma(addr, walk);
		if (vma) {
			if (vma->vm_end < next)
				next = vma->vm_end;
			/*
			 * A hugepage is tightly coupled with its vma, so
			 * walk through hugetlb entries within a given vma.
			 */
			err = walk_hugetlb_range(vma, addr, next, walk);
			if (err)
				break;
			pgd = pgd_offset(walk->mm, next);
			continue;
		}

		if (pgd_none_or_clear_bad(pgd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			pgd++;
			continue;
		}
		if (walk->pgd_entry)
			err = walk->pgd_entry(pgd, addr, next, walk);
		if (!err &&
		    (walk->pud_entry || walk->pmd_entry || walk->pte_entry))
			err = walk_pud_range(pgd, addr, next, walk);
		if (err)
			break;
		pgd++;
	} while (addr = next, addr != end);

	return err;
}
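
/*
 * Example usage (an illustrative sketch, not part of this file): count
 * the present pages in a range with a ->pte_entry callback.  The helper
 * names count_pte() and count_present() are hypothetical; the mm_walk
 * fields used (.pte_entry, .mm, .private) are the ones documented above.
 * Per the locking comment above, mmap_sem is held for read around the
 * walk.
 */
#if 0	/* illustrative only */
static int count_pte(pte_t *pte, unsigned long addr, unsigned long end,
		     struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;		/* non-zero would abort the walk */
}

static unsigned long count_present(struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	unsigned long count = 0;
	struct mm_walk count_walk = {
		.pte_entry	= count_pte,
		.mm		= mm,
		.private	= &count,
	};

	down_read(&mm->mmap_sem);
	walk_page_range(start, end, &count_walk);
	up_read(&mm->mmap_sem);
	return count;
}
#endif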