#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>

static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  const struct mm_walk *walk, void *private)
{
	pte_t *pte;
	int err = 0;

	pte = pte_offset_map(pmd, addr);
	for (;;) {
		err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, private);
		if (err)
			break;
		addr += PAGE_SIZE;
		if (addr == end)
			break;
		pte++;
	}

	pte_unmap(pte);
	return err;
}

static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  const struct mm_walk *walk, void *private)
{
	pmd_t *pmd;
	unsigned long next;
	int err = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, private);
			if (err)
				break;
			continue;
		}
		if (walk->pmd_entry)
			err = walk->pmd_entry(pmd, addr, next, private);
		if (!err && walk->pte_entry)
			err = walk_pte_range(pmd, addr, next, walk, private);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}

static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  const struct mm_walk *walk, void *private)
{
	pud_t *pud;
	unsigned long next;
	int err = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, private);
			if (err)
				break;
			continue;
		}
		if (walk->pud_entry)
			err = walk->pud_entry(pud, addr, next, private);
		if (!err && (walk->pmd_entry || walk->pte_entry))
			err = walk_pmd_range(pud, addr, next, walk, private);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}

/**
 * walk_page_range - walk a memory map's page tables with a callback
 * @mm: memory map to walk
 * @addr: starting address
 * @end: ending address
 * @walk: set of callbacks to invoke for each level of the tree
 * @private: private data passed to the callback function
 *
 * Recursively walk the page table for the memory area in a VMA,
 * calling supplied callbacks. Callbacks are called in-order (first
 * PGD, first PUD, first PMD, first PTE, second PTE... second PMD,
 * etc.). If lower-level callbacks are omitted, walking depth is reduced.
 *
 * Each callback receives an entry pointer, the start and end of the
 * associated range, and a caller-supplied private data pointer.
 *
 * No locks are taken, but the bottom level iterator will map PTE
 * directories from highmem if necessary.
 *
 * If any callback returns a non-zero value, the walk is aborted and
 * the return value is propagated back to the caller. Otherwise 0 is
 * returned. A sketch of a typical caller follows the function below.
 */
int walk_page_range(const struct mm_struct *mm,
		    unsigned long addr, unsigned long end,
		    const struct mm_walk *walk, void *private)
{
	pgd_t *pgd;
	unsigned long next;
	int err = 0;

	if (addr >= end)
		return err;

	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, private);
			if (err)
				break;
			continue;
		}
		if (walk->pgd_entry)
			err = walk->pgd_entry(pgd, addr, next, private);
		if (!err &&
		    (walk->pud_entry || walk->pmd_entry || walk->pte_entry))
			err = walk_pud_range(pgd, addr, next, walk, private);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	return err;
}
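
/*
 * Illustrative usage sketch, not part of the original file: a caller
 * that counts present PTEs in a range by supplying only a pte_entry
 * callback.  The helper names count_pte_entry() and
 * count_present_ptes() are hypothetical; the callback signature
 * mirrors the walk->pte_entry() invocation in walk_pte_range() above,
 * and unset struct mm_walk members stay NULL, so the walk simply
 * skips those levels.
 */
static int count_pte_entry(pte_t *pte, unsigned long addr,
			   unsigned long end, void *private)
{
	unsigned long *count = private;

	/* Returning non-zero here would abort the whole walk. */
	if (pte_present(*pte))
		(*count)++;
	return 0;
}

static unsigned long count_present_ptes(struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	unsigned long count = 0;
	struct mm_walk count_walk = {
		.pte_entry = count_pte_entry,
	};

	walk_page_range(mm, start, end, &count_walk, &count);
	return count;
}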