/*
 * Quicklist support.
 *
 * Quicklists are light weight lists of pages that have a defined state
 * on alloc and free. Pages must be in the quicklist specific defined state
 * (zero by default) when the page is freed. It seems that the initial idea
 * for such lists first came from Dave Miller and then various other people
 * improved on it.
 *
 * Copyright (C) 2007 SGI,
 *	Christoph Lameter <clameter@sgi.com>
 *		Generalized, added support for multiple lists and
 *		constructors / destructors.
 */
#include <linux/kernel.h>

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/quicklist.h>

DEFINE_PER_CPU(struct quicklist [CONFIG_NR_QUICK], quicklist);
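
/*
 * Illustrative sketch, not part of the original file: how a caller might use
 * the quicklist_alloc()/quicklist_free() interfaces declared in
 * <linux/quicklist.h>, which this file backs.  The function names and the
 * choice of list 0 below are hypothetical examples.  Per the header comment
 * above, a page passed to quicklist_free() must already be in the list's
 * defined state (zeroed by default).
 */
static inline void *example_zeroed_page_alloc(void)
{
	/* Take a page from per-cpu list 0; quicklist_alloc() falls back to
	 * the page allocator when the list is empty. */
	return quicklist_alloc(0, GFP_KERNEL, NULL);
}

static inline void example_zeroed_page_free(void *page)
{
	/* Hand the (still zeroed) page back to per-cpu list 0. */
	quicklist_free(0, NULL, page);
}
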
#define FRACTION_OF_NODE_MEM	16

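/*
 * Upper bound on the number of pages one per-cpu quicklist may hold:
 * 1/FRACTION_OF_NODE_MEM of this node's free low memory, split evenly
 * among the CPUs on the node, but never less than the caller's min_pages.
 */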
static unsigned long max_pages(unsigned long min_pages)
{
	unsigned long node_free_pages, max;
	int node = numa_node_id();
	struct zone *zones = NODE_DATA(node)->node_zones;
	int num_cpus_on_node;

	node_free_pages =
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], NR_FREE_PAGES) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], NR_FREE_PAGES) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES);

	max = node_free_pages / FRACTION_OF_NODE_MEM;

	num_cpus_on_node = cpumask_weight(cpumask_of_node(node));
	max /= num_cpus_on_node;

	return max(max, min_pages);
}

static long min_pages_to_free(struct quicklist *q,
	unsigned long min_pages, long max_free)
{
	long pages_to_free;

	pages_to_free = q->nr_pages - max_pages(min_pages);

	return min(pages_to_free, max_free);
}

/*
 * Trim down the number of pages in the quicklist
 */
void quicklist_trim(int nr, void (*dtor)(void *),
	unsigned long min_pages, unsigned long max_free)
{
	long pages_to_free;
	struct quicklist *q;

	q = &get_cpu_var(quicklist)[nr];
	if (q->nr_pages > min_pages) {
		pages_to_free = min_pages_to_free(q, min_pages, max_free);

		while (pages_to_free > 0) {
			/*
			 * We pass a gfp_t of 0 to quicklist_alloc here
			 * because we will never call into the page allocator.
			 */
			void *p = quicklist_alloc(nr, 0, NULL);

			if (dtor)
				dtor(p);
			free_page((unsigned long)p);
			pages_to_free--;
		}
	}
	put_cpu_var(quicklist);
}

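/*
 * Illustrative sketch, not part of the original file: quicklist_trim() is
 * typically called from an architecture's periodic page-table cache check so
 * that CPUs holding too many cached pages give some back.  The list index
 * and the 25/16 thresholds below are hypothetical example values.
 */
static inline void example_check_pgt_cache(void)
{
	/* Keep at least 25 pages cached on list 0; free at most 16 per call. */
	quicklist_trim(0, NULL, 25, 16);
}

/*
 * Total number of pages currently held on all per-cpu quicklists, so the
 * size of this cache can be reported (e.g. as the "Quicklists:" line in
 * /proc/meminfo when CONFIG_QUICKLIST is enabled).
 */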
unsigned long quicklist_total_size(void)
{
	unsigned long count = 0;
	int cpu;
	struct quicklist *ql, *q;

	for_each_online_cpu(cpu) {
		ql = per_cpu(quicklist, cpu);
		for (q = ql; q < ql + CONFIG_NR_QUICK; q++)
			count += q->nr_pages;
	}
	return count;
}