/*
 * linux/mm/allocpercpu.c
 *
 * Separated from slab.c August 11, 2006 Christoph Lameter
 */
#include <linux/mm.h>
#include <linux/module.h>

#ifndef cache_line_size
#define cache_line_size()	L1_CACHE_BYTES
#endif

/**
 * percpu_depopulate - depopulate per-cpu data for given cpu
 * @__pdata: per-cpu data to depopulate
 * @cpu: depopulate per-cpu data for this cpu
 *
 * Depopulating per-cpu data for a cpu going offline would be a typical
 * use case. You need to register a cpu hotplug handler for that purpose.
 */
static void percpu_depopulate(void *__pdata, int cpu)
{
	struct percpu_data *pdata = __percpu_disguise(__pdata);

	kfree(pdata->ptrs[cpu]);
	pdata->ptrs[cpu] = NULL;
}

/**
 * percpu_depopulate_mask - depopulate per-cpu data for some cpus
 * @__pdata: per-cpu data to depopulate
 * @mask: depopulate per-cpu data for cpus selected through mask bits
 */
static void __percpu_depopulate_mask(void *__pdata, const cpumask_t *mask)
{
	int cpu;
	for_each_cpu_mask_nr(cpu, *mask)
		percpu_depopulate(__pdata, cpu);
}

#define percpu_depopulate_mask(__pdata, mask) \
	__percpu_depopulate_mask((__pdata), &(mask))
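
/*
 * Illustrative sketch (not part of this file's API): the hotplug
 * handler mentioned above, using the cpu notifier interface of this
 * kernel era.  Since percpu_depopulate() is static, such a handler
 * would have to live in this file; "my_pdata" is a hypothetical
 * pointer previously returned by __alloc_percpu().
 *
 *	static int my_cpu_callback(struct notifier_block *nb,
 *				   unsigned long action, void *hcpu)
 *	{
 *		int cpu = (unsigned long)hcpu;
 *
 *		if (action == CPU_DEAD)
 *			percpu_depopulate(my_pdata, cpu);
 *		return NOTIFY_OK;
 *	}
 */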

/**
 * percpu_populate - populate per-cpu data for given cpu
 * @__pdata: per-cpu data to populate further
 * @size: size of per-cpu object
 * @gfp: may sleep or not etc.
 * @cpu: populate per-cpu data for this cpu
 *
 * Populating per-cpu data for a cpu coming online would be a typical
 * use case. You need to register a cpu hotplug handler for that purpose.
 * Per-cpu object is populated with zeroed buffer.
 */
static void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
{
	struct percpu_data *pdata = __percpu_disguise(__pdata);
	int node = cpu_to_node(cpu);

	/*
	 * We should make sure each CPU gets private memory.
	 */
	size = roundup(size, cache_line_size());

	BUG_ON(pdata->ptrs[cpu]);
	if (node_online(node))
		pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
	else
		pdata->ptrs[cpu] = kzalloc(size, gfp);
	return pdata->ptrs[cpu];
}

/**
 * percpu_populate_mask - populate per-cpu data for more cpus
 * @__pdata: per-cpu data to populate further
 * @size: size of per-cpu object
 * @gfp: may sleep or not etc.
 * @mask: populate per-cpu data for cpus selected through mask bits
 *
 * Per-cpu objects are populated with zeroed buffers.
 */
static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
				  cpumask_t *mask)
{
	cpumask_t populated;
	int cpu;

	cpus_clear(populated);
	for_each_cpu_mask_nr(cpu, *mask)
		if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
			__percpu_depopulate_mask(__pdata, &populated);
			return -ENOMEM;
		} else
			cpu_set(cpu, populated);
	return 0;
}

#define percpu_populate_mask(__pdata, size, gfp, mask) \
	__percpu_populate_mask((__pdata), (size), (gfp), &(mask))
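
/*
 * Illustrative sketch: the populate leg of the same hypothetical
 * notifier.  CPU_UP_PREPARE runs before the cpu starts scheduling,
 * so an allocation failure can still veto the bring-up ("struct
 * my_data" is a hypothetical per-cpu object type):
 *
 *	case CPU_UP_PREPARE:
 *		if (!percpu_populate(my_pdata, sizeof(struct my_data),
 *				     GFP_KERNEL, cpu))
 *			return NOTIFY_BAD;
 *		return NOTIFY_OK;
 */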

/**
 * alloc_percpu - initial setup of per-cpu data
 * @size: size of per-cpu object
 * @align: alignment
 *
 * Allocate dynamic percpu area.  Percpu objects are populated with
 * zeroed buffers.
 */
void *__alloc_percpu(size_t size, size_t align)
{
	/*
	 * We allocate whole cache lines to avoid false sharing
	 */
	size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
	void *pdata = kzalloc(sz, GFP_KERNEL);
	void *__pdata = __percpu_disguise(pdata);

	/*
	 * Can't easily make larger alignment work with kmalloc.  WARN
	 * on it.  Larger alignment should only be used for module
	 * percpu sections on SMP for which this path isn't used.
	 */
	WARN_ON_ONCE(align > SMP_CACHE_BYTES);

	if (unlikely(!pdata))
		return NULL;
	if (likely(!__percpu_populate_mask(__pdata, size, GFP_KERNEL,
					   &cpu_possible_map)))
		return __pdata;
	kfree(pdata);
	return NULL;
}
EXPORT_SYMBOL_GPL(__alloc_percpu);
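
/*
 * Example usage (illustrative, assuming the alloc_percpu() wrapper in
 * <linux/percpu.h>, which passes the type's size and __alignof__ down
 * to __alloc_percpu()).  "struct my_stats" with an "events" field is a
 * hypothetical type; each cpu's copy comes back zeroed and is reached
 * via per_cpu_ptr():
 *
 *	struct my_stats *stats = alloc_percpu(struct my_stats);
 *
 *	if (!stats)
 *		return -ENOMEM;
 *	per_cpu_ptr(stats, get_cpu())->events++;
 *	put_cpu();
 */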

/**
 * free_percpu - final cleanup of per-cpu data
 * @__pdata: object to clean up
 *
 * We simply clean up any per-cpu object left. No need for the client to
 * track and specify through a bit mask which per-cpu objects are to be
 * freed.
 */
void free_percpu(void *__pdata)
{
	if (unlikely(!__pdata))
		return;
	__percpu_depopulate_mask(__pdata, cpu_possible_mask);
	kfree(__percpu_disguise(__pdata));
}
EXPORT_SYMBOL_GPL(free_percpu);
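
/*
 * Illustrative teardown for the sketch above: free_percpu() tolerates
 * NULL and walks every possible cpu itself, so no caller-side tracking
 * is needed:
 *
 *	free_percpu(stats);
 *	stats = NULL;
 */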