/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <linux/export.h>

#include "rds.h"

struct rds_page_remainder {
	struct page	*r_page;
	unsigned long	r_offset;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_page_remainder,
				     rds_page_remainders);

/*
 * returns 0 on success or -errno on failure.
 *
 * We don't have to worry about flush_dcache_page() as this only works
 * with private pages.  If, say, we were to do directed receive to pinned
 * user pages we'd have to worry more about cache coherence.  (Though
 * the flush_dcache_page() in get_user_pages() would probably be enough).
 */
int rds_page_copy_user(struct page *page, unsigned long offset,
		       void __user *ptr, unsigned long bytes,
		       int to_user)
{
	unsigned long ret;
	void *addr;

	addr = kmap(page);
	if (to_user) {
		rds_stats_add(s_copy_to_user, bytes);
		ret = copy_to_user(ptr, addr + offset, bytes);
	} else {
		rds_stats_add(s_copy_from_user, bytes);
		ret = copy_from_user(addr + offset, ptr, bytes);
	}
	kunmap(page);

	return ret ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(rds_page_copy_user);
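
/*
 * Usage sketch (illustrative only; variable names are hypothetical):
 * a receive path copies a message fragment out to a user buffer via
 * the rds_page_copy_to_user() wrapper from rds.h, roughly:
 *
 *	if (rds_page_copy_to_user(sg_page(sg), sg->offset + frag_off,
 *				  iov->iov_base, to_copy))
 *		return -EFAULT;
 *
 * A short (partial) copy is folded into -EFAULT by this function, so
 * callers only need to test for non-zero.
 */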

/*
 * Message allocation uses this to build up regions of a message.
 *
 * @bytes - the number of bytes needed.
 * @gfp - the waiting behaviour of the allocation
 *
 * @gfp is always OR'd with __GFP_HIGHMEM.  Callers must be prepared to
 * kmap the pages, etc.
 *
 * If @bytes is at least a full page then this just returns a page from
 * alloc_page().
 *
 * If @bytes is a partial page then this stores the unused region of the
 * page in a per-cpu structure.  Future partial-page allocations may be
 * satisfied from that cached region.  This lets us waste less memory on
 * small allocations with minimal complexity.  It works because the
 * transmit path passes read-only page regions down to devices.  They
 * hold a page reference until they are done with the region.
 */
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
			     gfp_t gfp)
{
	struct rds_page_remainder *rem;
	unsigned long flags;
	struct page *page;
	int ret;

	gfp |= __GFP_HIGHMEM;

	/* jump straight to allocation if asked for a whole page or more */
	if (bytes >= PAGE_SIZE) {
		page = alloc_page(gfp);
		if (!page) {
			ret = -ENOMEM;
		} else {
			sg_set_page(scat, page, PAGE_SIZE, 0);
			ret = 0;
		}
		goto out;
	}

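	/*
	 * The cached remainder is per-CPU state, so pin this CPU with
	 * get_cpu() and keep local interrupts off while inspecting it.
	 * alloc_page() may sleep, so the pin is dropped and retaken
	 * around each allocation in the loop below.
	 */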
	rem = &per_cpu(rds_page_remainders, get_cpu());
	local_irq_save(flags);

	while (1) {
		/* avoid a tiny region getting stuck by tossing it */
		if (rem->r_page && bytes > (PAGE_SIZE - rem->r_offset)) {
			rds_stats_inc(s_page_remainder_miss);
			__free_page(rem->r_page);
			rem->r_page = NULL;
		}

		/* hand out a fragment from the cached page */
		if (rem->r_page && bytes <= (PAGE_SIZE - rem->r_offset)) {
			sg_set_page(scat, rem->r_page, bytes, rem->r_offset);
			get_page(sg_page(scat));

			if (rem->r_offset != 0)
				rds_stats_inc(s_page_remainder_hit);

			rem->r_offset += bytes;
			if (rem->r_offset == PAGE_SIZE) {
				__free_page(rem->r_page);
				rem->r_page = NULL;
			}
			ret = 0;
			break;
		}

		/* alloc if there is nothing for us to use */
		local_irq_restore(flags);
		put_cpu();

		page = alloc_page(gfp);

		rem = &per_cpu(rds_page_remainders, get_cpu());
		local_irq_save(flags);

		if (!page) {
			ret = -ENOMEM;
			break;
		}

		/* did someone race to fill the remainder before us? */
		if (rem->r_page) {
			__free_page(page);
			continue;
		}

		/* otherwise install our page and loop around to hand it out */
		rem->r_page = page;
		rem->r_offset = 0;
	}

	local_irq_restore(flags);
	put_cpu();
out:
	rdsdebug("bytes %lu ret %d %p %u %u\n", bytes, ret,
		 ret ? NULL : sg_page(scat), ret ? 0 : scat->offset,
		 ret ? 0 : scat->length);
	return ret;
}
EXPORT_SYMBOL_GPL(rds_page_remainder_alloc);
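
/*
 * Usage sketch (illustrative only; variable names are hypothetical):
 * a caller building a message fills its scatterlist one entry at a
 * time, at most a page per entry:
 *
 *	while (to_alloc) {
 *		unsigned long len = min_t(unsigned long, to_alloc,
 *					  PAGE_SIZE);
 *		int ret = rds_page_remainder_alloc(sg, len, GFP_HIGHUSER);
 *
 *		if (ret)
 *			return ret;
 *		to_alloc -= len;
 *		sg++;
 *	}
 *
 * Two sub-page allocations made back to back on the same CPU can then
 * land in one shared page via the remainder cache.
 */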

static int rds_page_remainder_cpu_notify(struct notifier_block *self,
					 unsigned long action, void *hcpu)
{
	struct rds_page_remainder *rem;
	long cpu = (long)hcpu;

	rem = &per_cpu(rds_page_remainders, cpu);

	rdsdebug("cpu %ld action 0x%lx\n", cpu, action);

	switch (action) {
	case CPU_DEAD:
		if (rem->r_page)
			__free_page(rem->r_page);
		rem->r_page = NULL;
		break;
	}

	return 0;
}

static struct notifier_block rds_page_remainder_nb = {
	.notifier_call = rds_page_remainder_cpu_notify,
};

void rds_page_exit(void)
{
	int i;

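	/*
	 * Fake a CPU_DEAD event for every possible CPU so the notifier
	 * callback frees each CPU's cached remainder page.
	 */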
	for_each_possible_cpu(i)
		rds_page_remainder_cpu_notify(&rds_page_remainder_nb,
					      (unsigned long)CPU_DEAD,
					      (void *)(long)i);
}