/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <linux/export.h>

#include "rds.h"

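/*
 * Per-cpu cache of the unused tail of the most recently used partial
 * page.  Sub-page allocations in rds_page_remainder_alloc() are carved
 * from this region before falling back to a fresh alloc_page().
 */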
struct rds_page_remainder {
	struct page	*r_page;
	unsigned long	r_offset;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_page_remainder,
				     rds_page_remainders);

/*
 * returns 0 on success or -errno on failure.
 *
 * We don't have to worry about flush_dcache_page() as this only works
 * with private pages.  If, say, we were to do directed receive to pinned
 * user pages we'd have to worry more about cache coherence.  (Though
 * the flush_dcache_page() in get_user_pages() would probably be enough).
 */
int rds_page_copy_user(struct page *page, unsigned long offset,
		       void __user *ptr, unsigned long bytes,
		       int to_user)
{
	unsigned long ret;
	void *addr;

	addr = kmap(page);
	if (to_user) {
		rds_stats_add(s_copy_to_user, bytes);
		ret = copy_to_user(ptr, addr + offset, bytes);
	} else {
		rds_stats_add(s_copy_from_user, bytes);
		ret = copy_from_user(addr + offset, ptr, bytes);
	}
	kunmap(page);

	return ret ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(rds_page_copy_user);

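/*
 * Illustrative sketch only, not part of the original file: how a
 * receive path might hand one page-backed fragment to userspace with
 * rds_page_copy_user().  The fragment fields (frag_page, frag_off,
 * frag_len) are hypothetical stand-ins for a transport's real
 * fragment layout.
 */
static int __maybe_unused example_copy_frag_to_user(struct page *frag_page,
						    unsigned long frag_off,
						    unsigned long frag_len,
						    void __user *uptr)
{
	/* to_user = 1: copy from the kernel page out to the user buffer */
	return rds_page_copy_user(frag_page, frag_off, uptr, frag_len, 1);
}
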
/**
 * rds_page_remainder_alloc - build up regions of a message.
 *
 * @scat: Scatter list for message
 * @bytes: the number of bytes needed.
 * @gfp: the waiting behaviour of the allocation
 *
 * @gfp is always ored with __GFP_HIGHMEM.  Callers must be prepared to
 * kmap the pages, etc.
 *
 * If @bytes is at least a full page then this just returns a page from
 * alloc_page().
 *
 * If @bytes is a partial page then this stores the unused region of the
 * page in a per-cpu structure.  Future partial-page allocations may be
 * satisfied from that cached region.  This lets us waste less memory on
 * small allocations with minimal complexity.  It works because the transmit
 * path passes read-only page regions down to devices.  They hold a page
 * reference until they are done with the region.
 */
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
			     gfp_t gfp)
{
	struct rds_page_remainder *rem;
	unsigned long flags;
	struct page *page;
	int ret;

	gfp |= __GFP_HIGHMEM;

	/* jump straight to allocation if we're trying for a huge page */
	if (bytes >= PAGE_SIZE) {
		page = alloc_page(gfp);
		if (!page) {
			ret = -ENOMEM;
		} else {
			sg_set_page(scat, page, PAGE_SIZE, 0);
			ret = 0;
		}
		goto out;
	}

	rem = &per_cpu(rds_page_remainders, get_cpu());
	local_irq_save(flags);

	while (1) {
		/* avoid a tiny region getting stuck by tossing it */
		if (rem->r_page && bytes > (PAGE_SIZE - rem->r_offset)) {
			rds_stats_inc(s_page_remainder_miss);
			__free_page(rem->r_page);
			rem->r_page = NULL;
		}

		/* hand out a fragment from the cached page */
		if (rem->r_page && bytes <= (PAGE_SIZE - rem->r_offset)) {
			sg_set_page(scat, rem->r_page, bytes, rem->r_offset);
			get_page(sg_page(scat));

			if (rem->r_offset != 0)
				rds_stats_inc(s_page_remainder_hit);

			rem->r_offset += bytes;
			if (rem->r_offset == PAGE_SIZE) {
				__free_page(rem->r_page);
				rem->r_page = NULL;
			}
			ret = 0;
			break;
		}

		/* alloc if there is nothing for us to use */
		local_irq_restore(flags);
		put_cpu();

		page = alloc_page(gfp);

		rem = &per_cpu(rds_page_remainders, get_cpu());
		local_irq_save(flags);

		if (!page) {
			ret = -ENOMEM;
			break;
		}

		/* did someone race to fill the remainder before us? */
		if (rem->r_page) {
			__free_page(page);
			continue;
		}

		/* otherwise install our page and loop around to hand out
		 * a fragment from it
		 */
		rem->r_page = page;
		rem->r_offset = 0;
	}

|  | 171 |  | 
|  | 172 | local_irq_restore(flags); | 
|  | 173 | put_cpu(); | 
|  | 174 | out: | 
|  | 175 | rdsdebug("bytes %lu ret %d %p %u %u\n", bytes, ret, | 
|  | 176 | ret ? NULL : sg_page(scat), ret ? 0 : scat->offset, | 
|  | 177 | ret ? 0 : scat->length); | 
|  | 178 | return ret; | 
|  | 179 | } | 
| Andy Grover | 0b088e0 | 2010-05-24 20:12:41 -0700 | [diff] [blame] | 180 | EXPORT_SYMBOL_GPL(rds_page_remainder_alloc); | 
| Andy Grover | 7875e18 | 2009-02-24 15:30:26 +0000 | [diff] [blame] | 181 |  | 
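/*
 * Illustrative sketch only, not part of the original file: how a
 * message-building path might fill a scatterlist one region at a time
 * with rds_page_remainder_alloc().  The entry count and sizes are
 * hypothetical, and error unwinding of earlier entries (which a real
 * caller must do, since each entry holds a page reference) is omitted
 * for brevity.
 */
static int __maybe_unused example_fill_sg(struct scatterlist *sg,
					  unsigned int nents,
					  unsigned long total_bytes)
{
	unsigned long bytes = total_bytes;
	unsigned int i;
	int ret;

	for (i = 0; i < nents && bytes; i++) {
		/* each call fills one entry with up to a page of space */
		ret = rds_page_remainder_alloc(&sg[i],
					       min_t(unsigned long, bytes,
						     PAGE_SIZE),
					       GFP_KERNEL);
		if (ret)
			return ret;
		bytes -= sg[i].length;
	}
	return 0;
}
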
/*
 * CPU hotplug callback: when a CPU goes offline, free any page
 * remainder it was still caching so the page is not leaked.
 */
static int rds_page_remainder_cpu_notify(struct notifier_block *self,
					 unsigned long action, void *hcpu)
{
	struct rds_page_remainder *rem;
	long cpu = (long)hcpu;

	rem = &per_cpu(rds_page_remainders, cpu);

	rdsdebug("cpu %ld action 0x%lx\n", cpu, action);

	switch (action) {
	case CPU_DEAD:
		if (rem->r_page)
			__free_page(rem->r_page);
		rem->r_page = NULL;
		break;
	}

	return 0;
}

static struct notifier_block rds_page_remainder_nb = {
	.notifier_call = rds_page_remainder_cpu_notify,
};

/*
 * Module teardown: invoke the CPU_DEAD handler for every possible CPU
 * to release all cached page remainders.
 */
void rds_page_exit(void)
{
	int i;

	for_each_possible_cpu(i)
		rds_page_remainder_cpu_notify(&rds_page_remainder_nb,
					      (unsigned long)CPU_DEAD,
					      (void *)(long)i);
}