/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/highmem.h>
#include <linux/gfp.h>

#include "rds.h"

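/*
 * Each cpu caches the unused tail of the last partially-consumed page
 * here; rds_page_remainder_alloc() below carves later sub-page
 * allocations out of it.
 */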
struct rds_page_remainder {
	struct page	*r_page;
	unsigned long	r_offset;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_page_remainder,
				     rds_page_remainders);

/*
 * returns 0 on success or -errno on failure.
 *
 * We don't have to worry about flush_dcache_page() as this only works
 * with private pages.  If, say, we were to do directed receive to pinned
 * user pages we'd have to worry more about cache coherence.  (Though
 * the flush_dcache_page() in get_user_pages() would probably be enough).
 */
int rds_page_copy_user(struct page *page, unsigned long offset,
		       void __user *ptr, unsigned long bytes,
		       int to_user)
{
	unsigned long ret;
	void *addr;

	addr = kmap(page);
	if (to_user) {
		rds_stats_add(s_copy_to_user, bytes);
		ret = copy_to_user(ptr, addr + offset, bytes);
	} else {
		rds_stats_add(s_copy_from_user, bytes);
		ret = copy_from_user(addr + offset, ptr, bytes);
	}
	kunmap(page);

	return ret ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(rds_page_copy_user);

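/*
 * Illustrative only: a minimal sketch of calling the helper above to
 * hand a received fragment to userspace.  frag_page, frag_off, uptr
 * and nbytes are hypothetical names, not taken from the RDS callers:
 *
 *	if (rds_page_copy_user(frag_page, frag_off, uptr, nbytes, 1))
 *		return -EFAULT;
 */
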
/*
 * Message allocation uses this to build up regions of a message.
 *
 * @bytes - the number of bytes needed.
 * @gfp - the waiting behaviour of the allocation
 *
 * @gfp is always ORed with __GFP_HIGHMEM.  Callers must be prepared to
 * kmap the pages, etc.
 *
 * If @bytes is at least a full page then this just returns a page from
 * alloc_page().
 *
 * If @bytes is a partial page then this stores the unused region of the
 * page in a per-cpu structure.  Future partial-page allocations may be
 * satisfied from that cached region.  This lets us waste less memory on
 * small allocations with minimal complexity.  It works because the transmit
 * path passes read-only page regions down to devices.  They hold a page
 * reference until they are done with the region.
 */
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
			     gfp_t gfp)
{
	struct rds_page_remainder *rem;
	unsigned long flags;
	struct page *page;
	int ret;

	gfp |= __GFP_HIGHMEM;

	/* jump straight to allocation if we're trying for a whole page */
	if (bytes >= PAGE_SIZE) {
		page = alloc_page(gfp);
		if (!page) {
			ret = -ENOMEM;
		} else {
			sg_set_page(scat, page, PAGE_SIZE, 0);
			ret = 0;
		}
		goto out;
	}

	rem = &per_cpu(rds_page_remainders, get_cpu());
	local_irq_save(flags);

	while (1) {
		/* avoid a tiny region getting stuck by tossing it */
		if (rem->r_page && bytes > (PAGE_SIZE - rem->r_offset)) {
			rds_stats_inc(s_page_remainder_miss);
			__free_page(rem->r_page);
			rem->r_page = NULL;
		}

		/* hand out a fragment from the cached page */
		if (rem->r_page && bytes <= (PAGE_SIZE - rem->r_offset)) {
			sg_set_page(scat, rem->r_page, bytes, rem->r_offset);
			get_page(sg_page(scat));

			if (rem->r_offset != 0)
				rds_stats_inc(s_page_remainder_hit);

			rem->r_offset += bytes;
			if (rem->r_offset == PAGE_SIZE) {
				__free_page(rem->r_page);
				rem->r_page = NULL;
			}
			ret = 0;
			break;
		}

		/* alloc if there is nothing for us to use */
		local_irq_restore(flags);
		put_cpu();

		page = alloc_page(gfp);

		/* we may have migrated; reload this cpu's remainder */
		rem = &per_cpu(rds_page_remainders, get_cpu());
		local_irq_save(flags);

		if (!page) {
			ret = -ENOMEM;
			break;
		}

		/* did someone race to fill the remainder before us? */
		if (rem->r_page) {
			__free_page(page);
			continue;
		}

		/* otherwise install our page and loop around to hand out
		 * a fragment */
		rem->r_page = page;
		rem->r_offset = 0;
	}

	local_irq_restore(flags);
	put_cpu();
out:
	rdsdebug("bytes %lu ret %d %p %u %u\n", bytes, ret,
		 ret ? NULL : sg_page(scat), ret ? 0 : scat->offset,
		 ret ? 0 : scat->length);
	return ret;
}
EXPORT_SYMBOL_GPL(rds_page_remainder_alloc);

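/*
 * Illustrative only: a rough sketch of a message-building loop filling
 * a scatterlist with the allocator above.  sg, remaining and the
 * GFP_HIGHUSER choice are hypothetical, not the real RDS message code:
 *
 *	while (remaining) {
 *		unsigned long sz = min_t(unsigned long, remaining, PAGE_SIZE);
 *
 *		if (rds_page_remainder_alloc(sg, sz, GFP_HIGHUSER))
 *			goto err;
 *		remaining -= sz;
 *		sg++;
 *	}
 */
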
/* Free a dead cpu's cached remainder page so it isn't leaked. */
static int rds_page_remainder_cpu_notify(struct notifier_block *self,
					 unsigned long action, void *hcpu)
{
	struct rds_page_remainder *rem;
	long cpu = (long)hcpu;

	rem = &per_cpu(rds_page_remainders, cpu);

	rdsdebug("cpu %ld action 0x%lx\n", cpu, action);

	switch (action) {
	case CPU_DEAD:
		if (rem->r_page)
			__free_page(rem->r_page);
		rem->r_page = NULL;
		break;
	}

	return 0;
}

static struct notifier_block rds_page_remainder_nb = {
	.notifier_call = rds_page_remainder_cpu_notify,
};

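/*
 * Module teardown: fake a CPU_DEAD event for every possible cpu so
 * each cached remainder page is freed.
 */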
void rds_page_exit(void)
{
	int i;

	for_each_possible_cpu(i)
		rds_page_remainder_cpu_notify(&rds_page_remainder_nb,
					      (unsigned long)CPU_DEAD,
					      (void *)(long)i);
}