/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>

#include "rds.h"
#include "rdma.h"
#include "ib.h"


/*
 * This is stored as mr->r_trans_private.
 */
struct rds_ib_mr {
	struct rds_ib_device	*device;
	struct rds_ib_mr_pool	*pool;
	struct ib_fmr		*fmr;
	struct list_head	list;
	unsigned int		remap_count;

	struct scatterlist	*sg;
	unsigned int		sg_len;
	u64			*dma;
	int			sg_dma_len;
};

/*
 * Our own little FMR pool
 */
struct rds_ib_mr_pool {
	struct mutex		flush_lock;		/* serialize fmr invalidate */
	struct work_struct	flush_worker;		/* flush worker */

	spinlock_t		list_lock;		/* protect variables below */
	atomic_t		item_count;		/* total # of MRs */
	atomic_t		dirty_count;		/* # of dirty MRs */
	struct list_head	drop_list;		/* MRs that have reached their max_maps limit */
	struct list_head	free_list;		/* unused MRs */
	struct list_head	clean_list;		/* unused & unmapped MRs */
	atomic_t		free_pinned;		/* memory pinned by free MRs */
	unsigned long		max_items;
	unsigned long		max_items_soft;
	unsigned long		max_free_pinned;
	struct ib_fmr_attr	fmr_attr;
};

static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all);
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
static void rds_ib_mr_pool_flush_worker(struct work_struct *work);

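/*
 * Find the rds_ib_device that the given IP address has been bound to,
 * i.e. the device whose ipaddr_list contains ipaddr. Returns NULL if no
 * device claims the address.
 */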
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_ipaddr *i_ipaddr;

	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		spin_lock_irq(&rds_ibdev->spinlock);
		list_for_each_entry(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
			if (i_ipaddr->ipaddr == ipaddr) {
				spin_unlock_irq(&rds_ibdev->spinlock);
				return rds_ibdev;
			}
		}
		spin_unlock_irq(&rds_ibdev->spinlock);
	}

	return NULL;
}

static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;

	i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
	if (!i_ipaddr)
		return -ENOMEM;

	i_ipaddr->ipaddr = ipaddr;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_add_tail(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
	spin_unlock_irq(&rds_ibdev->spinlock);

	return 0;
}

static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr, *next;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_for_each_entry_safe(i_ipaddr, next, &rds_ibdev->ipaddr_list, list) {
		if (i_ipaddr->ipaddr == ipaddr) {
			list_del(&i_ipaddr->list);
			kfree(i_ipaddr);
			break;
		}
	}
	spin_unlock_irq(&rds_ibdev->spinlock);
}

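/*
 * Rebind ipaddr to rds_ibdev: remove it from whichever device currently
 * lists it, then add it to this device's ipaddr_list.
 */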
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev_old;

	rds_ibdev_old = rds_ib_get_device(ipaddr);
	if (rds_ibdev_old)
		rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);

	return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
}

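/*
 * Attach a connection to its device: take it off the global
 * ib_nodev_conns list and put it on rds_ibdev's conn_list.
 */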
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&ib_nodev_conns_lock);
	BUG_ON(list_empty(&ib_nodev_conns));
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);

	spin_lock_irq(&rds_ibdev->spinlock);
	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
	spin_unlock_irq(&rds_ibdev->spinlock);
	spin_unlock_irq(&ib_nodev_conns_lock);

	ic->rds_ibdev = rds_ibdev;
}

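/*
 * Detach a connection from its device and put it back on the global
 * ib_nodev_conns list.
 */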
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&ib_nodev_conns_lock);

	spin_lock_irq(&rds_ibdev->spinlock);
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);
	spin_unlock_irq(&rds_ibdev->spinlock);

	list_add_tail(&ic->ib_node, &ib_nodev_conns);

	spin_unlock(&ib_nodev_conns_lock);

	ic->rds_ibdev = NULL;
}

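/*
 * Tear down every connection on the given list. The list is spliced onto
 * a private list under the lock so that rds_conn_destroy() runs with
 * interrupts enabled.
 */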
void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock)
{
	struct rds_ib_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(list_lock);
	list_splice(list, &tmp_list);
	INIT_LIST_HEAD(list);
	spin_unlock_irq(list_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
		rds_conn_destroy(ic->conn);
}

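/*
 * Allocate and initialize the per-device FMR pool. The pool limits are
 * derived from the device's max_fmrs and fmr_message_size.
 */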
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->drop_list);
	INIT_LIST_HEAD(&pool->clean_list);
	mutex_init(&pool->flush_lock);
	spin_lock_init(&pool->list_lock);
	INIT_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

	pool->fmr_attr.max_pages = fmr_message_size;
	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
	pool->fmr_attr.page_shift = PAGE_SHIFT;
	pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4;

	/* We never allow more than max_items MRs to be allocated.
	 * Once we exceed max_items_soft, we start freeing
	 * items more aggressively.
	 * Make sure that max_items > max_items_soft > max_items / 2
	 */
	pool->max_items_soft = rds_ibdev->max_fmrs * 3 / 4;
	pool->max_items = rds_ibdev->max_fmrs;

	return pool;
}

void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

	iinfo->rdma_mr_max = pool->max_items;
	iinfo->rdma_mr_size = pool->fmr_attr.max_pages;
}

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
	flush_workqueue(rds_wq);
	rds_ib_flush_mr_pool(pool, 1);
	WARN_ON(atomic_read(&pool->item_count));
	WARN_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}

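/*
 * Grab an already-allocated, unmapped MR off the clean list, if one is
 * available.
 */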
static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->list_lock, flags);
	if (!list_empty(&pool->clean_list)) {
		ibmr = list_entry(pool->clean_list.next, struct rds_ib_mr, list);
		list_del_init(&ibmr->list);
	}
	spin_unlock_irqrestore(&pool->list_lock, flags);

	return ibmr;
}

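/*
 * Get an MR for a new mapping: prefer a clean MR from the pool, otherwise
 * allocate a fresh FMR, flushing dirty MRs if the pool is at its limit.
 */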
static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
	struct rds_ib_mr *ibmr = NULL;
	int err = 0, iter = 0;

	while (1) {
		ibmr = rds_ib_reuse_fmr(pool);
		if (ibmr)
			return ibmr;

		/* No clean MRs - now we have the choice of either
		 * allocating a fresh MR up to the limit imposed by the
		 * driver, or flushing any dirty unused MRs.
		 * We try to avoid stalling in the send path if possible,
		 * so we allocate as long as we're allowed to.
		 *
		 * We're fussy with enforcing the FMR limit, though. If the driver
		 * tells us we can't use more than N fmrs, we shouldn't start
		 * arguing with it */
		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			rds_ib_stats_inc(s_ib_rdma_mr_pool_depleted);
			return ERR_PTR(-EAGAIN);
		}

		/* We do have some empty MRs. Flush them out. */
		rds_ib_stats_inc(s_ib_rdma_mr_pool_wait);
		rds_ib_flush_mr_pool(pool, 0);
	}

	ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
			(IB_ACCESS_LOCAL_WRITE |
			 IB_ACCESS_REMOTE_READ |
			 IB_ACCESS_REMOTE_WRITE),
			&pool->fmr_attr);
	if (IS_ERR(ibmr->fmr)) {
		err = PTR_ERR(ibmr->fmr);
		ibmr->fmr = NULL;
		printk(KERN_WARNING "RDS/IB: ib_alloc_fmr failed (err=%d)\n", err);
		goto out_no_cigar;
	}

	rds_ib_stats_inc(s_ib_rdma_mr_alloc);
	return ibmr;

out_no_cigar:
	if (ibmr) {
		if (ibmr->fmr)
			ib_dealloc_fmr(ibmr->fmr);
		kfree(ibmr);
	}
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}

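/*
 * DMA-map the caller's scatterlist and remap the FMR onto it. The list
 * must describe whole pages except at its ends: only the first entry may
 * start at a non-zero page offset and only the last may end short of a
 * page boundary, otherwise the mapping is rejected with -EINVAL.
 */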
static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
	       struct scatterlist *sg, unsigned int nents)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct scatterlist *scat = sg;
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt, sg_dma_len;
	int i, j;
	int ret;

	sg_dma_len = ib_dma_map_sg(dev, sg, nents,
				 DMA_BIDIRECTIONAL);
	if (unlikely(!sg_dma_len)) {
		printk(KERN_WARNING "RDS/IB: dma_map_sg failed!\n");
		return -EBUSY;
	}

	len = 0;
	page_cnt = 0;

	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		if (dma_addr & ~PAGE_MASK) {
			if (i > 0)
				return -EINVAL;
			else
				++page_cnt;
		}
		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < sg_dma_len - 1)
				return -EINVAL;
			else
				++page_cnt;
		}

		len += dma_len;
	}

	page_cnt += len >> PAGE_SHIFT;
	if (page_cnt > fmr_message_size)
		return -EINVAL;

	dma_pages = kmalloc(sizeof(u64) * page_cnt, GFP_ATOMIC);
	if (!dma_pages)
		return -ENOMEM;

	page_cnt = 0;
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		for (j = 0; j < dma_len; j += PAGE_SIZE)
			dma_pages[page_cnt++] =
				(dma_addr & PAGE_MASK) + j;
	}

	ret = ib_map_phys_fmr(ibmr->fmr,
				   dma_pages, page_cnt, io_addr);
	if (ret)
		goto out;

	/* Success - we successfully remapped the MR, so we can
	 * safely tear down the old mapping. */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = scat;
	ibmr->sg_len = nents;
	ibmr->sg_dma_len = sg_dma_len;
	ibmr->remap_count++;

	rds_ib_stats_inc(s_ib_rdma_mr_used);
	ret = 0;

out:
	kfree(dma_pages);

	return ret;
}

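/*
 * Sync the MR's pages for CPU or device access, depending on the DMA
 * direction of the pending operation.
 */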
void rds_ib_sync_mr(void *trans_private, int direction)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}

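/*
 * Undo an MR's current mapping: destroy the DMA mapping, then dirty and
 * unpin the pages backing the scatterlist.
 */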
static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->sg_dma_len) {
		ib_dma_unmap_sg(rds_ibdev->dev,
				ibmr->sg, ibmr->sg_len,
				DMA_BIDIRECTIONAL);
		ibmr->sg_dma_len = 0;
	}

	/* Release the s/g list */
	if (ibmr->sg_len) {
		unsigned int i;

		for (i = 0; i < ibmr->sg_len; ++i) {
			struct page *page = sg_page(&ibmr->sg[i]);

			/* FIXME we need a way to tell a r/w MR
			 * from a r/o MR */
			BUG_ON(in_interrupt());
			set_page_dirty(page);
			put_page(page);
		}
		kfree(ibmr->sg);

		ibmr->sg = NULL;
		ibmr->sg_len = 0;
	}
}

static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	unsigned int pinned = ibmr->sg_len;

	__rds_ib_teardown_mr(ibmr);
	if (pinned) {
		struct rds_ib_device *rds_ibdev = ibmr->device;
		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

		atomic_sub(pinned, &pool->free_pinned);
	}
}

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
	unsigned int item_count;

	item_count = atomic_read(&pool->item_count);
	if (free_all)
		return item_count;

	return 0;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
{
	struct rds_ib_mr *ibmr, *next;
	LIST_HEAD(unmap_list);
	LIST_HEAD(fmr_list);
	unsigned long unpinned = 0;
	unsigned long flags;
	unsigned int nfreed = 0, ncleaned = 0, free_goal;
	int ret = 0;

	rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);

	mutex_lock(&pool->flush_lock);

	spin_lock_irqsave(&pool->list_lock, flags);
	/* Get the list of all MRs to be dropped. Ordering matters -
	 * we want to put drop_list ahead of free_list. */
	list_splice_init(&pool->free_list, &unmap_list);
	list_splice_init(&pool->drop_list, &unmap_list);
	if (free_all)
		list_splice_init(&pool->clean_list, &unmap_list);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	free_goal = rds_ib_flush_goal(pool, free_all);

	if (list_empty(&unmap_list))
		goto out;

	/* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
	list_for_each_entry(ibmr, &unmap_list, list)
		list_add(&ibmr->fmr->list, &fmr_list);
	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret);

	/* Now we can destroy the DMA mapping and unpin any pages */
	list_for_each_entry_safe(ibmr, next, &unmap_list, list) {
		unpinned += ibmr->sg_len;
		__rds_ib_teardown_mr(ibmr);
		if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) {
			rds_ib_stats_inc(s_ib_rdma_mr_free);
			list_del(&ibmr->list);
			ib_dealloc_fmr(ibmr->fmr);
			kfree(ibmr);
			nfreed++;
		}
		ncleaned++;
	}

	spin_lock_irqsave(&pool->list_lock, flags);
	list_splice(&unmap_list, &pool->clean_list);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(ncleaned, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

out:
	mutex_unlock(&pool->flush_lock);
	return ret;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker);

	rds_ib_flush_mr_pool(pool, 0);
}

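/*
 * Return an MR to the pool. The MR goes on the drop list if it has hit
 * its remap limit, otherwise on the free list; the actual unmapping is
 * done lazily by rds_ib_flush_mr_pool(), either from the flush worker or
 * synchronously here when the caller asks for invalidation.
 */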
void rds_ib_free_mr(void *trans_private, int invalidate)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
	unsigned long flags;

	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

	/* Return it to the pool's free list */
	spin_lock_irqsave(&pool->list_lock, flags);
	if (ibmr->remap_count >= pool->fmr_attr.max_maps)
		list_add(&ibmr->list, &pool->drop_list);
	else
		list_add(&ibmr->list, &pool->free_list);

	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		queue_work(rds_wq, &pool->flush_worker);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_ib_flush_mr_pool(pool, 0);
		} else {
			/* We get here if the user created an MR marked
			 * as use_once and invalidate at the same time. */
			queue_work(rds_wq, &pool->flush_worker);
		}
	}
}

void rds_ib_flush_mrs(void)
{
	struct rds_ib_device *rds_ibdev;

	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

		if (pool)
			rds_ib_flush_mr_pool(pool, 0);
	}
}

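/*
 * Register a user buffer for RDMA: find the device the socket is bound
 * to, allocate an FMR from its pool, map the scatterlist through it and
 * return the rkey via key_ret.
 */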
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_mr *ibmr = NULL;
	int ret;

	rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
	if (!rds_ibdev) {
		ret = -ENODEV;
		goto out;
	}

	if (!rds_ibdev->mr_pool) {
		ret = -ENODEV;
		goto out;
	}

	ibmr = rds_ib_alloc_fmr(rds_ibdev);
	if (IS_ERR(ibmr))
		return ibmr;

	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
	if (ret == 0)
		*key_ret = ibmr->fmr->rkey;
	else
		printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret);

	ibmr->device = rds_ibdev;

 out:
	if (ret) {
		if (ibmr)
			rds_ib_free_mr(ibmr, 0);
		ibmr = ERR_PTR(ret);
	}
	return ibmr;
}