/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);

static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/* Exit an icq.  Called with both ioc and q locked. */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->ops.elevator_exit_icq_fn)
		et->ops.elevator_exit_icq_fn(icq);

	icq->flags |= ICQ_EXITED;
}

/* Release an icq.  Called with both ioc and q locked. */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);
	lockdep_assert_held(q->queue_lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock.  If it's not pointing to @icq now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_dereference_raw(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	unsigned long flags;

	/*
	 * Exiting an icq may call into put_io_context() through the
	 * elevator, which will trigger a lockdep warning.  The ioc's are
	 * guaranteed to be different, so use a different locking subclass
	 * here.  Use the irqsave variant as there's no
	 * spin_lock_irq_nested().
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
		}
	}

	spin_unlock_irqrestore(&ioc->lock, flags);

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference to io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			schedule_work(&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);
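
/*
 * Illustrative sketch (not part of the original file): a minimal
 * reference dance pairing get_io_context() with put_io_context().  The
 * helper name example_hold_ioc() is hypothetical; it assumes the caller
 * already has a valid pointer, e.g. from get_task_io_context() below.
 */
#if 0	/* example only */
static void example_hold_ioc(struct io_context *ioc)
{
	get_io_context(ioc);	/* take our own reference */
	/* ... use ioc: look up icq's, inspect ioprio, etc. ... */
	put_io_context(ioc);	/* may defer actual release to a workqueue */
}
#endif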

/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active().  If active reference reaches zero after
 * put, @ioc can never issue further IOs and ioscheds are notified.
 */
void put_io_context_active(struct io_context *ioc)
{
	struct hlist_node *n;
	unsigned long flags;
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}

	/*
	 * Need ioc lock to walk icq_list and q lock to exit icq.  Perform
	 * reverse double locking.  Read comment in ioc_release_fn() for
	 * explanation on the nested locking annotation.
	 */
retry:
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;
		if (spin_trylock(icq->q->queue_lock)) {
			ioc_exit_icq(icq);
			spin_unlock(icq->q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			goto retry;
		}
	}
	spin_unlock_irqrestore(&ioc->lock, flags);

	put_io_context(ioc);
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.  Must be called with @q locked.
 */
void ioc_clear_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	while (!list_empty(&q->icq_list)) {
		struct io_cq *icq = list_entry(q->icq_list.next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock(&ioc->lock);
		ioc_destroy_icq(icq);
		spin_unlock(&ioc->lock);
	}
}
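
/*
 * Illustrative sketch (not part of the original file): a queue teardown
 * path calling ioc_clear_queue() with the queue lock held, as the
 * kerneldoc above requires.  The function name example_cleanup_queue()
 * is hypothetical; real callers sit in the queue release path.
 */
#if 0	/* example only */
static void example_cleanup_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	ioc_clear_queue(q);	/* exits and frees every icq linked to @q */
	spin_unlock_irq(q->queue_lock);
}
#endif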

int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
	struct io_context *ioc;
	int ret;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return -ENOMEM;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install.  ioc shouldn't be installed if someone else
	 * already did, or if @task, which isn't %current, is exiting.
	 * Note that we need to allow ioc creation on exiting %current as
	 * the exit path may issue IOs from e.g. exit_files().  The exit
	 * path is responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ret = task->io_context ? 0 : -EBUSY;

	task_unlock(task);

	return ret;
}

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (!create_task_io_context(task, gfp_flags, node));

	return NULL;
}
EXPORT_SYMBOL(get_task_io_context);
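
/*
 * Illustrative sketch (not part of the original file): grabbing the
 * io_context of %current before doing IO on its behalf.  GFP_NOIO avoids
 * recursing into the block layer during the allocation; the function
 * name example_get_current_ioc() is hypothetical.
 */
#if 0	/* example only */
static struct io_context *example_get_current_ioc(void)
{
	/* returns with a reference held; drop it with put_io_context() */
	return get_task_io_context(current, GFP_NOIO, NUMA_NO_NODE);
}
#endif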

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU.  All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find an icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);
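
/*
 * Illustrative sketch (not part of the original file): the lookup must
 * run under @q->queue_lock, per the kerneldoc above.  The function name
 * example_find_icq() is hypothetical.
 */
#if 0	/* example only */
static struct io_cq *example_find_icq(struct io_context *ioc,
				      struct request_queue *q)
{
	struct io_cq *icq;

	spin_lock_irq(q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);	/* hint first, then radix tree */
	spin_unlock_irq(q->queue_lock);
	return icq;
}
#endif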

/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure io_cq linking @ioc and @q exists.  If icq doesn't exist, it
 * will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.elevator_init_icq_fn)
			et->ops.elevator_init_icq_fn(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(q->queue_lock);
	radix_tree_preload_end();
	return icq;
}
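
/*
 * Illustrative sketch (not part of the original file): the
 * lookup-then-create pattern an io scheduler might use to attach an icq,
 * losing any creation race gracefully because ioc_create_icq() falls
 * back to ioc_lookup_icq() when the radix tree insert collides.  The
 * function name example_get_icq() is hypothetical.
 */
#if 0	/* example only */
static struct io_cq *example_get_icq(struct io_context *ioc,
				     struct request_queue *q,
				     gfp_t gfp_mask)
{
	struct io_cq *icq;

	spin_lock_irq(q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(q->queue_lock);

	if (!icq)
		icq = ioc_create_icq(ioc, q, gfp_mask);	/* takes both locks */
	return icq;
}
#endif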

static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);