/*
 * Functions related to io context handling
 */
 | 4 | #include <linux/kernel.h> | 
 | 5 | #include <linux/module.h> | 
 | 6 | #include <linux/init.h> | 
 | 7 | #include <linux/bio.h> | 
 | 8 | #include <linux/blkdev.h> | 
 | 9 | #include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */ | 
| Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 10 | #include <linux/slab.h> | 
| Jens Axboe | 86db1e2 | 2008-01-29 14:53:40 +0100 | [diff] [blame] | 11 |  | 
 | 12 | #include "blk.h" | 
 | 13 |  | 
/*
 * For io context allocations
 */
 | 17 | static struct kmem_cache *iocontext_cachep; | 
 | 18 |  | 
 | 19 | static void cfq_dtor(struct io_context *ioc) | 
 | 20 | { | 
| Jens Axboe | ffc4e75 | 2008-02-19 10:02:29 +0100 | [diff] [blame] | 21 | 	if (!hlist_empty(&ioc->cic_list)) { | 
 | 22 | 		struct cfq_io_context *cic; | 
| Jens Axboe | 86db1e2 | 2008-01-29 14:53:40 +0100 | [diff] [blame] | 23 |  | 
| Jens Axboe | ffc4e75 | 2008-02-19 10:02:29 +0100 | [diff] [blame] | 24 | 		cic = list_entry(ioc->cic_list.first, struct cfq_io_context, | 
 | 25 | 								cic_list); | 
 | 26 | 		cic->dtor(ioc); | 
 | 27 | 	} | 
| Jens Axboe | 86db1e2 | 2008-01-29 14:53:40 +0100 | [diff] [blame] | 28 | } | 
 | 29 |  | 
 | 30 | /* | 
 | 31 |  * IO Context helper functions. put_io_context() returns 1 if there are no | 
 | 32 |  * more users of this io context, 0 otherwise. | 
 | 33 |  */ | 
 | 34 | int put_io_context(struct io_context *ioc) | 
 | 35 | { | 
 | 36 | 	if (ioc == NULL) | 
 | 37 | 		return 1; | 
 | 38 |  | 
| Nikanth Karthikesan | d9c7d39 | 2009-06-10 12:57:06 -0700 | [diff] [blame] | 39 | 	BUG_ON(atomic_long_read(&ioc->refcount) == 0); | 
| Jens Axboe | 86db1e2 | 2008-01-29 14:53:40 +0100 | [diff] [blame] | 40 |  | 
| Nikanth Karthikesan | d9c7d39 | 2009-06-10 12:57:06 -0700 | [diff] [blame] | 41 | 	if (atomic_long_dec_and_test(&ioc->refcount)) { | 
| Jens Axboe | 86db1e2 | 2008-01-29 14:53:40 +0100 | [diff] [blame] | 42 | 		rcu_read_lock(); | 
| Jens Axboe | 86db1e2 | 2008-01-29 14:53:40 +0100 | [diff] [blame] | 43 | 		cfq_dtor(ioc); | 
| Jens Axboe | 07416d2 | 2008-05-07 09:17:12 +0200 | [diff] [blame] | 44 | 		rcu_read_unlock(); | 
| Jens Axboe | 86db1e2 | 2008-01-29 14:53:40 +0100 | [diff] [blame] | 45 |  | 
 | 46 | 		kmem_cache_free(iocontext_cachep, ioc); | 
 | 47 | 		return 1; | 
 | 48 | 	} | 
 | 49 | 	return 0; | 
 | 50 | } | 
 | 51 | EXPORT_SYMBOL(put_io_context); | 
 | 52 |  | 
 | 53 | static void cfq_exit(struct io_context *ioc) | 
 | 54 | { | 
| Jens Axboe | 86db1e2 | 2008-01-29 14:53:40 +0100 | [diff] [blame] | 55 | 	rcu_read_lock(); | 
| Jens Axboe | 86db1e2 | 2008-01-29 14:53:40 +0100 | [diff] [blame] | 56 |  | 
| Jens Axboe | ffc4e75 | 2008-02-19 10:02:29 +0100 | [diff] [blame] | 57 | 	if (!hlist_empty(&ioc->cic_list)) { | 
 | 58 | 		struct cfq_io_context *cic; | 
 | 59 |  | 
 | 60 | 		cic = list_entry(ioc->cic_list.first, struct cfq_io_context, | 
 | 61 | 								cic_list); | 
 | 62 | 		cic->exit(ioc); | 
 | 63 | 	} | 
 | 64 | 	rcu_read_unlock(); | 
| Jens Axboe | 86db1e2 | 2008-01-29 14:53:40 +0100 | [diff] [blame] | 65 | } | 
 | 66 |  | 
 | 67 | /* Called by the exitting task */ | 
| Louis Rilling | b69f229 | 2009-12-04 14:52:42 +0100 | [diff] [blame] | 68 | void exit_io_context(struct task_struct *task) | 
| Jens Axboe | 86db1e2 | 2008-01-29 14:53:40 +0100 | [diff] [blame] | 69 | { | 
 | 70 | 	struct io_context *ioc; | 
 | 71 |  | 
| Louis Rilling | b69f229 | 2009-12-04 14:52:42 +0100 | [diff] [blame] | 72 | 	task_lock(task); | 
 | 73 | 	ioc = task->io_context; | 
 | 74 | 	task->io_context = NULL; | 
 | 75 | 	task_unlock(task); | 
| Jens Axboe | 86db1e2 | 2008-01-29 14:53:40 +0100 | [diff] [blame] | 76 |  | 
 | 77 | 	if (atomic_dec_and_test(&ioc->nr_tasks)) { | 
| Jens Axboe | 86db1e2 | 2008-01-29 14:53:40 +0100 | [diff] [blame] | 78 | 		cfq_exit(ioc); | 
 | 79 |  | 
| Jens Axboe | 86db1e2 | 2008-01-29 14:53:40 +0100 | [diff] [blame] | 80 | 	} | 
| Louis Rilling | 61cc74f | 2009-12-04 14:52:41 +0100 | [diff] [blame] | 81 | 	put_io_context(ioc); | 
| Jens Axboe | 86db1e2 | 2008-01-29 14:53:40 +0100 | [diff] [blame] | 82 | } | 
 | 83 |  | 
 | 84 | struct io_context *alloc_io_context(gfp_t gfp_flags, int node) | 
 | 85 | { | 
 | 86 | 	struct io_context *ret; | 
 | 87 |  | 
 | 88 | 	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node); | 
 | 89 | 	if (ret) { | 
| Nikanth Karthikesan | d9c7d39 | 2009-06-10 12:57:06 -0700 | [diff] [blame] | 90 | 		atomic_long_set(&ret->refcount, 1); | 
| Jens Axboe | 86db1e2 | 2008-01-29 14:53:40 +0100 | [diff] [blame] | 91 | 		atomic_set(&ret->nr_tasks, 1); | 
 | 92 | 		spin_lock_init(&ret->lock); | 
 | 93 | 		ret->ioprio_changed = 0; | 
 | 94 | 		ret->ioprio = 0; | 
| Richard Kennedy | 4671a13 | 2010-03-01 10:57:22 +0100 | [diff] [blame] | 95 | 		ret->last_waited = 0; /* doesn't matter... */ | 
| Jens Axboe | 86db1e2 | 2008-01-29 14:53:40 +0100 | [diff] [blame] | 96 | 		ret->nr_batch_requests = 0; /* because this is 0 */ | 
| Jens Axboe | 86db1e2 | 2008-01-29 14:53:40 +0100 | [diff] [blame] | 97 | 		INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH); | 
| Jens Axboe | ffc4e75 | 2008-02-19 10:02:29 +0100 | [diff] [blame] | 98 | 		INIT_HLIST_HEAD(&ret->cic_list); | 
| Jens Axboe | 86db1e2 | 2008-01-29 14:53:40 +0100 | [diff] [blame] | 99 | 		ret->ioc_data = NULL; | 
 | 100 | 	} | 
 | 101 |  | 
 | 102 | 	return ret; | 
 | 103 | } | 
 | 104 |  | 
 | 105 | /* | 
 | 106 |  * If the current task has no IO context then create one and initialise it. | 
 | 107 |  * Otherwise, return its existing IO context. | 
 | 108 |  * | 
 | 109 |  * This returned IO context doesn't have a specifically elevated refcount, | 
 | 110 |  * but since the current task itself holds a reference, the context can be | 
 | 111 |  * used in general code, so long as it stays within `current` context. | 
 | 112 |  */ | 
 | 113 | struct io_context *current_io_context(gfp_t gfp_flags, int node) | 
 | 114 | { | 
 | 115 | 	struct task_struct *tsk = current; | 
 | 116 | 	struct io_context *ret; | 
 | 117 |  | 
 | 118 | 	ret = tsk->io_context; | 
 | 119 | 	if (likely(ret)) | 
 | 120 | 		return ret; | 
 | 121 |  | 
 | 122 | 	ret = alloc_io_context(gfp_flags, node); | 
 | 123 | 	if (ret) { | 
 | 124 | 		/* make sure set_task_ioprio() sees the settings above */ | 
 | 125 | 		smp_wmb(); | 
 | 126 | 		tsk->io_context = ret; | 
 | 127 | 	} | 
 | 128 |  | 
 | 129 | 	return ret; | 
 | 130 | } | 
 | 131 |  | 
 | 132 | /* | 
 | 133 |  * If the current task has no IO context then create one and initialise it. | 
 | 134 |  * If it does have a context, take a ref on it. | 
 | 135 |  * | 
 | 136 |  * This is always called in the context of the task which submitted the I/O. | 
 | 137 |  */ | 
 | 138 | struct io_context *get_io_context(gfp_t gfp_flags, int node) | 
 | 139 | { | 
 | 140 | 	struct io_context *ret = NULL; | 
 | 141 |  | 
 | 142 | 	/* | 
 | 143 | 	 * Check for unlikely race with exiting task. ioc ref count is | 
 | 144 | 	 * zero when ioc is being detached. | 
 | 145 | 	 */ | 
 | 146 | 	do { | 
 | 147 | 		ret = current_io_context(gfp_flags, node); | 
 | 148 | 		if (unlikely(!ret)) | 
 | 149 | 			break; | 
| Nikanth Karthikesan | d9c7d39 | 2009-06-10 12:57:06 -0700 | [diff] [blame] | 150 | 	} while (!atomic_long_inc_not_zero(&ret->refcount)); | 
| Jens Axboe | 86db1e2 | 2008-01-29 14:53:40 +0100 | [diff] [blame] | 151 |  | 
 | 152 | 	return ret; | 
 | 153 | } | 
 | 154 | EXPORT_SYMBOL(get_io_context); | 
 | 155 |  | 
| Adrian Bunk | 1334159 | 2008-02-18 13:45:53 +0100 | [diff] [blame] | 156 | static int __init blk_ioc_init(void) | 
| Jens Axboe | 86db1e2 | 2008-01-29 14:53:40 +0100 | [diff] [blame] | 157 | { | 
 | 158 | 	iocontext_cachep = kmem_cache_create("blkdev_ioc", | 
 | 159 | 			sizeof(struct io_context), 0, SLAB_PANIC, NULL); | 
 | 160 | 	return 0; | 
 | 161 | } | 
 | 162 | subsys_initcall(blk_ioc_init); |