/*
 *  linux/drivers/mmc/mmc_queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/kthread.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "mmc_queue.h"

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare an MMC request.  Essentially, this means passing the
 * preparation off to the media driver.  The media driver will
 * create a mmc_io_request in req->special.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
        struct mmc_queue *mq = q->queuedata;
        int ret = BLKPREP_KILL;

        if (blk_special_request(req)) {
                /*
                 * Special commands already have their command
                 * blocks set up in req->special.
                 */
                BUG_ON(!req->special);

                ret = BLKPREP_OK;
        } else if (blk_fs_request(req) || blk_pc_request(req)) {
                /*
                 * Block I/O requests need translating according
                 * to the protocol.
                 */
                ret = mq->prep_fn(mq, req);
        } else {
                /*
                 * Everything else is invalid.
                 */
                blk_dump_rq_flags(req, "MMC bad request");
        }

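        /* Mark prepared requests so the block layer won't prepare them again. */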
        if (ret == BLKPREP_OK)
                req->cmd_flags |= REQ_DONTPREP;

        return ret;
}

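/*
 * Per-card worker thread.  It sleeps until mmc_request() wakes it,
 * then pulls requests off the block queue and hands them to the card
 * driver via issue_fn().  thread_sem is held while a request is in
 * flight so that mmc_queue_suspend() can wait for the thread to go
 * idle.
 */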
static int mmc_queue_thread(void *d)
{
        struct mmc_queue *mq = d;
        struct request_queue *q = mq->queue;

        /*
         * Set PF_NOFREEZE to ensure that we aren't put to sleep by
         * the process freezer; we handle suspension ourselves.
         * PF_MEMALLOC lets the thread dip into memory reserves so
         * that writeback can make progress under memory pressure.
         */
        current->flags |= PF_MEMALLOC|PF_NOFREEZE;

        down(&mq->thread_sem);
        do {
                struct request *req = NULL;

                spin_lock_irq(q->queue_lock);
                set_current_state(TASK_INTERRUPTIBLE);
                if (!blk_queue_plugged(q))
                        req = elv_next_request(q);
                mq->req = req;
                spin_unlock_irq(q->queue_lock);

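                /*
                 * No request pending: either exit (queue teardown) or
                 * release thread_sem and sleep until we are woken again.
                 */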
                if (!req) {
                        if (kthread_should_stop()) {
                                set_current_state(TASK_RUNNING);
                                break;
                        }
                        up(&mq->thread_sem);
                        schedule();
                        down(&mq->thread_sem);
                        continue;
                }
                set_current_state(TASK_RUNNING);

                mq->issue_fn(mq, req);
        } while (1);
        up(&mq->thread_sem);

        return 0;
}

/*
 * Generic MMC request handler.  The block layer calls this when new
 * requests are queued; we simply wake the worker thread for this
 * queue, unless it is already processing a request.  Requests on a
 * queue whose card has gone away are failed immediately.
 */
static void mmc_request(request_queue_t *q)
{
        struct mmc_queue *mq = q->queuedata;
        struct request *req;
        int ret;

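        /*
         * queuedata is cleared by mmc_cleanup_queue(), so a NULL mq
         * means the card is gone; error out anything still queued.
         */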
        if (!mq) {
                printk(KERN_ERR "MMC: killing requests for dead queue\n");
                while ((req = elv_next_request(q)) != NULL) {
                        do {
                                ret = end_that_request_chunk(req, 0,
                                        req->current_nr_sectors << 9);
                        } while (ret);
                }
                return;
        }

        if (!mq->req)
                wake_up_process(mq->thread);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
        struct mmc_host *host = card->host;
        u64 limit = BLK_BOUNCE_HIGH;
        int ret;

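        /*
         * Bounce-buffer above the controller's DMA mask; fall back to
         * BLK_BOUNCE_HIGH when the host has no DMA mask set.
         */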
        if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
                limit = *mmc_dev(host)->dma_mask;

        mq->card = card;
        mq->queue = blk_init_queue(mmc_request, lock);
        if (!mq->queue)
                return -ENOMEM;

        blk_queue_prep_rq(mq->queue, mmc_prep_request);
        blk_queue_bounce_limit(mq->queue, limit);
        blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
        blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
        blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
        blk_queue_max_segment_size(mq->queue, host->max_seg_size);

        mq->queue->queuedata = mq;
        mq->req = NULL;

        mq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs,
                         GFP_KERNEL);
        if (!mq->sg) {
                ret = -ENOMEM;
                goto cleanup_queue;
        }

        init_MUTEX(&mq->thread_sem);

        mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
        if (IS_ERR(mq->thread)) {
                ret = PTR_ERR(mq->thread);
                goto free_sg;
        }

        return 0;

free_sg:
        kfree(mq->sg);
        mq->sg = NULL;
cleanup_queue:
        blk_cleanup_queue(mq->queue);
        return ret;
}
EXPORT_SYMBOL(mmc_init_queue);

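/**
 * mmc_cleanup_queue - tear down an MMC request queue
 * @mq: MMC queue to clean up
 *
 * Detach the queue from its card: mark the block queue dead so that
 * mmc_request() fails any late requests, stop the worker thread, and
 * free the scatterlist and the block queue.
 */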
void mmc_cleanup_queue(struct mmc_queue *mq)
{
        request_queue_t *q = mq->queue;
        unsigned long flags;

        /* Mark that we should start throwing out stragglers */
        spin_lock_irqsave(q->queue_lock, flags);
        q->queuedata = NULL;
        spin_unlock_irqrestore(q->queue_lock, flags);

        /* Then terminate our worker thread */
        kthread_stop(mq->thread);

        kfree(mq->sg);
        mq->sg = NULL;

        blk_cleanup_queue(mq->queue);

        mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
        request_queue_t *q = mq->queue;
        unsigned long flags;

        if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
                mq->flags |= MMC_QUEUE_SUSPENDED;

                spin_lock_irqsave(q->queue_lock, flags);
                blk_stop_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);

                down(&mq->thread_sem);
        }
}
EXPORT_SYMBOL(mmc_queue_suspend);

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
        request_queue_t *q = mq->queue;
        unsigned long flags;

        if (mq->flags & MMC_QUEUE_SUSPENDED) {
                mq->flags &= ~MMC_QUEUE_SUSPENDED;

                up(&mq->thread_sem);

                spin_lock_irqsave(q->queue_lock, flags);
                blk_start_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
}
EXPORT_SYMBOL(mmc_queue_resume);