/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"
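
/*
 * Default size of the bounce buffer: 64 KiB, i.e. 128 sectors of
 * 512 bytes.  mmc_init_queue() clamps this further to what the host
 * can handle in a single request and a single segment.
 */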
#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	/*
	 * We only like normal block requests.
	 */
	if (!blk_fs_request(req)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}
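
/*
 * A note on the prep hook's return values: BLKPREP_KILL tells the block
 * layer to fail the request (it completes with an I/O error), while
 * REQ_DONTPREP marks an accepted request so it is not prepared again
 * should it be requeued.
 */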

static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = blk_fetch_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
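
/*
 * The worker holds thread_sem while running and drops it only while
 * sleeping, so mmc_queue_suspend() can take the semaphore to wait for
 * any in-flight request to finish before the queue is suspended.
 */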

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}
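
/*
 * Note: a NULL mq means the queue is being torn down (queuedata is
 * cleared in mmc_cleanup_queue()), so late requests are quietly failed
 * with -EIO.  If the worker is already busy (mq->req is set), it will
 * fetch the next request itself, so no wakeup is needed.
 */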

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mq->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce buffer\n",
					mmc_card_name(card));
			}
		}

		if (mq->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_phys_segs);
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 cleanup_queue:
	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}
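
/*
 * Usage sketch (roughly what the MMC block driver in
 * drivers/mmc/card/block.c does; the md-> names are illustrative,
 * not guaranteed to match the driver verbatim):
 *
 *	ret = mmc_init_queue(&md->queue, card, &md->lock);
 *	if (ret)
 *		goto err_putdisk;
 *	md->queue.issue_fn = mmc_blk_issue_rq;
 *	md->queue.data = md;
 *
 * The worker thread then calls issue_fn for each request it fetches.
 */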

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);
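
/*
 * Teardown ordering above matters: resume first (a suspended queue
 * holds thread_sem, so the worker could never exit and kthread_stop()
 * would hang), then stop the thread, and only then clear queuedata so
 * mmc_request() fails anything that still arrives.
 */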

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}
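
/*
 * Example (sketch): a block driver pairs these calls in its power
 * management hooks, e.g. mmc_queue_suspend(&md->queue) on suspend and
 * mmc_queue_resume(&md->queue) on resume.
 */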

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mq->sg, mq->bounce_buf, buflen);

	return 1;
}
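
/*
 * Typical issue-path sequence (a sketch of how these helpers fit
 * together, not copied from a specific driver):
 *
 *	sg_len = mmc_queue_map_sg(mq);	- returns 1 when bouncing
 *	mmc_queue_bounce_pre(mq);	- copy write data into the buffer
 *	(perform the MMC data transfer)
 *	mmc_queue_bounce_post(mq);	- copy read data back out
 *
 * mmc_queue_bounce_pre() and mmc_queue_bounce_post() are defined below.
 */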

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != WRITE)
		return;

	local_irq_save(flags);
	sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != READ)
		return;

	local_irq_save(flags);
	sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}