mmc: Revert all packing related patches
The version of packing support accepted by the Linux community is
slightly different from the one that was merged. This revert is needed
in order to bring in the latest version from the Linux community.
This patch reverts the following commits:
1.mmc: card: Add eMMC4.5 write packed commands unit-tests
2.mmc: card: Fix packing control enabling algorithm
3.mmc: block: Add MMC write packing statistics
4.mmc: msm_sdcc: enable the write packing control
5.mmc: msm_sdcc: Enable write packing capability
6.mmc: block: Add write packing control
7.mmc: core: Support packed write command for eMMC4.5 device
8.mmc: core: Add packed command feature of eMMC4.5
(cherry picked from commit f94cf3da103b344b13fa4d6665fd21dad1b95ead)
Change-Id: I2efc6dc8d8f6d5cc7e9efa99ec74914ffff96fcd
commit: 9b54d88c6a11ebfe069b7fdebcb521da21754c3f
commit: e2ecb58a6c5011549aac3e86fb1c13e7b7c65104
commit: e544d700e2dac1584a8172c4dc347d81ede203bd
commit: 8afe8d2a98a1bbf3804162ff5c95a56226935f5a
commit: 25e2261a556c4393f79d58bce814bb3df34b9549
commit: 63c61d6d8b8f37c71b4162b3affffdf72ac06811
commit: 968c774ea6466fa7adbf2eac333220132acda306
commit: 516994eee39282b8648b509e449ff83b49833209.
Signed-off-by: Tatyana Brokhman <tlinder@codeaurora.org>
(cherry picked from commit 31fe84d6edae65f9df5663538e528697897be86e)
Signed-off-by: Maya Erez <merez@codeaurora.org>
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 9110e23..63d2fa1 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -59,16 +59,6 @@
#define INAND_CMD38_ARG_SECTRIM2 0x88
#define MMC_SANITIZE_REQ_TIMEOUT 240000 /* msec */
-#define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \
- (req->cmd_flags & REQ_META)) && \
- (rq_data_dir(req) == WRITE))
-#define PACKED_CMD_VER 0x01
-#define PACKED_CMD_WR 0x02
-#define MMC_BLK_UPDATE_STOP_REASON(stats, reason) \
- do { \
- if (stats->enabled) \
- stats->pack_stop_reason[reason]++; \
- } while (0)
static DEFINE_MUTEX(block_mutex);
@@ -120,26 +110,24 @@
struct device_attribute force_ro;
struct device_attribute power_ro_lock;
int area_type;
- struct device_attribute num_wr_reqs_to_start_packing;
};
static DEFINE_MUTEX(open_lock);
-enum {
- MMC_PACKED_N_IDX = -1,
- MMC_PACKED_N_ZERO,
- MMC_PACKED_N_SINGLE,
+enum mmc_blk_status {
+ MMC_BLK_SUCCESS = 0,
+ MMC_BLK_PARTIAL,
+ MMC_BLK_CMD_ERR,
+ MMC_BLK_RETRY,
+ MMC_BLK_ABORT,
+ MMC_BLK_DATA_ERR,
+ MMC_BLK_ECC_ERR,
+ MMC_BLK_NOMEDIUM,
};
module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
-static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
-{
- mqrq->packed_cmd = MMC_PACKED_NONE;
- mqrq->packed_num = MMC_PACKED_N_ZERO;
-}
-
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
struct mmc_blk_data *md;
@@ -273,38 +261,6 @@
return ret;
}
-static ssize_t
-num_wr_reqs_to_start_packing_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
- int num_wr_reqs_to_start_packing;
- int ret;
-
- num_wr_reqs_to_start_packing = md->queue.num_wr_reqs_to_start_packing;
-
- ret = snprintf(buf, PAGE_SIZE, "%d\n", num_wr_reqs_to_start_packing);
-
- mmc_blk_put(md);
- return ret;
-}
-
-static ssize_t
-num_wr_reqs_to_start_packing_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- int value;
- struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
-
- sscanf(buf, "%d", &value);
- if (value >= 0)
- md->queue.num_wr_reqs_to_start_packing = value;
-
- mmc_blk_put(md);
- return count;
-}
-
static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
@@ -1140,60 +1096,12 @@
if (!brq->data.bytes_xfered)
return MMC_BLK_RETRY;
- if (mq_mrq->packed_cmd != MMC_PACKED_NONE) {
- if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
- return MMC_BLK_PARTIAL;
- else
- return MMC_BLK_SUCCESS;
- }
-
if (blk_rq_bytes(req) != brq->data.bytes_xfered)
return MMC_BLK_PARTIAL;
return MMC_BLK_SUCCESS;
}
-static int mmc_blk_packed_err_check(struct mmc_card *card,
- struct mmc_async_req *areq)
-{
- struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
- mmc_active);
- struct request *req = mq_rq->req;
- int err, check, status;
- u8 ext_csd[512];
-
- check = mmc_blk_err_check(card, areq);
- err = get_card_status(card, &status, 0);
- if (err) {
- pr_err("%s: error %d sending status command\n",
- req->rq_disk->disk_name, err);
- return MMC_BLK_ABORT;
- }
-
- if (status & R1_EXP_EVENT) {
- err = mmc_send_ext_csd(card, ext_csd);
- if (err) {
- pr_err("%s: error %d sending ext_csd\n",
- req->rq_disk->disk_name, err);
- return MMC_BLK_ABORT;
- }
-
- if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
- EXT_CSD_PACKED_FAILURE) &&
- (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
- EXT_CSD_PACKED_GENERIC_ERROR)) {
- if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
- EXT_CSD_PACKED_INDEXED_ERROR) {
- mq_rq->packed_fail_idx =
- ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
- return MMC_BLK_PARTIAL;
- }
- }
- }
-
- return check;
-}
-
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
int disable_multi,
@@ -1349,361 +1257,10 @@
mmc_queue_bounce_pre(mqrq);
}
-static void mmc_blk_write_packing_control(struct mmc_queue *mq,
- struct request *req)
-{
- struct mmc_host *host = mq->card->host;
- int data_dir;
-
- /* Support for the write packing on eMMC 4.5 or later */
- if (mq->card->ext_csd.rev <= 5)
- return;
-
- if (!(host->caps2 & MMC_CAP2_PACKED_WR))
- return;
-
- /*
- * In case the packing control is not supported by the host, it should
- * not have an effect on the write packing. Therefore we have to enable
- * the write packing
- */
- if (!(host->caps2 & MMC_CAP2_PACKED_WR_CONTROL)) {
- mq->wr_packing_enabled = true;
- return;
- }
-
- if (!req || (req && (req->cmd_flags & REQ_FLUSH))) {
- if (mq->num_of_potential_packed_wr_reqs >
- mq->num_wr_reqs_to_start_packing)
- mq->wr_packing_enabled = true;
- mq->num_of_potential_packed_wr_reqs = 0;
- return;
- }
-
- data_dir = rq_data_dir(req);
-
- if (data_dir == READ) {
- mq->num_of_potential_packed_wr_reqs = 0;
- mq->wr_packing_enabled = false;
- return;
- } else if (data_dir == WRITE) {
- mq->num_of_potential_packed_wr_reqs++;
- }
-
- if (mq->num_of_potential_packed_wr_reqs >
- mq->num_wr_reqs_to_start_packing)
- mq->wr_packing_enabled = true;
-
-}
-
-struct mmc_wr_pack_stats *mmc_blk_get_packed_statistics(struct mmc_card *card)
-{
- if (!card)
- return NULL;
-
- return &card->wr_pack_stats;
-}
-EXPORT_SYMBOL(mmc_blk_get_packed_statistics);
-
-void mmc_blk_init_packed_statistics(struct mmc_card *card)
-{
- int max_num_of_packed_reqs = 0;
-
- if (!card || !card->wr_pack_stats.packing_events)
- return;
-
- max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
-
- spin_lock(&card->wr_pack_stats.lock);
- memset(card->wr_pack_stats.packing_events, 0,
- (max_num_of_packed_reqs + 1) *
- sizeof(*card->wr_pack_stats.packing_events));
- memset(&card->wr_pack_stats.pack_stop_reason, 0,
- sizeof(card->wr_pack_stats.pack_stop_reason));
- card->wr_pack_stats.enabled = true;
- spin_unlock(&card->wr_pack_stats.lock);
-}
-EXPORT_SYMBOL(mmc_blk_init_packed_statistics);
-
-void print_mmc_packing_stats(struct mmc_card *card)
-{
- int i;
- int max_num_of_packed_reqs = 0;
-
- if ((!card) || (!card->wr_pack_stats.packing_events))
- return;
-
- max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
-
- spin_lock(&card->wr_pack_stats.lock);
-
- pr_info("%s: write packing statistics:\n",
- mmc_hostname(card->host));
-
- for (i = 1 ; i <= max_num_of_packed_reqs ; ++i) {
- if (card->wr_pack_stats.packing_events[i] != 0)
- pr_info("%s: Packed %d reqs - %d times\n",
- mmc_hostname(card->host), i,
- card->wr_pack_stats.packing_events[i]);
- }
-
- pr_info("%s: stopped packing due to the following reasons:\n",
- mmc_hostname(card->host));
-
- if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS])
- pr_info("%s: %d times: exceedmax num of segments\n",
- mmc_hostname(card->host),
- card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
- if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS])
- pr_info("%s: %d times: exceeding the max num of sectors\n",
- mmc_hostname(card->host),
- card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS]);
- if (card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR])
- pr_info("%s: %d times: wrong data direction\n",
- mmc_hostname(card->host),
- card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR]);
- if (card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD])
- pr_info("%s: %d times: flush or discard\n",
- mmc_hostname(card->host),
- card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
- if (card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE])
- pr_info("%s: %d times: empty queue\n",
- mmc_hostname(card->host),
- card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE]);
- if (card->wr_pack_stats.pack_stop_reason[REL_WRITE])
- pr_info("%s: %d times: rel write\n",
- mmc_hostname(card->host),
- card->wr_pack_stats.pack_stop_reason[REL_WRITE]);
- if (card->wr_pack_stats.pack_stop_reason[THRESHOLD])
- pr_info("%s: %d times: Threshold\n",
- mmc_hostname(card->host),
- card->wr_pack_stats.pack_stop_reason[THRESHOLD]);
-
- spin_unlock(&card->wr_pack_stats.lock);
-}
-EXPORT_SYMBOL(print_mmc_packing_stats);
-
-static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
-{
- struct request_queue *q = mq->queue;
- struct mmc_card *card = mq->card;
- struct request *cur = req, *next = NULL;
- struct mmc_blk_data *md = mq->data;
- bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
- unsigned int req_sectors = 0, phys_segments = 0;
- unsigned int max_blk_count, max_phys_segs;
- u8 put_back = 0;
- u8 max_packed_rw = 0;
- u8 reqs = 0;
- struct mmc_wr_pack_stats *stats = &card->wr_pack_stats;
-
- mmc_blk_clear_packed(mq->mqrq_cur);
-
- if (!(md->flags & MMC_BLK_CMD23) ||
- !card->ext_csd.packed_event_en)
- goto no_packed;
-
- if (!mq->wr_packing_enabled)
- goto no_packed;
-
- if ((rq_data_dir(cur) == WRITE) &&
- (card->host->caps2 & MMC_CAP2_PACKED_WR))
- max_packed_rw = card->ext_csd.max_packed_writes;
-
- if (max_packed_rw == 0)
- goto no_packed;
-
- if (mmc_req_rel_wr(cur) &&
- (md->flags & MMC_BLK_REL_WR) &&
- !en_rel_wr) {
- goto no_packed;
- }
-
- max_blk_count = min(card->host->max_blk_count,
- card->host->max_req_size >> 9);
- if (unlikely(max_blk_count > 0xffff))
- max_blk_count = 0xffff;
-
- max_phys_segs = queue_max_segments(q);
- req_sectors += blk_rq_sectors(cur);
- phys_segments += cur->nr_phys_segments;
-
- if (rq_data_dir(cur) == WRITE) {
- req_sectors++;
- phys_segments++;
- }
-
- spin_lock(&stats->lock);
-
- while (reqs < max_packed_rw - 1) {
- spin_lock_irq(q->queue_lock);
- next = blk_fetch_request(q);
- spin_unlock_irq(q->queue_lock);
- if (!next) {
- MMC_BLK_UPDATE_STOP_REASON(stats, EMPTY_QUEUE);
- break;
- }
-
- if (next->cmd_flags & REQ_DISCARD ||
- next->cmd_flags & REQ_FLUSH) {
- MMC_BLK_UPDATE_STOP_REASON(stats, FLUSH_OR_DISCARD);
- put_back = 1;
- break;
- }
-
- if (rq_data_dir(cur) != rq_data_dir(next)) {
- MMC_BLK_UPDATE_STOP_REASON(stats, WRONG_DATA_DIR);
- put_back = 1;
- break;
- }
-
- if (mmc_req_rel_wr(next) &&
- (md->flags & MMC_BLK_REL_WR) &&
- !en_rel_wr) {
- MMC_BLK_UPDATE_STOP_REASON(stats, REL_WRITE);
- put_back = 1;
- break;
- }
-
- req_sectors += blk_rq_sectors(next);
- if (req_sectors > max_blk_count) {
- if (stats->enabled)
- stats->pack_stop_reason[EXCEEDS_SECTORS]++;
- put_back = 1;
- break;
- }
-
- phys_segments += next->nr_phys_segments;
- if (phys_segments > max_phys_segs) {
- MMC_BLK_UPDATE_STOP_REASON(stats, EXCEEDS_SEGMENTS);
- put_back = 1;
- break;
- }
-
- if (rq_data_dir(next) == WRITE)
- mq->num_of_potential_packed_wr_reqs++;
- list_add_tail(&next->queuelist, &mq->mqrq_cur->packed_list);
- cur = next;
- reqs++;
- }
-
- if (put_back) {
- spin_lock_irq(q->queue_lock);
- blk_requeue_request(q, next);
- spin_unlock_irq(q->queue_lock);
- }
-
- if (stats->enabled) {
- if (reqs + 1 <= card->ext_csd.max_packed_writes)
- stats->packing_events[reqs + 1]++;
- if (reqs + 1 == max_packed_rw)
- MMC_BLK_UPDATE_STOP_REASON(stats, THRESHOLD);
- }
-
- spin_unlock(&stats->lock);
-
- if (reqs > 0) {
- list_add(&req->queuelist, &mq->mqrq_cur->packed_list);
- mq->mqrq_cur->packed_num = ++reqs;
- return reqs;
- }
-
-no_packed:
- mmc_blk_clear_packed(mq->mqrq_cur);
- return 0;
-}
-
-static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
- struct mmc_card *card,
- struct mmc_queue *mq)
-{
- struct mmc_blk_request *brq = &mqrq->brq;
- struct request *req = mqrq->req;
- struct request *prq;
- struct mmc_blk_data *md = mq->data;
- bool do_rel_wr;
- u32 *packed_cmd_hdr = mqrq->packed_cmd_hdr;
- u8 i = 1;
-
- mqrq->packed_cmd = MMC_PACKED_WRITE;
- mqrq->packed_blocks = 0;
- mqrq->packed_fail_idx = MMC_PACKED_N_IDX;
-
- memset(packed_cmd_hdr, 0, sizeof(mqrq->packed_cmd_hdr));
- packed_cmd_hdr[0] = (mqrq->packed_num << 16) |
- (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
-
- /*
- * Argument for each entry of packed group
- */
- list_for_each_entry(prq, &mqrq->packed_list, queuelist) {
- do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
- /* Argument of CMD23*/
- packed_cmd_hdr[(i * 2)] =
- (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
- blk_rq_sectors(prq);
- /* Argument of CMD18 or CMD25 */
- packed_cmd_hdr[((i * 2)) + 1] =
- mmc_card_blockaddr(card) ?
- blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
- mqrq->packed_blocks += blk_rq_sectors(prq);
- i++;
- }
-
- memset(brq, 0, sizeof(struct mmc_blk_request));
- brq->mrq.cmd = &brq->cmd;
- brq->mrq.data = &brq->data;
- brq->mrq.sbc = &brq->sbc;
- brq->mrq.stop = &brq->stop;
-
- brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
- brq->sbc.arg = MMC_CMD23_ARG_PACKED | (mqrq->packed_blocks + 1);
- brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
-
- brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
- brq->cmd.arg = blk_rq_pos(req);
- if (!mmc_card_blockaddr(card))
- brq->cmd.arg <<= 9;
- brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
-
- brq->data.blksz = 512;
- brq->data.blocks = mqrq->packed_blocks + 1;
- brq->data.flags |= MMC_DATA_WRITE;
- brq->data.fault_injected = false;
-
- brq->stop.opcode = MMC_STOP_TRANSMISSION;
- brq->stop.arg = 0;
- brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
-
- mmc_set_data_timeout(&brq->data, card);
-
- brq->data.sg = mqrq->sg;
- brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
-
- mqrq->mmc_active.mrq = &brq->mrq;
-
- /*
- * This is intended for packed commands tests usage - in case these
- * functions are not in use the respective pointers are NULL
- */
- if (mq->err_check_fn)
- mqrq->mmc_active.err_check = mq->err_check_fn;
- else
- mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
-
- if (mq->packed_test_fn)
- mq->packed_test_fn(mq->queue, mqrq);
-
- mmc_queue_bounce_pre(mqrq);
-}
-
static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
struct mmc_blk_request *brq, struct request *req,
int ret)
{
- struct mmc_queue_req *mq_rq;
- mq_rq = container_of(brq, struct mmc_queue_req, brq);
-
/*
* If this is an SD card and we're writing, we can first
* mark the known good sectors as ok.
@@ -1720,41 +1277,8 @@
ret = blk_end_request(req, 0, blocks << 9);
} else
ret = blk_end_request(req, 0, brq->data.bytes_xfered);
- } else {
- if (mq_rq->packed_cmd == MMC_PACKED_NONE) {
- ret = blk_end_request(req, 0, brq->data.bytes_xfered);
- }
- }
- return ret;
-}
-
-static int mmc_blk_end_packed_req(struct mmc_queue *mq,
- struct mmc_queue_req *mq_rq)
-{
- struct request *prq;
- int idx = mq_rq->packed_fail_idx, i = 0;
- int ret = 0;
-
- while (!list_empty(&mq_rq->packed_list)) {
- prq = list_entry_rq(mq_rq->packed_list.next);
- if (idx == i) {
- /* retry from error index */
- mq_rq->packed_num -= idx;
- mq_rq->req = prq;
- ret = 1;
-
- if (mq_rq->packed_num == MMC_PACKED_N_SINGLE) {
- list_del_init(&prq->queuelist);
- mmc_blk_clear_packed(mq_rq);
- }
- return ret;
- }
- list_del_init(&prq->queuelist);
- blk_end_request(prq, 0, blk_rq_bytes(prq));
- i++;
- }
-
- mmc_blk_clear_packed(mq_rq);
+ } else
+ ret = blk_end_request(req, 0, brq->data.bytes_xfered);
return ret;
}
@@ -1766,24 +1290,15 @@
int ret = 1, disable_multi = 0, retry = 0, type;
enum mmc_blk_status status;
struct mmc_queue_req *mq_rq;
- struct request *req, *prq;
+ struct request *req;
struct mmc_async_req *areq;
- const u8 packed_num = 2;
- u8 reqs = 0;
if (!rqc && !mq->mqrq_prev->req)
return 0;
- if (rqc)
- reqs = mmc_blk_prep_packed_list(mq, rqc);
-
do {
if (rqc) {
- if (reqs >= packed_num)
- mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
- card, mq);
- else
- mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+ mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
areq = &mq->mqrq_cur->mmc_active;
} else
areq = NULL;
@@ -1804,15 +1319,8 @@
* A block was successfully transferred.
*/
mmc_blk_reset_success(md, type);
-
- if (mq_rq->packed_cmd != MMC_PACKED_NONE) {
- ret = mmc_blk_end_packed_req(mq, mq_rq);
- break;
- } else {
- ret = blk_end_request(req, 0,
+ ret = blk_end_request(req, 0,
brq->data.bytes_xfered);
- }
-
/*
* If the blk_end_request function returns non-zero even
* though all data has been transferred and no errors
@@ -1845,8 +1353,7 @@
err = mmc_blk_reset(md, card->host, type);
if (!err)
break;
- if (err == -ENODEV ||
- mq_rq->packed_cmd != MMC_PACKED_NONE)
+ if (err == -ENODEV)
goto cmd_abort;
/* Fall through */
}
@@ -1873,62 +1380,26 @@
}
if (ret) {
- if (mq_rq->packed_cmd == MMC_PACKED_NONE) {
- /*
- * In case of a incomplete request
- * prepare it again and resend.
- */
- mmc_blk_rw_rq_prep(mq_rq, card,
- disable_multi, mq);
- mmc_start_req(card->host,
- &mq_rq->mmc_active, NULL);
- } else {
- mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
- mmc_start_req(card->host,
- &mq_rq->mmc_active, NULL);
- }
+ /*
+ * In case of a incomplete request
+ * prepare it again and resend.
+ */
+ mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
+ mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
}
} while (ret);
return 1;
cmd_abort:
- if (mq_rq->packed_cmd == MMC_PACKED_NONE) {
- if (mmc_card_removed(card))
- req->cmd_flags |= REQ_QUIET;
- while (ret)
- ret = blk_end_request(req, -EIO,
- blk_rq_cur_bytes(req));
- } else {
- while (!list_empty(&mq_rq->packed_list)) {
- prq = list_entry_rq(mq_rq->packed_list.next);
- list_del_init(&prq->queuelist);
- blk_end_request(prq, -EIO, blk_rq_bytes(prq));
- }
- mmc_blk_clear_packed(mq_rq);
- }
+ if (mmc_card_removed(card))
+ req->cmd_flags |= REQ_QUIET;
+ while (ret)
+ ret = blk_end_request(req, -EIO,
+ blk_rq_cur_bytes(req));
start_new_req:
if (rqc) {
- /*
- * If current request is packed, it needs to put back.
- */
- if (mq->mqrq_cur->packed_cmd != MMC_PACKED_NONE) {
- while (!list_empty(&mq->mqrq_cur->packed_list)) {
- prq = list_entry_rq(
- mq->mqrq_cur->packed_list.prev);
- if (prq->queuelist.prev !=
- &mq->mqrq_cur->packed_list) {
- list_del_init(&prq->queuelist);
- spin_lock_irq(mq->queue->queue_lock);
- blk_requeue_request(mq->queue, prq);
- spin_unlock_irq(mq->queue->queue_lock);
- } else {
- list_del_init(&prq->queuelist);
- }
- }
- mmc_blk_clear_packed(mq->mqrq_cur);
- }
mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
}
@@ -1962,8 +1433,6 @@
goto out;
}
- mmc_blk_write_packing_control(mq, req);
-
if (req && req->cmd_flags & REQ_SANITIZE) {
/* complete ongoing async transfer before issuing sanitize */
if (card->host && card->host->areq)
@@ -2195,8 +1664,6 @@
if (md) {
card = md->queue.card;
- device_remove_file(disk_to_dev(md->disk),
- &md->num_wr_reqs_to_start_packing);
if (md->disk->flags & GENHD_FL_UP) {
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
@@ -2264,16 +1731,6 @@
goto power_ro_lock_fail;
}
- md->num_wr_reqs_to_start_packing.show =
- num_wr_reqs_to_start_packing_show;
- md->num_wr_reqs_to_start_packing.store =
- num_wr_reqs_to_start_packing_store;
- sysfs_attr_init(&md->num_wr_reqs_to_start_packing.attr);
- md->num_wr_reqs_to_start_packing.attr.name =
- "num_wr_reqs_to_start_packing";
- md->num_wr_reqs_to_start_packing.attr.mode = S_IRUGO | S_IWUSR;
- ret = device_create_file(disk_to_dev(md->disk),
- &md->num_wr_reqs_to_start_packing);
if (ret)
goto power_ro_lock_fail;