Diffstat (limited to 'drivers/mmc/card/block.c')
-rw-r--r--  drivers/mmc/card/block.c  109
1 file changed, 51 insertions(+), 58 deletions(-)
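This diff stops allocating the per-transfer struct mmc_blk_request on the stack of mmc_blk_issue_rw_rq() and instead uses one embedded in the queue's current request slot, mq->mqrq_cur. Moving the request state into queue-owned storage is groundwork for issuing requests asynchronously: state that lives in the queue can outlive the function invocation that built it. Judging from the new call sites (mq->mqrq_cur->brq, mq->mqrq_cur->sg, and the bounce helpers below), the companion change to the queue header, not shown here, presumably adds something like the following; the exact layout is inferred from this diff, not quoted:

struct mmc_queue_req {
	struct request		*req;	/* block-layer request being served */
	struct mmc_blk_request	brq;	/* was a stack variable, see below */
	struct scatterlist	*sg;	/* was mq->sg */
	/* plus whatever state mmc_queue_bounce_pre/post need */
};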
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index ea70e8a03a14..0d2bcdb64756 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -427,14 +427,6 @@ static const struct block_device_operations mmc_bdops = {
#endif
};
-struct mmc_blk_request {
- struct mmc_request mrq;
- struct mmc_command sbc;
- struct mmc_command cmd;
- struct mmc_command stop;
- struct mmc_data data;
-};
-
static inline int mmc_blk_part_switch(struct mmc_card *card,
struct mmc_blk_data *md)
{
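The struct is removed here rather than deleted from the driver: since the queue's request slot now embeds it by value, its definition has to move to a header the queue code can see (presumably drivers/mmc/card/queue.h). For reference, the mmc_request member of this bundle carries only pointers; the setup code in the hunks below wires them to the sibling fields on every pass:

	brq->mrq.cmd  = &brq->cmd;	/* the read/write command */
	brq->mrq.data = &brq->data;	/* payload description + sg list */
	brq->mrq.sbc  = &brq->sbc;	/* optional CMD23, set conditionally */
	brq->mrq.stop = &brq->stop;	/* CMD12, multi-block transfers only */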
@@ -824,7 +816,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
- struct mmc_blk_request brq;
+ struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
int ret = 1, disable_multi = 0, retry = 0;
/*
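The only functional change in this hunk is where brq lives. On the stack it vanished as soon as mmc_blk_issue_rw_rq() returned; pointing it into the queue means the same state can still be reached after the function unwinds, which a future completion path will need. Roughly:

	/* before: automatic storage, gone at function exit */
	struct mmc_blk_request brq;

	/* after: owned by the queue's current request slot */
	struct mmc_blk_request *brq = &mq->mqrq_cur->brq;

The _cur suffix hints at a current/previous pair of slots, so one request can be prepared while another is in flight; that machinery is not part of this diff.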
@@ -839,60 +831,60 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
do {
u32 readcmd, writecmd;
- memset(&brq, 0, sizeof(struct mmc_blk_request));
- brq.mrq.cmd = &brq.cmd;
- brq.mrq.data = &brq.data;
+ memset(brq, 0, sizeof(struct mmc_blk_request));
+ brq->mrq.cmd = &brq->cmd;
+ brq->mrq.data = &brq->data;
- brq.cmd.arg = blk_rq_pos(req);
+ brq->cmd.arg = blk_rq_pos(req);
if (!mmc_card_blockaddr(card))
- brq.cmd.arg <<= 9;
- brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
- brq.data.blksz = 512;
- brq.stop.opcode = MMC_STOP_TRANSMISSION;
- brq.stop.arg = 0;
- brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
- brq.data.blocks = blk_rq_sectors(req);
+ brq->cmd.arg <<= 9;
+ brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+ brq->data.blksz = 512;
+ brq->stop.opcode = MMC_STOP_TRANSMISSION;
+ brq->stop.arg = 0;
+ brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ brq->data.blocks = blk_rq_sectors(req);
/*
* The block layer doesn't support all sector count
* restrictions, so we need to be prepared for too big
* requests.
*/
- if (brq.data.blocks > card->host->max_blk_count)
- brq.data.blocks = card->host->max_blk_count;
+ if (brq->data.blocks > card->host->max_blk_count)
+ brq->data.blocks = card->host->max_blk_count;
/*
* After a read error, we redo the request one sector at a time
* in order to accurately determine which sectors can be read
* successfully.
*/
- if (disable_multi && brq.data.blocks > 1)
- brq.data.blocks = 1;
+ if (disable_multi && brq->data.blocks > 1)
+ brq->data.blocks = 1;
- if (brq.data.blocks > 1 || do_rel_wr) {
+ if (brq->data.blocks > 1 || do_rel_wr) {
/* SPI multiblock writes terminate using a special
* token, not a STOP_TRANSMISSION request.
*/
if (!mmc_host_is_spi(card->host) ||
rq_data_dir(req) == READ)
- brq.mrq.stop = &brq.stop;
+ brq->mrq.stop = &brq->stop;
readcmd = MMC_READ_MULTIPLE_BLOCK;
writecmd = MMC_WRITE_MULTIPLE_BLOCK;
} else {
- brq.mrq.stop = NULL;
+ brq->mrq.stop = NULL;
readcmd = MMC_READ_SINGLE_BLOCK;
writecmd = MMC_WRITE_BLOCK;
}
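The opcode selection above follows the usual MMC command pairs. As a quick reference (standard MMC opcodes, not part of the patch):

	/* multi-block:  CMD18 (READ_MULTIPLE_BLOCK) / CMD25 (WRITE_MULTIPLE_BLOCK),
	 *               terminated by CMD12 (STOP_TRANSMISSION)
	 * single block: CMD17 (READ_SINGLE_BLOCK)   / CMD24 (WRITE_BLOCK), no stop
	 * SPI multi-block writes end with a stop-transfer token instead of CMD12,
	 * hence the mmc_host_is_spi() special case. */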
if (rq_data_dir(req) == READ) {
- brq.cmd.opcode = readcmd;
- brq.data.flags |= MMC_DATA_READ;
+ brq->cmd.opcode = readcmd;
+ brq->data.flags |= MMC_DATA_READ;
} else {
- brq.cmd.opcode = writecmd;
- brq.data.flags |= MMC_DATA_WRITE;
+ brq->cmd.opcode = writecmd;
+ brq->data.flags |= MMC_DATA_WRITE;
}
if (do_rel_wr)
- mmc_apply_rel_rw(&brq, card, req);
+ mmc_apply_rel_rw(brq, card, req);
/*
* Pre-defined multi-block transfers are preferable to
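One consequence of brq no longer being a fresh stack variable: the slot is reused across requests and across iterations of this retry loop, so the memset at the top of each pass is what keeps stale error codes, flags, and the conditionally-set sbc/stop pointers of a previous attempt out of the new submission:

	memset(brq, 0, sizeof(struct mmc_blk_request));	/* scrub the reused slot */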
@@ -914,29 +906,29 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
*/
if ((md->flags & MMC_BLK_CMD23) &&
- mmc_op_multi(brq.cmd.opcode) &&
+ mmc_op_multi(brq->cmd.opcode) &&
(do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
- brq.sbc.opcode = MMC_SET_BLOCK_COUNT;
- brq.sbc.arg = brq.data.blocks |
+ brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
+ brq->sbc.arg = brq->data.blocks |
(do_rel_wr ? (1 << 31) : 0);
- brq.sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
- brq.mrq.sbc = &brq.sbc;
+ brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ brq->mrq.sbc = &brq->sbc;
}
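When host and card support it, CMD23 (SET_BLOCK_COUNT) is sent ahead of the data command so the transfer length is pre-defined and no trailing CMD12 is needed. The argument encodes the block count in the low bits, and bit 31 requests a reliable write (per the eMMC definition of CMD23; worth verifying the exact bit layout against the spec):

	brq->sbc.arg = brq->data.blocks | (do_rel_wr ? (1 << 31) : 0);
	/* bits 15:0 = block count, bit 31 = reliable write request */

Setting brq->mrq.sbc is what tells the MMC core to issue the extra command.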
- mmc_set_data_timeout(&brq.data, card);
+ mmc_set_data_timeout(&brq->data, card);
- brq.data.sg = mq->sg;
- brq.data.sg_len = mmc_queue_map_sg(mq);
+ brq->data.sg = mq->mqrq_cur->sg;
+ brq->data.sg_len = mmc_queue_map_sg(mq, mq->mqrq_cur);
/*
* Adjust the sg list so it is the same size as the
* request.
*/
- if (brq.data.blocks != blk_rq_sectors(req)) {
- int i, data_size = brq.data.blocks << 9;
+ if (brq->data.blocks != blk_rq_sectors(req)) {
+ int i, data_size = brq->data.blocks << 9;
struct scatterlist *sg;
- for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
+ for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
data_size -= sg->length;
if (data_size <= 0) {
sg->length += data_size;
@@ -944,14 +936,14 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
break;
}
}
- brq.data.sg_len = i;
+ brq->data.sg_len = i;
}
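This trim handles the case where brq->data.blocks was clamped below blk_rq_sectors(req) (host limit, or single-sector retry mode): the scatterlist still describes the whole request, so the loop subtracts each segment from data_size, shortens the final in-range segment by the overshoot, and truncates sg_len to the entries actually used. A worked example, assuming an 8-sector request clamped to one block:

	/* request: 4096 B as sg = [2048, 2048], clamped to 512 B
	 * pass 1: data_size = 512 - 2048 = -1536  ->  <= 0, so
	 *         sg->length += -1536  ->  first entry is now 512 B
	 *         i++ and break        ->  sg_len becomes 1 */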
- mmc_queue_bounce_pre(mq);
+ mmc_queue_bounce_pre(mq->mqrq_cur);
- mmc_wait_for_req(card->host, &brq.mrq);
+ mmc_wait_for_req(card->host, &brq->mrq);
- mmc_queue_bounce_post(mq);
+ mmc_queue_bounce_post(mq->mqrq_cur);
/*
* sbc.error indicates a problem with the set block count
@@ -963,8 +955,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
* stop.error indicates a problem with the stop command. Data
* may have been transferred, or may still be transferring.
*/
- if (brq.sbc.error || brq.cmd.error || brq.stop.error) {
- switch (mmc_blk_cmd_recovery(card, req, &brq)) {
+ if (brq->sbc.error || brq->cmd.error || brq->stop.error) {
+ switch (mmc_blk_cmd_recovery(card, req, brq)) {
case ERR_RETRY:
if (retry++ < 5)
continue;
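The remaining arms of this switch fall in the elided lines; based on the error names used here, they presumably look something like the sketch below (hypothetical reconstruction, not quoted from the patch):

	case ERR_ABORT:
		goto cmd_abort;		/* unrecoverable: fail the request */
	case ERR_CONTINUE:
		break;			/* proceed to the per-stage checks below */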
@@ -980,9 +972,9 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
* initial command - such as address errors. No data
* has been transferred.
*/
- if (brq.cmd.resp[0] & CMD_ERRORS) {
+ if (brq->cmd.resp[0] & CMD_ERRORS) {
pr_err("%s: r/w command failed, status = %#x\n",
- req->rq_disk->disk_name, brq.cmd.resp[0]);
+ req->rq_disk->disk_name, brq->cmd.resp[0]);
goto cmd_abort;
}
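CMD_ERRORS is a mask over R1 card-status bits that mean the command itself was refused, so no data can have moved. Elsewhere in block.c of this vintage it is defined roughly as follows (reproduced from memory; check the tree before relying on it):

#define CMD_ERRORS							\
	(R1_OUT_OF_RANGE |	/* command argument out of range */	\
	 R1_ADDRESS_ERROR |	/* misaligned address */		\
	 R1_BLOCK_LEN_ERROR |	/* block length beyond card limits */	\
	 R1_WP_VIOLATION |	/* write to a protected area */		\
	 R1_CC_ERROR |		/* card controller error */		\
	 R1_ERROR)		/* general or unknown error */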
@@ -1009,15 +1001,15 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
(R1_CURRENT_STATE(status) == R1_STATE_PRG));
}
- if (brq.data.error) {
+ if (brq->data.error) {
pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
- req->rq_disk->disk_name, brq.data.error,
+ req->rq_disk->disk_name, brq->data.error,
(unsigned)blk_rq_pos(req),
(unsigned)blk_rq_sectors(req),
- brq.cmd.resp[0], brq.stop.resp[0]);
+ brq->cmd.resp[0], brq->stop.resp[0]);
if (rq_data_dir(req) == READ) {
- if (brq.data.blocks > 1) {
+ if (brq->data.blocks > 1) {
/* Redo read one sector at a time */
pr_warning("%s: retrying using single block read\n",
req->rq_disk->disk_name);
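The retry itself sits in the elided lines just below: the code presumably flips disable_multi and goes around the loop again, which makes the clamp near the top of the loop (brq->data.blocks forced to 1 when disable_multi is set) take effect. Sketch of the likely shape:

	disable_multi = 1;	/* hypothetical: force single-sector reads */
	continue;		/* reissue from the top of the do-loop */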
@@ -1031,7 +1023,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
* read a single sector.
*/
spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, -EIO, brq.data.blksz);
+ ret = __blk_end_request(req, -EIO,
+ brq->data.blksz);
spin_unlock_irq(&md->lock);
continue;
} else {
@@ -1043,7 +1036,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
* A block was successfully transferred.
*/
spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
+ ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
spin_unlock_irq(&md->lock);
} while (ret);
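The do/while is driven by the block layer's partial-completion contract: __blk_end_request() retires the given number of bytes and returns nonzero while the request still has more outstanding, so each successful pass completes brq->data.bytes_xfered and loops until the whole request is done:

	ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
	/* ret != 0 => sectors remain; loop and issue the rest */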
@@ -1069,7 +1062,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
}
} else {
spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
+ ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
spin_unlock_irq(&md->lock);
}
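The new call signatures in this file imply matching changes on the queue side (queue.c/queue.h), which are not part of this diff. Presumably along the lines of:

unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq);
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq);
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq);

i.e. the sg mapping and bounce-buffer helpers now operate on a specific request slot rather than on the queue's single implicit one.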