author    Adrian Hunter <adrian.hunter@intel.com>    2017-06-15 14:06:37 +0300
committer Leonard Crestez <leonard.crestez@nxp.com> 2018-08-24 12:41:33 +0300
commit    18cba3d375cc0ac993365df4ca0ad9a26462b606 (patch)
tree      3c6c4c25dc89174bf7151a61c0cd5937c942782c /drivers/mmc/core/block.c
parent    81722c4a78f2b68ce2a1c82f4aef355a47668f2c (diff)
MLK-16155-9 mmc: block: Add CQE support
Add CQE support to the block driver, including:

 - optionally using DCMD for flush requests
 - manually issuing discard requests
 - issuing read / write requests to the CQE
 - supporting block-layer timeouts
 - handling recovery
 - supporting re-tuning

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Tested-by: Haibo Chen <haibo.chen@nxp.com>
Reviewed-by: Haibo Chen <haibo.chen@nxp.com>
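Note: the dispatch in mmc_blk_cqe_issue_rq() below hinges on mmc_cqe_issue_type(), which this series adds to the queue code rather than to this file, so it does not appear in the diff. A minimal sketch of that classification, assuming the MMC_ISSUE_* enum and an MMC_CAP2_CQE_DCMD host capability from the same series:

    /* Sketch of the request classification assumed by the diff below.
     * mmc_cqe_issue_type(), MMC_ISSUE_* and MMC_CAP2_CQE_DCMD come from
     * the companion queue.h/host.h changes in this series, not this diff.
     */
    enum mmc_issue_type {
    	MMC_ISSUE_SYNC,		/* issued directly; issuer waits for idle */
    	MMC_ISSUE_DCMD,		/* issued as a CQE direct command */
    	MMC_ISSUE_ASYNC,	/* issued as a tagged CQE task */
    };

    static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
    {
    	/* Assumption: DCMD support is advertised as a host capability */
    	return host->caps2 & MMC_CAP2_CQE_DCMD;
    }

    static inline enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
    						     struct request *req)
    {
    	switch (req_op(req)) {
    	case REQ_OP_DISCARD:
    	case REQ_OP_SECURE_ERASE:
    		/* Discards are issued manually, outside the CQE */
    		return MMC_ISSUE_SYNC;
    	case REQ_OP_FLUSH:
    		/* Flush uses DCMD when available, else drains the queue */
    		return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
    	default:
    		/* Reads and writes go to the CQE as tagged tasks */
    		return MMC_ISSUE_ASYNC;
    	}
    }

This is consistent with the three-way switch in mmc_blk_cqe_issue_rq() below: discard and secure-erase take the SYNC path, flush takes either SYNC or DCMD, and read/write take ASYNC.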
Diffstat (limited to 'drivers/mmc/core/block.c')
-rw-r--r--  drivers/mmc/core/block.c  183
1 file changed, 182 insertions(+), 1 deletion(-)
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index fb7add178dd5..1d5e9e0f0c39 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -110,6 +110,7 @@ struct mmc_blk_data {
#define MMC_BLK_WRITE BIT(1)
#define MMC_BLK_DISCARD BIT(2)
#define MMC_BLK_SECDISCARD BIT(3)
+#define MMC_BLK_CQE_RECOVERY BIT(4)
/*
* Only set in main mmc_blk_data associated
@@ -1559,6 +1560,186 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
*do_data_tag_p = do_data_tag;
}
+#define MMC_CQE_RETRIES 2
+
+void mmc_blk_cqe_complete_rq(struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+ struct mmc_host *host = mq->card->host;
+ unsigned long flags;
+ bool put_card;
+ int err;
+
+ mmc_cqe_post_req(host, mrq);
+
+ spin_lock_irqsave(q->queue_lock, flags);
+
+ mq->cqe_in_flight[mmc_cqe_issue_type(host, req)] -= 1;
+
+ put_card = mmc_cqe_tot_in_flight(mq) == 0;
+
+ if (mrq->cmd && mrq->cmd->error)
+ err = mrq->cmd->error;
+ else if (mrq->data && mrq->data->error)
+ err = mrq->data->error;
+ else
+ err = 0;
+
+ if (err) {
+ if (mqrq->retries++ < MMC_CQE_RETRIES)
+ blk_requeue_request(q, req);
+ else
+ __blk_end_request_all(req, -EIO);
+ } else if (mrq->data) {
+ if (__blk_end_request(req, 0, mrq->data->bytes_xfered))
+ blk_requeue_request(q, req);
+ } else {
+ __blk_end_request_all(req, 0);
+ }
+
+ mmc_cqe_kick_queue(mq);
+
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ if (put_card)
+ mmc_put_card(mq->card);
+}
+
+void mmc_blk_cqe_recovery(struct mmc_queue *mq)
+{
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
+ int err;
+
+ mmc_get_card(card);
+
+ pr_debug("%s: CQE recovery start\n", mmc_hostname(host));
+
+ mq->cqe_in_recovery = true;
+
+ err = mmc_cqe_recovery(host);
+ if (err)
+ mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
+ else
+ mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
+
+ mq->cqe_in_recovery = false;
+
+ pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
+
+ mmc_put_card(card);
+}
+
+static void mmc_blk_cqe_req_done(struct mmc_request *mrq)
+{
+ struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
+ brq.mrq);
+ struct request *req = mqrq->req;
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+
+ /*
+ * Block layer timeouts race with completions which means the normal
+ * completion path cannot be used during recovery.
+ */
+ if (mq->cqe_in_recovery)
+ mmc_blk_cqe_complete_rq(req);
+ else
+ blk_complete_request(req);
+}
+
+static int mmc_blk_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ mrq->done = mmc_blk_cqe_req_done;
+ return mmc_cqe_start_req(host, mrq);
+}
+
+static struct mmc_request *mmc_blk_cqe_prep_dcmd(struct mmc_queue_req *mqrq,
+ struct request *req)
+{
+ struct mmc_blk_request *brq = &mqrq->brq;
+
+ memset(brq, 0, sizeof(*brq));
+
+ brq->mrq.cmd = &brq->cmd;
+ brq->mrq.tag = req->tag;
+
+ return &brq->mrq;
+}
+
+static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = mmc_blk_cqe_prep_dcmd(mqrq, req);
+
+ mrq->cmd->opcode = MMC_SWITCH;
+ mrq->cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+ (EXT_CSD_FLUSH_CACHE << 16) |
+ (1 << 8) |
+ EXT_CSD_CMD_SET_NORMAL;
+ mrq->cmd->flags = MMC_CMD_AC | MMC_RSP_R1B;
+
+ return mmc_blk_cqe_start_req(mq->card->host, mrq);
+}
+
+static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+
+ mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL);
+
+ return mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq);
+}
+
+enum mmc_issued mmc_blk_cqe_issue_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_blk_data *md = mq->blkdata;
+ struct mmc_card *card = md->queue.card;
+ struct mmc_host *host = card->host;
+ int ret;
+
+ ret = mmc_blk_part_switch(card, md);
+ if (ret)
+ return MMC_REQ_FAILED_TO_START;
+
+ switch (mmc_cqe_issue_type(host, req)) {
+ case MMC_ISSUE_SYNC:
+ ret = host->cqe_ops->cqe_wait_for_idle(host);
+ if (ret)
+ return MMC_REQ_BUSY;
+ if (req && req_op(req) == REQ_OP_DISCARD) {
+ mmc_blk_issue_discard_rq(mq, req);
+ } else if (req && req_op(req) == REQ_OP_SECURE_ERASE) {
+ mmc_blk_issue_secdiscard_rq(mq, req);
+ } else if (req && req_op(req) == REQ_OP_FLUSH) {
+ mmc_blk_issue_flush(mq, req);
+ } else {
+ WARN_ON_ONCE(1);
+ return MMC_REQ_FAILED_TO_START;
+ }
+ return MMC_REQ_FINISHED;
+ case MMC_ISSUE_DCMD:
+ case MMC_ISSUE_ASYNC:
+ if (req && req_op(req) == REQ_OP_FLUSH) {
+ ret = mmc_blk_cqe_issue_flush(mq, req);
+ } else if (req && (req_op(req) == REQ_OP_READ ||
+ req_op(req) == REQ_OP_WRITE)) {
+ ret = mmc_blk_cqe_issue_rw_rq(mq, req);
+ } else {
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ }
+ if (!ret)
+ return MMC_REQ_STARTED;
+ return ret == -EBUSY ? MMC_REQ_BUSY : MMC_REQ_FAILED_TO_START;
+ default:
+ WARN_ON_ONCE(1);
+ return MMC_REQ_FAILED_TO_START;
+ }
+}
+
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
int disable_multi,
@@ -1972,7 +2153,7 @@ again:
INIT_LIST_HEAD(&md->part);
md->usage = 1;
- ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
+ ret = mmc_init_queue(&md->queue, card, &md->lock, subname, area_type);
if (ret)
goto err_putdisk;
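
The block code above drives the host controller entirely through host->cqe_ops: cqe_wait_for_idle() in the SYNC path, plus the request start and post-request hooks behind mmc_cqe_start_req() and mmc_cqe_post_req(). A sketch of that ops table, hedged as an outline of the interface this series assumes in include/linux/mmc/host.h rather than a verbatim copy of the struct in this tree:

    /* Outline of the CQE host interface assumed by the block code above */
    struct mmc_cqe_ops {
    	int	(*cqe_enable)(struct mmc_host *host, struct mmc_card *card);
    	void	(*cqe_disable)(struct mmc_host *host);
    	/* Start a read/write task or a DCMD; completion is asynchronous */
    	int	(*cqe_request)(struct mmc_host *host, struct mmc_request *mrq);
    	/* Release per-request resources after completion (unmap DMA, etc.) */
    	void	(*cqe_post_req)(struct mmc_host *host, struct mmc_request *mrq);
    	/* Drain the queue before a synchronous (non-CQE) request */
    	int	(*cqe_wait_for_idle)(struct mmc_host *host);
    	/* Block-layer timeout hook: report whether recovery is needed */
    	bool	(*cqe_timeout)(struct mmc_host *host, struct mmc_request *mrq,
    			       bool *recovery_needed);
    	void	(*cqe_recovery_start)(struct mmc_host *host);
    	void	(*cqe_recovery_finish)(struct mmc_host *host);
    };

A CQE-capable host driver fills in this table; the block driver never touches CQE hardware state directly, which is what lets mmc_blk_cqe_recovery() above stay host-agnostic.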