author    Adrian Hunter <adrian.hunter@intel.com>    2017-06-15 14:06:36 +0300
committer Leonard Crestez <leonard.crestez@nxp.com>  2018-08-24 12:41:33 +0300
commit    81722c4a78f2b68ce2a1c82f4aef355a47668f2c (patch)
tree      12f5ee396a96461c47ed911ae7d5204e56e87ab5 /drivers/mmc/core
parent    9ce82ca289520ecf1c5e97ea7e140d296ec3fbc5 (diff)
MLK-16155-8 mmc: block: Prepare CQE data
Enhance mmc_blk_data_prep() to support CQE requests.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Tested-by: Haibo Chen <haibo.chen@nxp.com>
Reviewed-by: Haibo Chen <haibo.chen@nxp.com>
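With this change mmc_blk_data_prep() records everything in the request itself:
brq->mrq.tag, brq->data.blk_addr and the MMC_DATA_PRIO / MMC_DATA_REL_WR /
MMC_DATA_DAT_TAG flags, and the two boolean out-parameters become optional.
A minimal sketch of a CQE-side caller follows; the function name
mmc_blk_cqe_rw_prep() and its glue are assumptions for illustration only and
are not part of this patch:

/*
 * Sketch only: mmc_blk_cqe_rw_prep() is an assumed name, not code from
 * this patch. It shows a caller that has no use for the reliable-write /
 * data-tag booleans and simply passes NULL for them.
 */
static void mmc_blk_cqe_rw_prep(struct mmc_queue *mq,
				struct mmc_queue_req *mqrq)
{
	/*
	 * Fills mqrq->brq.mrq.tag, mqrq->brq.data.blk_addr and
	 * mqrq->brq.data.flags (MMC_DATA_PRIO / MMC_DATA_REL_WR /
	 * MMC_DATA_DAT_TAG). Passing NULL for the last two arguments
	 * skips reporting do_rel_wr / do_data_tag back to the caller.
	 */
	mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL);

	/* mqrq->brq.mrq is now ready to hand to a CQE-capable host. */
}

Keeping the out-parameters optional lets the existing mmc_blk_rw_rq_prep()
path keep its local booleans while a CQE issue path can ignore them.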
Diffstat (limited to 'drivers/mmc/core')
-rw-r--r-- drivers/mmc/core/block.c | 45
1 file changed, 34 insertions(+), 11 deletions(-)
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index b07bcc7d04bc..fb7add178dd5 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -36,6 +36,7 @@
#include <linux/compat.h>
#include <linux/pm_runtime.h>
#include <linux/idr.h>
+#include <linux/ioprio.h>
#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
@@ -1432,25 +1433,27 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
}
static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
- int disable_multi, bool *do_rel_wr,
- bool *do_data_tag)
+ int disable_multi, bool *do_rel_wr_p,
+ bool *do_data_tag_p)
{
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
struct mmc_blk_request *brq = &mqrq->brq;
struct request *req = mqrq->req;
+ bool do_rel_wr, do_data_tag;
/*
* Reliable writes are used to implement Forced Unit Access and
* are supported only on MMCs.
*/
- *do_rel_wr = (req->cmd_flags & REQ_FUA) &&
- rq_data_dir(req) == WRITE &&
- (md->flags & MMC_BLK_REL_WR);
+ do_rel_wr = (req->cmd_flags & REQ_FUA) &&
+ rq_data_dir(req) == WRITE &&
+ (md->flags & MMC_BLK_REL_WR);
memset(brq, 0, sizeof(struct mmc_blk_request));
brq->mrq.data = &brq->data;
+ brq->mrq.tag = req->tag;
brq->stop.opcode = MMC_STOP_TRANSMISSION;
brq->stop.arg = 0;
@@ -1465,6 +1468,15 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
brq->data.blksz = 512;
brq->data.blocks = blk_rq_sectors(req);
+ brq->data.blk_addr = blk_rq_pos(req);
+
+ /*
+ * The command queue supports 2 priorities: "high" (1) and "simple" (0).
+ * The eMMC will give "high" priority tasks priority over "simple"
+ * priority tasks. Here we give priority to IOPRIO_CLASS_RT.
+ */
+ if (IOPRIO_PRIO_CLASS(req_get_ioprio(req)) == IOPRIO_CLASS_RT)
+ brq->data.flags |= MMC_DATA_PRIO;
/*
* The block layer doesn't support all sector count
@@ -1494,18 +1506,23 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
brq->data.blocks);
}
- if (*do_rel_wr)
+ if (do_rel_wr) {
mmc_apply_rel_rw(brq, card, req);
+ brq->data.flags |= MMC_DATA_REL_WR;
+ }
/*
* Data tag is used only during writing meta data to speed
* up write and any subsequent read of this meta data
*/
- *do_data_tag = card->ext_csd.data_tag_unit_size &&
- (req->cmd_flags & REQ_META) &&
- (rq_data_dir(req) == WRITE) &&
- ((brq->data.blocks * brq->data.blksz) >=
- card->ext_csd.data_tag_unit_size);
+ do_data_tag = card->ext_csd.data_tag_unit_size &&
+ (req->cmd_flags & REQ_META) &&
+ (rq_data_dir(req) == WRITE) &&
+ ((brq->data.blocks * brq->data.blksz) >=
+ card->ext_csd.data_tag_unit_size);
+
+ if (do_data_tag)
+ brq->data.flags |= MMC_DATA_DAT_TAG;
mmc_set_data_timeout(&brq->data, card);
@@ -1534,6 +1551,12 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
mqrq->areq.mrq = &brq->mrq;
mmc_queue_bounce_pre(mqrq);
+
+ if (do_rel_wr_p)
+ *do_rel_wr_p = do_rel_wr;
+
+ if (do_data_tag_p)
+ *do_data_tag_p = do_data_tag;
}
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
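
The new MMC_DATA_PRIO mapping above keys off the request's I/O priority class.
As an illustrative aside (not part of the patch), a task can opt into the
command queue's "high" priority from userspace by putting itself in the
realtime I/O class via ioprio_set(2). The constants below are copied from the
uapi ioprio definitions so the sketch is self-contained:

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Values from include/uapi/linux/ioprio.h, repeated here for a
 * self-contained sketch. */
#define IOPRIO_WHO_PROCESS	1
#define IOPRIO_CLASS_RT		1
#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_PRIO_VALUE(class, data) (((class) << IOPRIO_CLASS_SHIFT) | (data))

int main(void)
{
	/* RT class, highest level (0); setting it requires CAP_SYS_ADMIN. */
	int prio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 0);

	/* pid 0 means the calling process. */
	if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, prio) < 0) {
		perror("ioprio_set");
		return 1;
	}

	/*
	 * I/O issued by this task is now IOPRIO_CLASS_RT, which is what
	 * mmc_blk_data_prep() checks before setting MMC_DATA_PRIO.
	 */
	return 0;
}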