author     naveenk <naveenk@nvidia.com>            2012-08-06 20:03:12 +0530
committer  Varun Colbert <vcolbert@nvidia.com>     2012-08-20 17:55:48 -0700
commit     fa3a748000b65ad4854a95761b6067f89f1c4c61 (patch)
tree       9a9949a3acad2f2dc135facdab611be87edd00b0
parent     2f20f517c1cd40d70d088828258e03d6b52c3996 (diff)
mmc: core: Support packed command for eMMC4.5 device
Packed command support for eMMC 4.5 device.

Bug 837103

Change-Id: I155ecde2b90b73f6c972816d6a4ef0fb2444763c
Signed-off-by: Seungwon Jeon <tgih.jun@samsung.com>
Signed-off-by: naveen kumar arepalli <naveenk@nvidia.com>
Reviewed-on: http://git-master/r/121328
Reviewed-by: Varun Colbert <vcolbert@nvidia.com>
Tested-by: Varun Colbert <vcolbert@nvidia.com>
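At the core of the patch is the 512-byte packed command header built by mmc_blk_packed_hdr_wrq_prep() below: word 0 carries the entry count, the direction and the header version, and each subsequent word pair carries the CMD23 argument and start address of one packed request. A minimal sketch of that layout (fill_packed_hdr and struct packed_entry are hypothetical helpers for illustration, not part of the patch):

#include <linux/string.h>
#include <linux/types.h>

#define PACKED_CMD_VER	0x01
#define PACKED_CMD_RD	0x01
#define PACKED_CMD_WR	0x02

/* Hypothetical per-entry descriptor, for this sketch only */
struct packed_entry {
	u32 cmd23_arg;		/* block count (+ MMC_CMD23_ARG_REL_WR if reliable) */
	u32 start_addr;		/* CMD18/CMD25 argument */
};

static void fill_packed_hdr(u32 hdr[128], const struct packed_entry *ent,
			    u8 nr, bool read)
{
	int i;

	memset(hdr, 0, 512);
	/* Word 0: number of entries | direction | header version */
	hdr[0] = (nr << 16) |
		 ((read ? PACKED_CMD_RD : PACKED_CMD_WR) << 8) |
		 PACKED_CMD_VER;
	/* Entry i occupies words 2i (CMD23 arg) and 2i+1 (start address) */
	for (i = 1; i <= nr; i++) {
		hdr[2 * i]     = ent[i - 1].cmd23_arg;
		hdr[2 * i + 1] = ent[i - 1].start_addr;
	}
}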
-rw-r--r--  drivers/mmc/card/block.c   | 459
-rw-r--r--  drivers/mmc/card/queue.c   |  50
-rw-r--r--  drivers/mmc/card/queue.h   |  14
-rw-r--r--  drivers/mmc/core/mmc.c     |  24
-rw-r--r--  drivers/mmc/core/mmc_ops.c |   1
-rw-r--r--  include/linux/mmc/card.h   |   3
-rw-r--r--  include/linux/mmc/core.h   |   3
-rw-r--r--  include/linux/mmc/host.h   |   1
-rw-r--r--  include/linux/mmc/mmc.h    |  16
9 files changed, 546 insertions(+), 25 deletions(-)
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index b7c16d4ef616..c5cfb1442bb4 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -79,6 +79,15 @@ static DECLARE_BITMAP(name_use, 256);
/*
* There is one mmc_blk_data per slot.
*/
+
+#define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \
+ (req->cmd_flags & REQ_META)) && \
+ (rq_data_dir(req) == WRITE))
+#define PACKED_CMD_VER 0x01
+#define PACKED_CMD_RD 0x01
+#define PACKED_CMD_WR 0x02
+
+
struct mmc_blk_data {
spinlock_t lock;
struct gendisk *disk;
@@ -98,7 +107,7 @@ struct mmc_blk_data {
#define MMC_BLK_WRITE BIT(1)
#define MMC_BLK_DISCARD BIT(2)
#define MMC_BLK_SECDISCARD BIT(3)
-
+#define MMC_BLK_WR_HDR BIT(4)
/*
* Only set in main mmc_blk_data associated
* with mmc_card with mmc_set_drvdata, and keeps
@@ -1049,7 +1058,9 @@ static int mmc_blk_err_check(struct mmc_card *card,
* kind. If it was a write, we may have transitioned to
* program mode, which we have to wait for it to complete.
*/
- if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
+ if ((!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) ||
+ (mq_mrq->packed_cmd == MMC_PACKED_WR_HDR)) {
+
u32 status;
do {
int err = get_card_status(card, &status, 5);
@@ -1086,12 +1097,61 @@ static int mmc_blk_err_check(struct mmc_card *card,
if (!brq->data.bytes_xfered)
return MMC_BLK_RETRY;
+ if (mq_mrq->packed_cmd != MMC_PACKED_NONE) {
+ if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
+ return MMC_BLK_PARTIAL;
+ else
+ return MMC_BLK_SUCCESS;
+ }
+
if (blk_rq_bytes(req) != brq->data.bytes_xfered)
return MMC_BLK_PARTIAL;
return MMC_BLK_SUCCESS;
}
+static int mmc_blk_packed_err_check(struct mmc_card *card,
+ struct mmc_async_req *areq)
+{
+ struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
+ mmc_active);
+ struct request *req = mq_rq->req;
+ int err, check, status;
+ u8 ext_csd[512];
+
+ check = mmc_blk_err_check(card, areq);
+ err = get_card_status(card, &status, 0);
+ if (err) {
+ pr_err("%s: error %d sending status command\n",
+ req->rq_disk->disk_name, err);
+ return MMC_BLK_ABORT;
+ }
+
+ if (status & R1_EXP_EVENT) {
+ err = mmc_send_ext_csd(card, ext_csd);
+ if (err) {
+ pr_err("%s: error %d sending ext_csd\n",
+ req->rq_disk->disk_name, err);
+ return MMC_BLK_ABORT;
+ }
+
+ if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
+ EXT_CSD_PACKED_FAILURE) &&
+ (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
+ EXT_CSD_PACKED_GENERIC_ERROR)) {
+ if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
+ EXT_CSD_PACKED_INDEXED_ERROR) {
+ mq_rq->packed_fail_idx =
+ ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
+ return MMC_BLK_PARTIAL;
+ }
+ }
+ }
+
+ return check;
+}
+
+
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
int disable_multi,
@@ -1275,6 +1335,283 @@ static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
return ret;
}
+
+static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
+{
+ struct request_queue *q = mq->queue;
+ struct mmc_card *card = mq->card;
+ struct request *cur = req, *next = NULL;
+ struct mmc_blk_data *md = mq->data;
+ bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
+ unsigned int req_sectors = 0, phys_segments = 0;
+ unsigned int max_blk_count, max_phys_segs;
+ u8 put_back = 0;
+ u8 max_packed_rw = 0;
+ u8 reqs = 0;
+
+ mq->mqrq_cur->packed_num = 0;
+
+ if (!(md->flags & MMC_BLK_CMD23) ||
+ !card->ext_csd.packed_event_en)
+ goto no_packed;
+
+ if (rq_data_dir(cur) == READ)
+ max_packed_rw = card->ext_csd.max_packed_reads;
+ else
+ max_packed_rw = card->ext_csd.max_packed_writes;
+
+ if (max_packed_rw == 0)
+ goto no_packed;
+
+ if (mmc_req_rel_wr(cur) &&
+ (md->flags & MMC_BLK_REL_WR) &&
+ !en_rel_wr) {
+ goto no_packed;
+ }
+
+ max_blk_count = min(card->host->max_blk_count,
+ card->host->max_req_size >> 9);
+ if (unlikely(max_blk_count > 0xffff))
+ max_blk_count = 0xffff;
+
+ max_phys_segs = queue_max_segments(q);
+ req_sectors += blk_rq_sectors(cur);
+ phys_segments += req->nr_phys_segments;
+
+ if (rq_data_dir(cur) == WRITE) {
+ req_sectors++;
+ phys_segments++;
+ }
+
+ while (reqs < max_packed_rw - 1) {
+ spin_lock_irq(q->queue_lock);
+ next = blk_fetch_request(q);
+ spin_unlock_irq(q->queue_lock);
+ if (!next)
+ break;
+
+ if (next->cmd_flags & REQ_DISCARD ||
+ next->cmd_flags & REQ_FLUSH) {
+ put_back = 1;
+ break;
+ }
+
+ if (rq_data_dir(cur) != rq_data_dir(next)) {
+ put_back = 1;
+ break;
+ }
+
+ if (mmc_req_rel_wr(next) &&
+ (md->flags & MMC_BLK_REL_WR) &&
+ !en_rel_wr) {
+ put_back = 1;
+ break;
+ }
+
+ req_sectors += blk_rq_sectors(next);
+ if (req_sectors > max_blk_count) {
+ put_back = 1;
+ break;
+ }
+
+ phys_segments += next->nr_phys_segments;
+ if (phys_segments > max_phys_segs) {
+ put_back = 1;
+ break;
+ }
+
+ list_add_tail(&next->queuelist, &mq->mqrq_cur->packed_list);
+ cur = next;
+ reqs++;
+ }
+
+ if (put_back) {
+ spin_lock_irq(q->queue_lock);
+ blk_requeue_request(q, next);
+ spin_unlock_irq(q->queue_lock);
+ }
+
+ if (reqs > 0) {
+ list_add(&req->queuelist, &mq->mqrq_cur->packed_list);
+ mq->mqrq_cur->packed_num = ++reqs;
+ return reqs;
+ }
+
+no_packed:
+ mq->mqrq_cur->packed_cmd = MMC_PACKED_NONE;
+ mq->mqrq_cur->packed_num = 0;
+ return 0;
+}
+
+static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
+ struct mmc_card *card,
+ struct mmc_queue *mq,
+ u8 reqs)
+{
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct request *req = mqrq->req;
+ struct request *prq;
+ struct mmc_blk_data *md = mq->data;
+ bool do_rel_wr;
+ u32 *packed_cmd_hdr = mqrq->packed_cmd_hdr;
+ u8 i = 1;
+
+ mqrq->packed_cmd = (rq_data_dir(req) == READ) ?
+ MMC_PACKED_WR_HDR : MMC_PACKED_WRITE;
+ mqrq->packed_blocks = 0;
+ mqrq->packed_fail_idx = -1;
+
+ memset(packed_cmd_hdr, 0, sizeof(mqrq->packed_cmd_hdr));
+ packed_cmd_hdr[0] = (reqs << 16) |
+ (((rq_data_dir(req) == READ) ?
+ PACKED_CMD_RD : PACKED_CMD_WR) << 8) |
+ PACKED_CMD_VER;
+
+ /*
+ * Argument for each entry of the packed group
+ */
+ list_for_each_entry(prq, &mqrq->packed_list, queuelist) {
+ do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
+ /* Argument of CMD23 */
+ packed_cmd_hdr[(i * 2)] = (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
+ blk_rq_sectors(prq);
+ /* Argument of CMD18 or CMD25 */
+ packed_cmd_hdr[((i * 2)) + 1] = mmc_card_blockaddr(card) ?
+ blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
+ mqrq->packed_blocks += blk_rq_sectors(prq);
+ i++;
+ }
+
+ memset(brq, 0, sizeof(struct mmc_blk_request));
+ brq->mrq.cmd = &brq->cmd;
+ brq->mrq.data = &brq->data;
+ brq->mrq.sbc = &brq->sbc;
+ brq->mrq.stop = &brq->stop;
+
+ brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
+ brq->sbc.arg = MMC_CMD23_ARG_PACKED |
+ ((rq_data_dir(req) == READ) ? 1 : mqrq->packed_blocks + 1);
+ brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+ brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
+ brq->cmd.arg = blk_rq_pos(req);
+ if (!mmc_card_blockaddr(card))
+ brq->cmd.arg <<= 9;
+ brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ brq->data.blksz = 512;
+ /*
+ * Write the packed command header separately only for a packed read.
+ * For a packed write, the header is sent together with the data blocks.
+ */
+ brq->data.blocks = (rq_data_dir(req) == READ) ?
+ 1 : mqrq->packed_blocks + 1;
+ brq->data.flags |= MMC_DATA_WRITE;
+
+ brq->stop.opcode = MMC_STOP_TRANSMISSION;
+ brq->stop.arg = 0;
+ brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+
+ mmc_set_data_timeout(&brq->data, card);
+
+ brq->data.sg = mqrq->sg;
+ brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+
+ mqrq->mmc_active.mrq = &brq->mrq;
+ mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
+
+ mmc_queue_bounce_pre(mqrq);
+}
+
+static void mmc_blk_packed_rrq_prep(struct mmc_queue_req *mqrq,
+ struct mmc_card *card,
+ struct mmc_queue *mq)
+{
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct request *req = mqrq->req;
+
+ mqrq->packed_cmd = MMC_PACKED_READ;
+
+ memset(brq, 0, sizeof(struct mmc_blk_request));
+ brq->mrq.cmd = &brq->cmd;
+ brq->mrq.data = &brq->data;
+ brq->mrq.stop = &brq->stop;
+
+ brq->cmd.opcode = MMC_READ_MULTIPLE_BLOCK;
+ brq->cmd.arg = blk_rq_pos(req);
+ if (!mmc_card_blockaddr(card))
+ brq->cmd.arg <<= 9;
+ brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+ brq->data.blksz = 512;
+ brq->data.blocks = mqrq->packed_blocks;
+ brq->data.flags |= MMC_DATA_READ;
+
+ brq->stop.opcode = MMC_STOP_TRANSMISSION;
+ brq->stop.arg = 0;
+ brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+
+ mmc_set_data_timeout(&brq->data, card);
+
+ brq->data.sg = mqrq->sg;
+ brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+
+ mqrq->mmc_active.mrq = &brq->mrq;
+ mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
+
+ mmc_queue_bounce_pre(mqrq);
+}
+
+
+static int mmc_blk_chk_hdr_err(struct mmc_queue *mq, int status)
+{
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ int type = MMC_BLK_WR_HDR;
+ int err = 0;
+
+ switch (status) {
+ case MMC_BLK_PARTIAL:
+ case MMC_BLK_RETRY:
+ err = 0;
+ break;
+ case MMC_BLK_CMD_ERR:
+ case MMC_BLK_ABORT:
+ case MMC_BLK_DATA_ERR:
+ case MMC_BLK_ECC_ERR:
+ err = mmc_blk_reset(md, card->host, type);
+ break;
+ }
+
+ return err;
+}
+
+static int mmc_blk_issue_packed_rd(struct mmc_queue *mq,
+ struct mmc_queue_req *mq_rq)
+{
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ int status, ret = -EIO, retry = 2;
+
+ do {
+ mmc_start_req(card->host, NULL, (int *) &status);
+ if (status) {
+ ret = mmc_blk_chk_hdr_err(mq, status);
+ if (ret)
+ break;
+ mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq, mq_rq->packed_num);
+ mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
+ } else {
+ mmc_blk_packed_rrq_prep(mq_rq, card, mq);
+ mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
+ ret = 0;
+ break;
+ }
+ } while (retry-- > 0);
+
+ return ret;
+}
+
+
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
{
struct mmc_blk_data *md = mq->data;
@@ -1283,21 +1620,32 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
int ret = 1, disable_multi = 0, retry = 0, type;
enum mmc_blk_status status;
struct mmc_queue_req *mq_rq;
- struct request *req;
+ struct request *req, *prq;
struct mmc_async_req *areq;
+ u8 reqs = 0;
if (!rqc && !mq->mqrq_prev->req)
return 0;
+ if (rqc)
+ reqs = mmc_blk_prep_packed_list(mq, rqc);
+
do {
if (rqc) {
- mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+ if (reqs >= 2)
+ mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur, card, mq, reqs);
+ else
+ mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
areq = &mq->mqrq_cur->mmc_active;
} else
areq = NULL;
areq = mmc_start_req(card->host, areq, (int *) &status);
- if (!areq)
- return 0;
+ if (!areq) {
+ if (mq->mqrq_cur->packed_cmd == MMC_PACKED_WR_HDR)
+ goto snd_packed_rd;
+ else
+ return 0;
+ }
mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
brq = &mq_rq->brq;
@@ -1312,10 +1660,34 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
* A block was successfully transferred.
*/
mmc_blk_reset_success(md, type);
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, 0,
+ if (mq_rq->packed_cmd != MMC_PACKED_NONE) {
+ int idx = mq_rq->packed_fail_idx, i = 0;
+ while (!list_empty(&mq_rq->packed_list)) {
+ prq = list_entry_rq(mq_rq->packed_list.next);
+ if (idx == i) {
+ /* retry from error index */
+ mq_rq->packed_num -= idx;
+ if (mq_rq->packed_num == 1)
+ mq_rq->packed_cmd = MMC_PACKED_NONE;
+ mq_rq->req = prq;
+ ret = 1;
+ break;
+ }
+ list_del_init(&prq->queuelist);
+ spin_lock_irq(&md->lock);
+ ret = __blk_end_request(prq, 0, blk_rq_bytes(prq));
+ spin_unlock_irq(&md->lock);
+ i++;
+ }
+ if (idx == -1)
+ mq_rq->packed_num = 0;
+ break;
+ } else {
+ spin_lock_irq(&md->lock);
+ ret = __blk_end_request(req, 0,
brq->data.bytes_xfered);
- spin_unlock_irq(&md->lock);
+ spin_unlock_irq(&md->lock);
+ }
/*
* If the blk_end_request function returns non-zero even
* though all data has been transferred and no errors
@@ -1367,40 +1739,83 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
*/
spin_lock_irq(&md->lock);
ret = __blk_end_request(req, -EIO,
- brq->data.blksz);
+ brq->data.blksz);
spin_unlock_irq(&md->lock);
if (!ret)
goto start_new_req;
+ if (mq_rq->packed_cmd != MMC_PACKED_NONE)
+ break;
break;
case MMC_BLK_NOMEDIUM:
goto cmd_abort;
}
if (ret) {
- /*
- * In case of a incomplete request
- * prepare it again and resend.
- */
- mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
- mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
+ if (mq_rq->packed_cmd == MMC_PACKED_NONE) {
+ /*
+ * In case of an incomplete request
+ * prepare it again and resend.
+ */
+ mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
+ mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
+ } else {
+ mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq, mq_rq->packed_num);
+ mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
+ if (mq_rq->packed_cmd == MMC_PACKED_WR_HDR) {
+ if (mmc_blk_issue_packed_rd(mq, mq_rq))
+ goto cmd_abort;
+ }
+ }
+
}
} while (ret);
if (brq->cmd.resp[0] & R1_URGENT_BKOPS)
mmc_card_set_need_bkops(card);
+snd_packed_rd:
+ if (mq->mqrq_cur->packed_cmd == MMC_PACKED_WR_HDR) {
+ if (mmc_blk_issue_packed_rd(mq, mq->mqrq_cur))
+ goto start_new_req;
+ }
+
return 1;
cmd_abort:
- spin_lock_irq(&md->lock);
- if (mmc_card_removed(card))
- req->cmd_flags |= REQ_QUIET;
- while (ret)
- ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
- spin_unlock_irq(&md->lock);
+ if (mq_rq->packed_cmd == MMC_PACKED_NONE) {
+ spin_lock_irq(&md->lock);
+ if (mmc_card_removed(card))
+ req->cmd_flags |= REQ_QUIET;
+ while (ret)
+ ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
+ spin_unlock_irq(&md->lock);
+ } else {
+ while (!list_empty(&mq_rq->packed_list)) {
+ prq = list_entry_rq(mq_rq->packed_list.next);
+ list_del_init(&prq->queuelist);
+ spin_lock_irq(&md->lock);
+ __blk_end_request(prq, -EIO, blk_rq_bytes(prq));
+ spin_unlock_irq(&md->lock);
+ }
+ }
start_new_req:
if (rqc) {
+ /*
+ * If the current request is packed, it needs to be put back.
+ */
+ if (mq->mqrq_cur->packed_cmd != MMC_PACKED_NONE) {
+ while (!list_empty(&mq->mqrq_cur->packed_list)) {
+ prq = list_entry_rq(mq->mqrq_cur->packed_list.prev);
+ if (prq->queuelist.prev != &mq->mqrq_cur->packed_list) {
+ list_del_init(&prq->queuelist);
+ blk_requeue_request(mq->queue, prq);
+ } else {
+ list_del_init(&prq->queuelist);
+ }
+ }
+ }
+
mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
}
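Taken together, the block.c changes give a packed transaction the following on-the-bus shape (a descriptive sketch matching the sbc/cmd setup in mmc_blk_packed_hdr_wrq_prep() and mmc_blk_packed_rrq_prep() above):

/*
 * Packed write (single phase, MMC_PACKED_WRITE):
 *   CMD23  arg = MMC_CMD23_ARG_PACKED | (data blocks + 1)
 *   CMD25  -> [512B header][entry 1 data] ... [entry N data]
 *
 * Packed read (two phases):
 *   CMD23  arg = MMC_CMD23_ARG_PACKED | 1
 *   CMD25  -> [512B header]                      (MMC_PACKED_WR_HDR)
 *   CMD18  <- [entry 1 data] ... [entry N data]  (MMC_PACKED_READ,
 *             open-ended, terminated with CMD12)
 */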
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 87186868ae6d..d584d1f3ef8c 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -183,6 +183,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
+ INIT_LIST_HEAD(&mqrq_cur->packed_list);
+ INIT_LIST_HEAD(&mqrq_prev->packed_list);
mq->mqrq_cur = mqrq_cur;
mq->mqrq_prev = mqrq_prev;
mq->queue->queuedata = mq;
@@ -383,6 +385,40 @@ void mmc_queue_resume(struct mmc_queue *mq)
}
}
+static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
+ struct mmc_queue_req *mqrq,
+ struct scatterlist *sg)
+{
+ struct scatterlist *__sg;
+ unsigned int sg_len = 0;
+ struct request *req;
+ enum mmc_packed_cmd cmd;
+
+ cmd = mqrq->packed_cmd;
+
+ if (cmd == MMC_PACKED_WR_HDR || cmd == MMC_PACKED_WRITE) {
+ __sg = sg;
+ sg_set_buf(__sg, mqrq->packed_cmd_hdr,
+ sizeof(mqrq->packed_cmd_hdr));
+ sg_len++;
+ if (cmd == MMC_PACKED_WR_HDR) {
+ sg_mark_end(__sg);
+ return sg_len;
+ }
+ __sg->page_link &= ~0x02;
+ }
+
+ __sg = sg + sg_len;
+ list_for_each_entry(req, &mqrq->packed_list, queuelist) {
+ sg_len += blk_rq_map_sg(mq->queue, req, __sg);
+ __sg = sg + (sg_len - 1);
+ (__sg++)->page_link &= ~0x02;
+ }
+ sg_mark_end(sg + (sg_len - 1));
+ return sg_len;
+}
+
+
/*
* Prepare the sg list(s) to be handed of to the host driver
*/
@@ -393,12 +429,20 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
struct scatterlist *sg;
int i;
- if (!mqrq->bounce_buf)
- return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
+ if (!mqrq->bounce_buf) {
+ if (!list_empty(&mqrq->packed_list))
+ return mmc_queue_packed_map_sg(mq, mqrq, mqrq->sg);
+ else
+ return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
+ }
+
BUG_ON(!mqrq->bounce_sg);
- sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
+ if (!list_empty(&mqrq->packed_list))
+ sg_len = mmc_queue_packed_map_sg(mq, mqrq, mqrq->bounce_sg);
+ else
+ sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
mqrq->bounce_sg_len = sg_len;
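The page_link manipulation in mmc_queue_packed_map_sg() above relies on bit 1 of scatterlist.page_link being the list-termination marker: blk_rq_map_sg() terminates the list it builds, so that bit is cleared on every intermediate entry and sg_mark_end() re-terminates the final one. A self-contained sketch of the same gluing for one header plus a single request (map_hdr_and_data is a hypothetical helper, not part of the patch):

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

/* sg[] must have room for the header entry plus all request segments */
static unsigned int map_hdr_and_data(struct request_queue *q,
				     struct request *req,
				     void *hdr, size_t hdr_len,
				     struct scatterlist *sg)
{
	unsigned int sg_len;

	sg_set_buf(sg, hdr, hdr_len);	/* entry 0: the packed header */
	sg->page_link &= ~0x02;		/* ensure entry 0 is not marked as end */
	sg_len = 1 + blk_rq_map_sg(q, req, sg + 1);
	sg_mark_end(sg + sg_len - 1);	/* terminate the combined list */
	return sg_len;
}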
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index d2a1eb4b9f9f..4424280c0394 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -12,6 +12,14 @@ struct mmc_blk_request {
struct mmc_data data;
};
+enum mmc_packed_cmd {
+ MMC_PACKED_NONE = 0,
+ MMC_PACKED_WR_HDR,
+ MMC_PACKED_WRITE,
+ MMC_PACKED_READ,
+};
+
+
struct mmc_queue_req {
struct request *req;
struct mmc_blk_request brq;
@@ -20,6 +28,12 @@ struct mmc_queue_req {
struct scatterlist *bounce_sg;
unsigned int bounce_sg_len;
struct mmc_async_req mmc_active;
+ struct list_head packed_list;
+ u32 packed_cmd_hdr[128];
+ unsigned int packed_blocks;
+ enum mmc_packed_cmd packed_cmd;
+ int packed_fail_idx;
+ u8 packed_num;
};
struct mmc_queue {
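Note that packed_cmd_hdr is declared as 128 u32 words, i.e. exactly the one 512-byte block sent ahead of the data. A compile-time guard along these lines (illustrative, not in the patch) would pin that invariant, e.g. at the top of mmc_init_queue():

	BUILD_BUG_ON(sizeof(mqrq_cur->packed_cmd_hdr) != 512);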
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 7b81847e904b..4c3f74b04c99 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -481,6 +481,11 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
card->ext_csd.boot_ro_lockable = true;
}
+ card->ext_csd.max_packed_writes =
+ ext_csd[EXT_CSD_MAX_PACKED_WRITES];
+ card->ext_csd.max_packed_reads =
+ ext_csd[EXT_CSD_MAX_PACKED_READS];
+
if (card->ext_csd.rev >= 5) {
/* check whether the eMMC card supports HPI */
if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) {
@@ -1327,6 +1332,25 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
}
}
+ if ((host->caps2 & MMC_CAP2_PACKED_CMD) &&
+ (card->ext_csd.max_packed_writes > 0) &&
+ (card->ext_csd.max_packed_reads > 0)) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_EXP_EVENTS_CTRL,
+ EXT_CSD_PACKED_EVENT_EN,
+ card->ext_csd.generic_cmd6_time);
+ if (err && err != -EBADMSG)
+ goto free_card;
+ if (err) {
+ pr_warning("%s: Enabling packed event failed\n",
+ mmc_hostname(card->host));
+ card->ext_csd.packed_event_en = 0;
+ err = 0;
+ } else {
+ card->ext_csd.packed_event_en = 1;
+ }
+ }
+
if (!oldcard)
host->card = card;
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index ea9076608b8c..7a899ccb04f1 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -335,6 +335,7 @@ int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
ext_csd, 512);
}
+EXPORT_SYMBOL_GPL(mmc_send_ext_csd);
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 6755442e46d8..c7d90bd3f3bf 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -54,6 +54,9 @@ struct mmc_ext_csd {
u8 part_config;
u8 cache_ctrl;
u8 rst_n_function;
+ u8 max_packed_writes;
+ u8 max_packed_reads;
+ u8 packed_event_en;
unsigned int part_time; /* Units: ms */
unsigned int sa_timeout; /* Units: 100ns */
unsigned int generic_cmd6_time; /* Units: 10ms */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index ea6f1befbfc9..6224c611b5d0 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -22,6 +22,8 @@ struct mmc_request;
struct mmc_command {
u32 opcode;
u32 arg;
+#define MMC_CMD23_ARG_REL_WR (1 << 31)
+#define MMC_CMD23_ARG_PACKED ((0 << 31) | (1 << 30))
u32 resp[4];
unsigned int flags; /* expected response type */
#define MMC_RSP_PRESENT (1 << 0)
@@ -150,6 +152,7 @@ extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *);
extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
struct mmc_command *, int);
extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
+extern int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd);
#define MMC_ERASE_ARG 0x00000000
#define MMC_SECURE_ERASE_ARG 0x80000000
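The two new CMD23 argument bits follow the eMMC 4.5 SET_BLOCK_COUNT layout: bit 31 requests a reliable write, while bit 30 with bit 31 clear marks a packed command. A small sketch of how the block driver composes the argument, using the definitions above (cmd23_packed_arg is a hypothetical helper):

#include <linux/types.h>

static u32 cmd23_packed_arg(bool read, u32 packed_blocks)
{
	/* A packed read first sends only the one header block;
	 * a packed write sends the header plus data in one go. */
	return MMC_CMD23_ARG_PACKED | (read ? 1 : packed_blocks + 1);
}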
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index dd458b3d202b..023cb6f66504 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -236,6 +236,7 @@ struct mmc_host {
#define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */
#define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \
MMC_CAP2_HS200_1_2V_SDR)
+#define MMC_CAP2_PACKED_CMD (1 << 12) /* Allow packed command */
#define MMC_CAP2_BROKEN_VOLTAGE (1 << 7) /* Use the broken voltage */
#define MMC_CAP2_DETECT_ON_ERR (1 << 8) /* On I/O err check card removal */
#define MMC_CAP2_HC_ERASE_SZ (1 << 9) /* High-capacity erase size */
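A host controller driver must opt in before the core will enable packing on a card. A minimal sketch of the driver side (the surrounding probe code is assumed):

	/* e.g. in a host driver's probe routine, before mmc_add_host() */
	mmc->caps2 |= MMC_CAP2_PACKED_CMD;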
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index cb0b03e7d425..af6ab2969bed 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -139,6 +139,7 @@ static inline bool mmc_op_multi(u32 opcode)
#define R1_CURRENT_STATE(x) ((x & 0x00001E00) >> 9) /* sx, b (4 bits) */
#define R1_READY_FOR_DATA (1 << 8) /* sx, a */
#define R1_SWITCH_ERROR (1 << 7) /* sx, c */
+#define R1_EXP_EVENT (1 << 6) /* sr, a */
#define R1_URGENT_BKOPS (1 << 6) /* sr, a */
#define R1_APP_CMD (1 << 5) /* sr, c */
@@ -275,6 +276,10 @@ struct _mmc_csd {
#define EXT_CSD_FLUSH_CACHE 32 /* W */
#define EXT_CSD_CACHE_CTRL 33 /* R/W */
#define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */
+#define EXT_CSD_PACKED_FAILURE_INDEX 35 /* RO */
+#define EXT_CSD_PACKED_CMD_STATUS 36 /* RO */
+#define EXT_CSD_EXP_EVENTS_STATUS 54 /* RO, 2 bytes */
+#define EXT_CSD_EXP_EVENTS_CTRL 56 /* R/W, 2 bytes */
#define EXT_CSD_DATA_SECTOR_SIZE 61 /* R */
#define EXT_CSD_GP_SIZE_MULT 143 /* R/W */
#define EXT_CSD_PARTITION_ATTRIBUTE 156 /* R/W */
@@ -329,11 +334,22 @@ struct _mmc_csd {
#define EXT_CSD_PWR_CL_DDR_52_195 238 /* RO */
#define EXT_CSD_PWR_CL_DDR_52_360 239 /* RO */
#define EXT_CSD_POWER_OFF_LONG_TIME 247 /* RO */
+#define EXT_CSD_GENERIC_CMD6_TIME 248 /* RO */
+#define EXT_CSD_MAX_PACKED_WRITES 500 /* RO */
+#define EXT_CSD_MAX_PACKED_READS 501 /* RO */
/*
* EXT_CSD field definitions
*/
+#define EXT_CSD_PACKED_EVENT_EN (1 << 3)
+
+#define EXT_CSD_PACKED_FAILURE (1 << 3)
+
+#define EXT_CSD_PACKED_GENERIC_ERROR (1 << 0)
+#define EXT_CSD_PACKED_INDEXED_ERROR (1 << 1)
+
+
#define EXT_CSD_WR_REL_PARAM_EN (1<<2)
#define EXT_CSD_BOOT_WP_B_PWR_WP_DIS (0x40)
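Finally, the new EXT_CSD fields cooperate as in mmc_blk_packed_err_check() above: a packed failure raises an exception event, and the command status plus failure index pinpoint the broken entry. A condensed sketch of the decode (packed_fail_index is a hypothetical helper, not part of the patch):

#include <linux/types.h>

/* Returns the 0-based index of the failed packed entry, or -1 for none */
static int packed_fail_index(const u8 *ext_csd)
{
	if (!(ext_csd[EXT_CSD_EXP_EVENTS_STATUS] & EXT_CSD_PACKED_FAILURE))
		return -1;
	if (!(ext_csd[EXT_CSD_PACKED_CMD_STATUS] & EXT_CSD_PACKED_GENERIC_ERROR))
		return -1;
	if (!(ext_csd[EXT_CSD_PACKED_CMD_STATUS] & EXT_CSD_PACKED_INDEXED_ERROR))
		return -1;
	/* EXT_CSD_PACKED_FAILURE_INDEX is 1-based */
	return ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
}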