Diffstat (limited to 'drivers/mmc/card/block.c')
-rw-r--r--  drivers/mmc/card/block.c | 712
1 file changed, 646 insertions(+), 66 deletions(-)
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 61d233a7c118..71da5641e258 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -31,7 +31,11 @@
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
+#include <linux/delay.h>
+#include <linux/capability.h>
+#include <linux/compat.h>
+#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
@@ -48,6 +52,13 @@ MODULE_ALIAS("mmc:block");
#endif
#define MODULE_PARAM_PREFIX "mmcblk."
+#define INAND_CMD38_ARG_EXT_CSD 113
+#define INAND_CMD38_ARG_ERASE 0x00
+#define INAND_CMD38_ARG_TRIM 0x01
+#define INAND_CMD38_ARG_SECERASE 0x80
+#define INAND_CMD38_ARG_SECTRIM1 0x81
+#define INAND_CMD38_ARG_SECTRIM2 0x88
+
static DEFINE_MUTEX(block_mutex);
/*
@@ -64,6 +75,7 @@ static int max_devices;
/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);
+static DECLARE_BITMAP(name_use, 256);
/*
* There is one mmc_blk_data per slot.
@@ -72,9 +84,24 @@ struct mmc_blk_data {
spinlock_t lock;
struct gendisk *disk;
struct mmc_queue queue;
+ struct list_head part;
+
+ unsigned int flags;
+#define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
+#define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */
unsigned int usage;
unsigned int read_only;
+ unsigned int part_type;
+ unsigned int name_idx;
+
+ /*
+ * Only set in the main mmc_blk_data associated with the
+ * mmc_card via mmc_set_drvdata; it keeps track of the
+ * currently selected device partition.
+ */
+ unsigned int part_curr;
+ struct device_attribute force_ro;
};
static DEFINE_MUTEX(open_lock);
@@ -97,17 +124,22 @@ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
return md;
}
+static inline int mmc_get_devidx(struct gendisk *disk)
+{
+ int devmaj = MAJOR(disk_devt(disk));
+ int devidx = MINOR(disk_devt(disk)) / perdev_minors;
+
+ if (!devmaj)
+ devidx = disk->first_minor / perdev_minors;
+ return devidx;
+}
+
static void mmc_blk_put(struct mmc_blk_data *md)
{
mutex_lock(&open_lock);
md->usage--;
if (md->usage == 0) {
- int devmaj = MAJOR(disk_devt(md->disk));
- int devidx = MINOR(disk_devt(md->disk)) / perdev_minors;
-
- if (!devmaj)
- devidx = md->disk->first_minor / perdev_minors;
-
+ int devidx = mmc_get_devidx(md->disk);
blk_cleanup_queue(md->queue.queue);
__clear_bit(devidx, dev_use);
@@ -118,6 +150,38 @@ static void mmc_blk_put(struct mmc_blk_data *md)
mutex_unlock(&open_lock);
}
+static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+
+ ret = snprintf(buf, PAGE_SIZE, "%d",
+ get_disk_ro(dev_to_disk(dev)) ^
+ md->read_only);
+ mmc_blk_put(md);
+ return ret;
+}
+
+static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ char *end;
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ unsigned long set = simple_strtoul(buf, &end, 0);
+ if (end == buf) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ set_disk_ro(dev_to_disk(dev), set || md->read_only);
+ ret = count;
+out:
+ mmc_blk_put(md);
+ return ret;
+}
+
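
For illustration, here is a minimal userspace sketch of driving this attribute. The device name mmcblk0 and the sysfs path are assumptions; the "force_ro" node itself is registered per-disk by mmc_add_disk() further down:

/* Hedged sketch: force an mmcblk device read-only from userspace.
 * Writing "0" clears the flag again, unless the card itself is
 * read-only (md->read_only), which always wins. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/block/mmcblk0/force_ro", O_WRONLY);

	if (fd < 0) {
		perror("open force_ro");
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write force_ro");
	close(fd);
	return 0;
}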
static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
@@ -158,35 +222,255 @@ mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
+struct mmc_blk_ioc_data {
+ struct mmc_ioc_cmd ic;
+ unsigned char *buf;
+ u64 buf_bytes;
+};
+
+static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
+ struct mmc_ioc_cmd __user *user)
+{
+ struct mmc_blk_ioc_data *idata;
+ int err;
+
+ idata = kzalloc(sizeof(*idata), GFP_KERNEL);
+ if (!idata) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
+ err = -EFAULT;
+ goto idata_err;
+ }
+
+ idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
+ if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
+ err = -EOVERFLOW;
+ goto idata_err;
+ }
+
+ idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
+ if (!idata->buf) {
+ err = -ENOMEM;
+ goto idata_err;
+ }
+
+ if (copy_from_user(idata->buf, (void __user *)(unsigned long)
+ idata->ic.data_ptr, idata->buf_bytes)) {
+ err = -EFAULT;
+ goto copy_err;
+ }
+
+ return idata;
+
+copy_err:
+ kfree(idata->buf);
+idata_err:
+ kfree(idata);
+out:
+ return ERR_PTR(err);
+}
+
+static int mmc_blk_ioctl_cmd(struct block_device *bdev,
+ struct mmc_ioc_cmd __user *ic_ptr)
+{
+ struct mmc_blk_ioc_data *idata;
+ struct mmc_blk_data *md;
+ struct mmc_card *card;
+ struct mmc_command cmd = {0};
+ struct mmc_data data = {0};
+ struct mmc_request mrq = {0};
+ struct scatterlist sg;
+ int err;
+
+ /*
+ * The caller must have CAP_SYS_RAWIO, and must be calling this on the
+ * whole block device, not on a partition. This prevents overspray
+ * between sibling partitions.
+ */
+ if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
+ return -EPERM;
+
+ idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
+ if (IS_ERR(idata))
+ return PTR_ERR(idata);
+
+ cmd.opcode = idata->ic.opcode;
+ cmd.arg = idata->ic.arg;
+ cmd.flags = idata->ic.flags;
+
+ data.sg = &sg;
+ data.sg_len = 1;
+ data.blksz = idata->ic.blksz;
+ data.blocks = idata->ic.blocks;
+
+ sg_init_one(data.sg, idata->buf, idata->buf_bytes);
+
+ if (idata->ic.write_flag)
+ data.flags = MMC_DATA_WRITE;
+ else
+ data.flags = MMC_DATA_READ;
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+
+ md = mmc_blk_get(bdev->bd_disk);
+ if (!md) {
+ err = -EINVAL;
+ goto cmd_done;
+ }
+
+ card = md->queue.card;
+ if (IS_ERR(card)) {
+ err = PTR_ERR(card);
+ goto cmd_done;
+ }
+
+ mmc_claim_host(card->host);
+
+ if (idata->ic.is_acmd) {
+ err = mmc_app_cmd(card->host, card);
+ if (err)
+ goto cmd_rel_host;
+ }
+
+ /* data.flags must already be set before doing this. */
+ mmc_set_data_timeout(&data, card);
+ /* Allow overriding the timeout_ns for empirical tuning. */
+ if (idata->ic.data_timeout_ns)
+ data.timeout_ns = idata->ic.data_timeout_ns;
+
+ if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
+ /*
+ * Pretend this is a data transfer and rely on the host driver
+ * to compute timeout. When all host drivers support
+ * cmd.cmd_timeout for R1B, this can be changed to:
+ *
+ * mrq.data = NULL;
+ * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
+ */
+ data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
+ }
+
+ mmc_wait_for_req(card->host, &mrq);
+
+ if (cmd.error) {
+ dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
+ __func__, cmd.error);
+ err = cmd.error;
+ goto cmd_rel_host;
+ }
+ if (data.error) {
+ dev_err(mmc_dev(card->host), "%s: data error %d\n",
+ __func__, data.error);
+ err = data.error;
+ goto cmd_rel_host;
+ }
+
+ /*
+ * According to the SD specs, some commands require a delay after
+ * issuing the command.
+ */
+ if (idata->ic.postsleep_min_us)
+ usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
+
+ if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
+ err = -EFAULT;
+ goto cmd_rel_host;
+ }
+
+ if (!idata->ic.write_flag) {
+ if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
+ idata->buf, idata->buf_bytes)) {
+ err = -EFAULT;
+ goto cmd_rel_host;
+ }
+ }
+
+cmd_rel_host:
+ mmc_release_host(card->host);
+
+cmd_done:
+ mmc_blk_put(md);
+ kfree(idata->buf);
+ kfree(idata);
+ return err;
+}
+
+static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret = -EINVAL;
+ if (cmd == MMC_IOC_CMD)
+ ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif
+
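As a usage illustration, here is a hedged userspace sketch of the new ioctl, reading the 512-byte EXT_CSD register via CMD8 (SEND_EXT_CSD). The device node is an assumed example, and the response/command-type constants are re-declared locally to mirror the kernel's, since linux/mmc/ioctl.h does not export them:

/* Hedged sketch: issue SEND_EXT_CSD through MMC_IOC_CMD. Needs
 * CAP_SYS_RAWIO and must target the whole device, not a partition,
 * or the ioctl returns -EPERM as enforced above. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/mmc/ioctl.h>

/* Local mirrors of the kernel's response/command flags. */
#define MMC_RSP_PRESENT	(1 << 0)
#define MMC_RSP_CRC	(1 << 2)
#define MMC_RSP_OPCODE	(1 << 4)
#define MMC_RSP_R1	(MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
#define MMC_CMD_ADTC	(1 << 5)	/* addressed data transfer command */

int main(void)
{
	unsigned char ext_csd[512];
	struct mmc_ioc_cmd ic;
	int fd = open("/dev/mmcblk0", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&ic, 0, sizeof(ic));
	ic.opcode = 8;			/* SEND_EXT_CSD (MMC CMD8) */
	ic.arg = 0;
	ic.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
	ic.blksz = 512;
	ic.blocks = 1;
	ic.write_flag = 0;		/* read: data is copied back to us */
	mmc_ioc_cmd_set_data(ic, ext_csd);

	if (ioctl(fd, MMC_IOC_CMD, &ic) < 0)
		perror("MMC_IOC_CMD");
	else
		printf("EXT_CSD revision: %u\n", ext_csd[192]);

	close(fd);
	return 0;
}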
static const struct block_device_operations mmc_bdops = {
.open = mmc_blk_open,
.release = mmc_blk_release,
.getgeo = mmc_blk_getgeo,
.owner = THIS_MODULE,
+ .ioctl = mmc_blk_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mmc_blk_compat_ioctl,
+#endif
};
struct mmc_blk_request {
struct mmc_request mrq;
+ struct mmc_command sbc;
struct mmc_command cmd;
struct mmc_command stop;
struct mmc_data data;
};
+static inline int mmc_blk_part_switch(struct mmc_card *card,
+ struct mmc_blk_data *md)
+{
+ int ret;
+ struct mmc_blk_data *main_md = mmc_get_drvdata(card);
+ if (main_md->part_curr == md->part_type)
+ return 0;
+
+ if (mmc_card_mmc(card)) {
+ card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
+ card->ext_csd.part_config |= md->part_type;
+
+ ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_PART_CONFIG, card->ext_csd.part_config,
+ card->ext_csd.part_time);
+ if (ret)
+ return ret;
+}
+
+ main_md->part_curr = md->part_type;
+ return 0;
+}
+
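Note that mmc_blk_part_switch() assumes the caller already holds the host; it is invoked from mmc_blk_issue_rq() below, after mmc_claim_host(). It is also a no-op whenever part_curr already matches the requested partition, so the EXT_CSD write only happens on an actual partition change.
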
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
int err;
u32 result;
__be32 *blocks;
- struct mmc_request mrq;
- struct mmc_command cmd;
- struct mmc_data data;
+ struct mmc_request mrq = {0};
+ struct mmc_command cmd = {0};
+ struct mmc_data data = {0};
unsigned int timeout_us;
struct scatterlist sg;
- memset(&cmd, 0, sizeof(struct mmc_command));
-
cmd.opcode = MMC_APP_CMD;
cmd.arg = card->rca << 16;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
@@ -203,8 +487,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
cmd.arg = 0;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
- memset(&data, 0, sizeof(struct mmc_data));
-
data.timeout_ns = card->csd.tacc_ns * 100;
data.timeout_clks = card->csd.tacc_clks * 100;
@@ -223,8 +505,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
data.sg = &sg;
data.sg_len = 1;
- memset(&mrq, 0, sizeof(struct mmc_request));
-
mrq.cmd = &cmd;
mrq.data = &data;
@@ -247,10 +527,9 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
static u32 get_card_status(struct mmc_card *card, struct request *req)
{
- struct mmc_command cmd;
+ struct mmc_command cmd = {0};
int err;
- memset(&cmd, 0, sizeof(struct mmc_command));
cmd.opcode = MMC_SEND_STATUS;
if (!mmc_host_is_spi(card->host))
cmd.arg = card->rca << 16;
@@ -269,8 +548,6 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
unsigned int from, nr, arg;
int err = 0;
- mmc_claim_host(card->host);
-
if (!mmc_can_erase(card)) {
err = -EOPNOTSUPP;
goto out;
@@ -284,14 +561,22 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
else
arg = MMC_ERASE_ARG;
+ if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ INAND_CMD38_ARG_EXT_CSD,
+ arg == MMC_TRIM_ARG ?
+ INAND_CMD38_ARG_TRIM :
+ INAND_CMD38_ARG_ERASE,
+ 0);
+ if (err)
+ goto out;
+ }
err = mmc_erase(card, from, nr, arg);
out:
spin_lock_irq(&md->lock);
__blk_end_request(req, err, blk_rq_bytes(req));
spin_unlock_irq(&md->lock);
- mmc_release_host(card->host);
-
return err ? 0 : 1;
}
@@ -303,8 +588,6 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
unsigned int from, nr, arg;
int err = 0;
- mmc_claim_host(card->host);
-
if (!mmc_can_secure_erase_trim(card)) {
err = -EOPNOTSUPP;
goto out;
@@ -318,19 +601,74 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
else
arg = MMC_SECURE_ERASE_ARG;
+ if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ INAND_CMD38_ARG_EXT_CSD,
+ arg == MMC_SECURE_TRIM1_ARG ?
+ INAND_CMD38_ARG_SECTRIM1 :
+ INAND_CMD38_ARG_SECERASE,
+ 0);
+ if (err)
+ goto out;
+ }
err = mmc_erase(card, from, nr, arg);
- if (!err && arg == MMC_SECURE_TRIM1_ARG)
+ if (!err && arg == MMC_SECURE_TRIM1_ARG) {
+ if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ INAND_CMD38_ARG_EXT_CSD,
+ INAND_CMD38_ARG_SECTRIM2,
+ 0);
+ if (err)
+ goto out;
+ }
err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
+ }
out:
spin_lock_irq(&md->lock);
__blk_end_request(req, err, blk_rq_bytes(req));
spin_unlock_irq(&md->lock);
- mmc_release_host(card->host);
-
return err ? 0 : 1;
}
+static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_blk_data *md = mq->data;
+
+ /*
+ * No-op, only service this because we need REQ_FUA for reliable
+ * writes.
+ */
+ spin_lock_irq(&md->lock);
+ __blk_end_request_all(req, 0);
+ spin_unlock_irq(&md->lock);
+
+ return 1;
+}
+
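The queue advertises REQ_FLUSH | REQ_FUA via blk_queue_flush() in mmc_blk_alloc_req() below. REQ_FUA is what actually maps onto reliable writes; a bare REQ_FLUSH has nothing to flush at this point and is completed immediately.
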
+/*
+ * Reformat the current write as a reliable write, supporting
+ * both legacy and enhanced reliable-write MMC cards. In each
+ * transfer we'll handle only as much as a single reliable write
+ * can handle, finishing the request via partial completions.
+ */
+static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
+ struct mmc_card *card,
+ struct request *req)
+{
+ if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
+ /* Legacy mode imposes restrictions on transfers. */
+ if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
+ brq->data.blocks = 1;
+
+ if (brq->data.blocks > card->ext_csd.rel_sectors)
+ brq->data.blocks = card->ext_csd.rel_sectors;
+ else if (brq->data.blocks < card->ext_csd.rel_sectors)
+ brq->data.blocks = 1;
+ }
+}
+
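As a worked example of the legacy restrictions: with rel_sectors == 8, a 20-block write starting on an 8-block boundary is clamped to 8 blocks, a 5-block write collapses to a single block, and a write whose start address is unaligned is likewise forced down to one block. The rest of the request is then finished through further partial completions in the issue loop below.
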
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->data;
@@ -338,10 +676,17 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
struct mmc_blk_request brq;
int ret = 1, disable_multi = 0;
- mmc_claim_host(card->host);
+ /*
+ * Reliable writes are used to implement Forced Unit Access and
+ * REQ_META accesses, and are supported only on MMCs.
+ */
+ bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
+ (req->cmd_flags & REQ_META)) &&
+ (rq_data_dir(req) == WRITE) &&
+ (md->flags & MMC_BLK_REL_WR);
do {
- struct mmc_command cmd;
+ struct mmc_command cmd = {0};
u32 readcmd, writecmd, status = 0;
memset(&brq, 0, sizeof(struct mmc_blk_request));
@@ -374,12 +719,12 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
if (disable_multi && brq.data.blocks > 1)
brq.data.blocks = 1;
- if (brq.data.blocks > 1) {
+ if (brq.data.blocks > 1 || do_rel_wr) {
/* SPI multiblock writes terminate using a special
* token, not a STOP_TRANSMISSION request.
*/
- if (!mmc_host_is_spi(card->host)
- || rq_data_dir(req) == READ)
+ if (!mmc_host_is_spi(card->host) ||
+ rq_data_dir(req) == READ)
brq.mrq.stop = &brq.stop;
readcmd = MMC_READ_MULTIPLE_BLOCK;
writecmd = MMC_WRITE_MULTIPLE_BLOCK;
@@ -396,6 +741,38 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
brq.data.flags |= MMC_DATA_WRITE;
}
+ if (do_rel_wr)
+ mmc_apply_rel_rw(&brq, card, req);
+
+ /*
+ * Pre-defined multi-block transfers are preferable to
+ * open-ended ones (and necessary for reliable writes).
+ * However, it is not sufficient to just send CMD23 and
+ * skip the final CMD12, since on an error condition
+ * CMD12 (stop) needs to be sent anyway. This, coupled
+ * with the Auto-CMD23 enhancements provided by some
+ * hosts, means that the complexity of dealing with
+ * this is best left to the host itself. If CMD23 is
+ * supported by card and host, we'll fill in sbc and let
+ * the host deal with handling it correctly. As a result,
+ * hosts that don't expose MMC_CAP_CMD23 will observe no
+ * change in behavior.
+ *
+ * N.B.: Some MMC cards experience performance degradation
+ * with CMD23; we avoid CMD23-bounded multiblock writes for
+ * these, while retaining features like reliable writes.
+ */
+
+ if ((md->flags & MMC_BLK_CMD23) &&
+ mmc_op_multi(brq.cmd.opcode) &&
+ (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
+ brq.sbc.opcode = MMC_SET_BLOCK_COUNT;
+ brq.sbc.arg = brq.data.blocks |
+ (do_rel_wr ? (1 << 31) : 0);
+ brq.sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ brq.mrq.sbc = &brq.sbc;
+ }
+
mmc_set_data_timeout(&brq.data, card);
brq.data.sg = mq->sg;
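
In terms of bus traffic, when sbc is filled in the host issues CMD23 (SET_BLOCK_COUNT, with bit 31 of the argument set for a reliable write) right before CMD18/CMD25, and a successful pre-defined transfer needs no closing CMD12; the stop command stays wired up in brq so the host can still abort cleanly on errors, as the comment above explains.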
@@ -431,7 +808,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
* until later as we need to wait for the card to leave
* programming mode even when things go wrong.
*/
- if (brq.cmd.error || brq.data.error || brq.stop.error) {
+ if (brq.sbc.error || brq.cmd.error ||
+ brq.data.error || brq.stop.error) {
if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
/* Redo read one sector at a time */
printk(KERN_WARNING "%s: retrying using single "
@@ -442,6 +820,13 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
status = get_card_status(card, req);
}
+ if (brq.sbc.error) {
+ printk(KERN_ERR "%s: error %d sending SET_BLOCK_COUNT "
+ "command, response %#x, card status %#x\n",
+ req->rq_disk->disk_name, brq.sbc.error,
+ brq.sbc.resp[0], status);
+ }
+
if (brq.cmd.error) {
printk(KERN_ERR "%s: error %d sending read/write "
"command, response %#x, card status %#x\n",
@@ -520,8 +905,6 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
spin_unlock_irq(&md->lock);
} while (ret);
- mmc_release_host(card->host);
-
return 1;
cmd_err:
@@ -548,8 +931,6 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
spin_unlock_irq(&md->lock);
}
- mmc_release_host(card->host);
-
spin_lock_irq(&md->lock);
while (ret)
ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
@@ -560,14 +941,31 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
+ int ret;
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+
+ mmc_claim_host(card->host);
+ ret = mmc_blk_part_switch(card, md);
+ if (ret) {
+ ret = 0;
+ goto out;
+ }
+
if (req->cmd_flags & REQ_DISCARD) {
if (req->cmd_flags & REQ_SECURE)
- return mmc_blk_issue_secdiscard_rq(mq, req);
+ ret = mmc_blk_issue_secdiscard_rq(mq, req);
else
- return mmc_blk_issue_discard_rq(mq, req);
+ ret = mmc_blk_issue_discard_rq(mq, req);
+ } else if (req->cmd_flags & REQ_FLUSH) {
+ ret = mmc_blk_issue_flush(mq, req);
} else {
- return mmc_blk_issue_rw_rq(mq, req);
+ ret = mmc_blk_issue_rw_rq(mq, req);
}
+
+out:
+ mmc_release_host(card->host);
+ return ret;
}
static inline int mmc_blk_readonly(struct mmc_card *card)
@@ -576,7 +974,11 @@ static inline int mmc_blk_readonly(struct mmc_card *card)
!(card->csd.cmdclass & CCC_BLOCK_WRITE);
}
-static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
+static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
+ struct device *parent,
+ sector_t size,
+ bool default_ro,
+ const char *subname)
{
struct mmc_blk_data *md;
int devidx, ret;
@@ -592,6 +994,19 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
goto out;
}
+ /*
+ * !subname implies we are creating the main mmc_blk_data that will
+ * be associated with the mmc_card via mmc_set_drvdata. Due to device
+ * partitions, devidx will no longer coincide with a per-physical-card
+ * index, so we keep track of a separate name index.
+ */
+ if (!subname) {
+ md->name_idx = find_first_zero_bit(name_use, max_devices);
+ __set_bit(md->name_idx, name_use);
+ } else
+ md->name_idx = ((struct mmc_blk_data *)
+ dev_to_disk(parent)->private_data)->name_idx;
/*
* Set the read-only status based on the supported commands
@@ -606,6 +1021,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
}
spin_lock_init(&md->lock);
+ INIT_LIST_HEAD(&md->part);
md->usage = 1;
ret = mmc_init_queue(&md->queue, card, &md->lock);
@@ -620,8 +1036,8 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
md->disk->fops = &mmc_bdops;
md->disk->private_data = md;
md->disk->queue = md->queue.queue;
- md->disk->driverfs_dev = &card->dev;
- set_disk_ro(md->disk, md->read_only);
+ md->disk->driverfs_dev = parent;
+ set_disk_ro(md->disk, md->read_only || default_ro);
/*
* As discussed on lkml, GENHD_FL_REMOVABLE should:
@@ -636,32 +1052,107 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
*/
snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
- "mmcblk%d", devidx);
+ "mmcblk%d%s", md->name_idx, subname ? subname : "");
blk_queue_logical_block_size(md->queue.queue, 512);
+ set_capacity(md->disk, size);
+
+ if (mmc_host_cmd23(card->host)) {
+ if (mmc_card_mmc(card) ||
+ (mmc_card_sd(card) &&
+ card->scr.cmds & SD_SCR_CMD23_SUPPORT))
+ md->flags |= MMC_BLK_CMD23;
+ }
+
+ if (mmc_card_mmc(card) &&
+ md->flags & MMC_BLK_CMD23 &&
+ ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
+ card->ext_csd.rel_sectors)) {
+ md->flags |= MMC_BLK_REL_WR;
+ blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
+ }
+
+ return md;
+
+ err_putdisk:
+ put_disk(md->disk);
+ err_kfree:
+ kfree(md);
+ out:
+ return ERR_PTR(ret);
+}
+
+static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
+{
+ sector_t size;
+ struct mmc_blk_data *md;
if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
/*
* The EXT_CSD sector count is in number of 512 byte
* sectors.
*/
- set_capacity(md->disk, card->ext_csd.sectors);
+ size = card->ext_csd.sectors;
} else {
/*
* The CSD capacity field is in units of read_blkbits.
* set_capacity takes units of 512 bytes.
*/
- set_capacity(md->disk,
- card->csd.capacity << (card->csd.read_blkbits - 9));
+ size = card->csd.capacity << (card->csd.read_blkbits - 9);
}
+
+ md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL);
return md;
+}
- err_putdisk:
- put_disk(md->disk);
- err_kfree:
- kfree(md);
- out:
- return ERR_PTR(ret);
+static int mmc_blk_alloc_part(struct mmc_card *card,
+ struct mmc_blk_data *md,
+ unsigned int part_type,
+ sector_t size,
+ bool default_ro,
+ const char *subname)
+{
+ char cap_str[10];
+ struct mmc_blk_data *part_md;
+
+ part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
+ subname);
+ if (IS_ERR(part_md))
+ return PTR_ERR(part_md);
+ part_md->part_type = part_type;
+ list_add(&part_md->part, &md->part);
+
+ string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
+ cap_str, sizeof(cap_str));
+ printk(KERN_INFO "%s: %s %s partition %u %s\n",
+ part_md->disk->disk_name, mmc_card_id(card),
+ mmc_card_name(card), part_md->part_type, cap_str);
+ return 0;
+}
+
+static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
+{
+ int ret = 0;
+
+ if (!mmc_card_mmc(card))
+ return 0;
+
+ if (card->ext_csd.boot_size) {
+ ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT0,
+ card->ext_csd.boot_size >> 9,
+ true,
+ "boot0");
+ if (ret)
+ return ret;
+ ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT1,
+ card->ext_csd.boot_size >> 9,
+ true,
+ "boot1");
+ if (ret)
+ return ret;
+ }
+
+ return ret;
}
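
With the mmcblk%d%s naming scheme above, the boot areas of the first card surface as mmcblk0boot0 and mmcblk0boot1. They are created read-only by default (default_ro is true) and can be made writable through the force_ro attribute shown earlier.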
static int
@@ -682,9 +1173,81 @@ mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
return 0;
}
+static void mmc_blk_remove_req(struct mmc_blk_data *md)
+{
+ if (md) {
+ if (md->disk->flags & GENHD_FL_UP) {
+ device_remove_file(disk_to_dev(md->disk), &md->force_ro);
+
+ /* Stop new requests from getting into the queue */
+ del_gendisk(md->disk);
+ }
+
+ /* Then flush out any already in there */
+ mmc_cleanup_queue(&md->queue);
+ mmc_blk_put(md);
+ }
+}
+
+static void mmc_blk_remove_parts(struct mmc_card *card,
+ struct mmc_blk_data *md)
+{
+ struct list_head *pos, *q;
+ struct mmc_blk_data *part_md;
+
+ __clear_bit(md->name_idx, name_use);
+ list_for_each_safe(pos, q, &md->part) {
+ part_md = list_entry(pos, struct mmc_blk_data, part);
+ list_del(pos);
+ mmc_blk_remove_req(part_md);
+ }
+}
+
+static int mmc_add_disk(struct mmc_blk_data *md)
+{
+ int ret;
+
+ add_disk(md->disk);
+ md->force_ro.show = force_ro_show;
+ md->force_ro.store = force_ro_store;
+ sysfs_attr_init(&md->force_ro.attr);
+ md->force_ro.attr.name = "force_ro";
+ md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
+ ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
+ if (ret)
+ del_gendisk(md->disk);
+
+ return ret;
+}
+
+static const struct mmc_fixup blk_fixups[] =
+{
+ MMC_FIXUP("SEM02G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM04G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM08G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM16G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM32G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
+
+ /*
+ * Some MMC cards experience performance degradation with CMD23
+ * instead of CMD12-bounded multiblock transfers. For now we'll
+ * blacklist what's bad...
+ * - Certain Toshiba cards.
+ *
+ * N.B. This doesn't affect SD cards.
+ */
+ MMC_FIXUP("MMC08G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP("MMC16G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+ END_FIXUP
+};
+
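Each MMC_FIXUP() entry matches on the CID product name and the manufacturer/OEM IDs, with CID_OEMID_ANY acting as a wildcard. mmc_fixup_device(), called from mmc_blk_probe() below, walks this table and runs the add_quirk/add_quirk_mmc hooks, so the quirk bits tested earlier are in place before any requests are issued.
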
static int mmc_blk_probe(struct mmc_card *card)
{
- struct mmc_blk_data *md;
+ struct mmc_blk_data *md, *part_md;
int err;
char cap_str[10];
@@ -708,14 +1271,24 @@ static int mmc_blk_probe(struct mmc_card *card)
md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
cap_str, md->read_only ? "(ro)" : "");
+ if (mmc_blk_alloc_parts(card, md))
+ goto out;
+
mmc_set_drvdata(card, md);
- add_disk(md->disk);
+ mmc_fixup_device(card, blk_fixups);
+
+ if (mmc_add_disk(md))
+ goto out;
+
+ list_for_each_entry(part_md, &md->part, part) {
+ if (mmc_add_disk(part_md))
+ goto out;
+ }
return 0;
out:
- mmc_cleanup_queue(&md->queue);
- mmc_blk_put(md);
-
+ mmc_blk_remove_parts(card, md);
+ mmc_blk_remove_req(md);
return err;
}
@@ -723,36 +1296,43 @@ static void mmc_blk_remove(struct mmc_card *card)
{
struct mmc_blk_data *md = mmc_get_drvdata(card);
- if (md) {
- /* Stop new requests from getting into the queue */
- del_gendisk(md->disk);
-
- /* Then flush out any already in there */
- mmc_cleanup_queue(&md->queue);
-
- mmc_blk_put(md);
- }
+ mmc_blk_remove_parts(card, md);
+ mmc_blk_remove_req(md);
mmc_set_drvdata(card, NULL);
}
#ifdef CONFIG_PM
static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
{
+ struct mmc_blk_data *part_md;
struct mmc_blk_data *md = mmc_get_drvdata(card);
if (md) {
mmc_queue_suspend(&md->queue);
+ list_for_each_entry(part_md, &md->part, part) {
+ mmc_queue_suspend(&part_md->queue);
+ }
}
return 0;
}
static int mmc_blk_resume(struct mmc_card *card)
{
+ struct mmc_blk_data *part_md;
struct mmc_blk_data *md = mmc_get_drvdata(card);
if (md) {
mmc_blk_set_blksize(md, card);
+
+ /*
+ * Resume involves the card going into idle state,
+ * so current partition is always the main one.
+ */
+ md->part_curr = md->part_type;
mmc_queue_resume(&md->queue);
+ list_for_each_entry(part_md, &md->part, part) {
+ mmc_queue_resume(&part_md->queue);
+ }
}
return 0;
}