Diffstat (limited to 'drivers/target/target_core_user.c')
-rw-r--r--  drivers/target/target_core_user.c | 182
1 file changed, 90 insertions(+), 92 deletions(-)
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 9425354aef99..d6634baebb47 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -601,7 +601,7 @@ static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
size = round_up(size+offset, PAGE_SIZE);
while (size) {
- flush_dcache_page(virt_to_page(start));
+ flush_dcache_page(vmalloc_to_page(start));
start += PAGE_SIZE;
size -= PAGE_SIZE;
}
@@ -669,15 +669,17 @@ static void scatter_data_area(struct tcmu_dev *udev,
void *from, *to = NULL;
size_t copy_bytes, to_offset, offset;
struct scatterlist *sg;
- struct page *page;
+ struct page *page = NULL;
for_each_sg(data_sg, sg, data_nents, i) {
int sg_remaining = sg->length;
from = kmap_atomic(sg_page(sg)) + sg->offset;
while (sg_remaining > 0) {
if (block_remaining == 0) {
- if (to)
+ if (to) {
+ flush_dcache_page(page);
kunmap_atomic(to);
+ }
block_remaining = DATA_BLOCK_SIZE;
dbi = tcmu_cmd_get_dbi(tcmu_cmd);
@@ -722,7 +724,6 @@ static void scatter_data_area(struct tcmu_dev *udev,
memcpy(to + offset,
from + sg->length - sg_remaining,
copy_bytes);
- tcmu_flush_dcache_range(to, copy_bytes);
}
sg_remaining -= copy_bytes;
@@ -731,8 +732,10 @@ static void scatter_data_area(struct tcmu_dev *udev,
kunmap_atomic(from - sg->offset);
}
- if (to)
+ if (to) {
+ flush_dcache_page(page);
kunmap_atomic(to);
+ }
}
static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
@@ -778,13 +781,13 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
dbi = tcmu_cmd_get_dbi(cmd);
page = tcmu_get_block_page(udev, dbi);
from = kmap_atomic(page);
+ flush_dcache_page(page);
}
copy_bytes = min_t(size_t, sg_remaining,
block_remaining);
if (read_len < copy_bytes)
copy_bytes = read_len;
offset = DATA_BLOCK_SIZE - block_remaining;
- tcmu_flush_dcache_range(from, copy_bytes);
memcpy(to + sg->length - sg_remaining, from + offset,
copy_bytes);
@@ -882,41 +885,24 @@ static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
return command_size;
}
-static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
- struct timer_list *timer)
+static void tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
+ struct timer_list *timer)
{
- struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
- int cmd_id;
-
- if (tcmu_cmd->cmd_id)
- goto setup_timer;
-
- cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
- if (cmd_id < 0) {
- pr_err("tcmu: Could not allocate cmd id.\n");
- return cmd_id;
- }
- tcmu_cmd->cmd_id = cmd_id;
-
- pr_debug("allocated cmd %u for dev %s tmo %lu\n", tcmu_cmd->cmd_id,
- udev->name, tmo / MSEC_PER_SEC);
-
-setup_timer:
if (!tmo)
- return 0;
+ return;
tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
if (!timer_pending(timer))
mod_timer(timer, tcmu_cmd->deadline);
- return 0;
+ pr_debug("Timeout set up for cmd %p, dev = %s, tmo = %lu\n", tcmu_cmd,
+ tcmu_cmd->tcmu_dev->name, tmo / MSEC_PER_SEC);
}
static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
{
struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
unsigned int tmo;
- int ret;
/*
* For backwards compat if qfull_time_out is not set use
@@ -931,13 +917,11 @@ static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
else
tmo = TCMU_TIME_OUT;
- ret = tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);
- if (ret)
- return ret;
+ tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);
list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
- pr_debug("adding cmd %u on dev %s to ring space wait queue\n",
- tcmu_cmd->cmd_id, udev->name);
+ pr_debug("adding cmd %p on dev %s to ring space wait queue\n",
+ tcmu_cmd, udev->name);
return 0;
}
@@ -959,7 +943,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
struct tcmu_mailbox *mb;
struct tcmu_cmd_entry *entry;
struct iovec *iov;
- int iov_cnt, ret;
+ int iov_cnt, cmd_id;
uint32_t cmd_head;
uint64_t cdb_off;
bool copy_to_data_area;
@@ -1026,7 +1010,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
entry->hdr.cmd_id = 0; /* not used for PAD */
entry->hdr.kflags = 0;
entry->hdr.uflags = 0;
- tcmu_flush_dcache_range(entry, sizeof(*entry));
+ tcmu_flush_dcache_range(entry, sizeof(entry->hdr));
UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
tcmu_flush_dcache_range(mb, sizeof(*mb));
@@ -1060,14 +1044,21 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
}
entry->req.iov_bidi_cnt = iov_cnt;
- ret = tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out,
- &udev->cmd_timer);
- if (ret) {
- tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
+ cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
+ if (cmd_id < 0) {
+ pr_err("tcmu: Could not allocate cmd id.\n");
+ tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
*scsi_err = TCM_OUT_OF_RESOURCES;
return -1;
}
+ tcmu_cmd->cmd_id = cmd_id;
+
+ pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id,
+ tcmu_cmd, udev->name);
+
+ tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer);
+
entry->hdr.cmd_id = tcmu_cmd->cmd_id;
/*
@@ -1084,7 +1075,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
cdb_off = CMDR_OFF + cmd_head + base_command_size;
memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
entry->req.cdb_off = cdb_off;
- tcmu_flush_dcache_range(entry, sizeof(*entry));
+ tcmu_flush_dcache_range(entry, command_size);
UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
tcmu_flush_dcache_range(mb, sizeof(*mb));
@@ -1232,7 +1223,14 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
- tcmu_flush_dcache_range(entry, sizeof(*entry));
+ /*
+ * Flush max. up to end of cmd ring since current entry might
+ * be a padding that is shorter than sizeof(*entry)
+ */
+ size_t ring_left = head_to_end(udev->cmdr_last_cleaned,
+ udev->cmdr_size);
+ tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ?
+ ring_left : sizeof(*entry));
if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
UPDATE_HEAD(udev->cmdr_last_cleaned,
@@ -1279,50 +1277,39 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
return handled;
}
-static int tcmu_check_expired_cmd(int id, void *p, void *data)
+static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd)
{
- struct tcmu_cmd *cmd = p;
- struct tcmu_dev *udev = cmd->tcmu_dev;
- u8 scsi_status;
struct se_cmd *se_cmd;
- bool is_running;
-
- if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
- return 0;
if (!time_after(jiffies, cmd->deadline))
- return 0;
+ return;
- is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags);
+ set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
+ list_del_init(&cmd->queue_entry);
se_cmd = cmd->se_cmd;
+ cmd->se_cmd = NULL;
- if (is_running) {
- /*
- * If cmd_time_out is disabled but qfull is set deadline
- * will only reflect the qfull timeout. Ignore it.
- */
- if (!udev->cmd_time_out)
- return 0;
+ pr_debug("Timing out inflight cmd %u on dev %s.\n",
+ cmd->cmd_id, cmd->tcmu_dev->name);
- set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
- /*
- * target_complete_cmd will translate this to LUN COMM FAILURE
- */
- scsi_status = SAM_STAT_CHECK_CONDITION;
- list_del_init(&cmd->queue_entry);
- cmd->se_cmd = NULL;
- } else {
- list_del_init(&cmd->queue_entry);
- idr_remove(&udev->commands, id);
- tcmu_free_cmd(cmd);
- scsi_status = SAM_STAT_TASK_SET_FULL;
- }
+ target_complete_cmd(se_cmd, SAM_STAT_CHECK_CONDITION);
+}
- pr_debug("Timing out cmd %u on dev %s that is %s.\n",
- id, udev->name, is_running ? "inflight" : "queued");
+static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
+{
+ struct se_cmd *se_cmd;
- target_complete_cmd(se_cmd, scsi_status);
- return 0;
+ if (!time_after(jiffies, cmd->deadline))
+ return;
+
+ pr_debug("Timing out queued cmd %p on dev %s.\n",
+ cmd, cmd->tcmu_dev->name);
+
+ list_del_init(&cmd->queue_entry);
+ se_cmd = cmd->se_cmd;
+ tcmu_free_cmd(cmd);
+
+ target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
}
static void tcmu_device_timedout(struct tcmu_dev *udev)
@@ -1407,16 +1394,15 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
return &udev->se_dev;
}
-static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
+static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
{
struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
LIST_HEAD(cmds);
- bool drained = true;
sense_reason_t scsi_ret;
int ret;
if (list_empty(&udev->qfull_queue))
- return true;
+ return;
pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
@@ -1425,11 +1411,10 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
list_del_init(&tcmu_cmd->queue_entry);
- pr_debug("removing cmd %u on dev %s from queue\n",
- tcmu_cmd->cmd_id, udev->name);
+ pr_debug("removing cmd %p on dev %s from queue\n",
+ tcmu_cmd, udev->name);
if (fail) {
- idr_remove(&udev->commands, tcmu_cmd->cmd_id);
/*
* We were not able to even start the command, so
* fail with busy to allow a retry in case runner
@@ -1444,10 +1429,8 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
if (ret < 0) {
- pr_debug("cmd %u on dev %s failed with %u\n",
- tcmu_cmd->cmd_id, udev->name, scsi_ret);
-
- idr_remove(&udev->commands, tcmu_cmd->cmd_id);
+ pr_debug("cmd %p on dev %s failed with %u\n",
+ tcmu_cmd, udev->name, scsi_ret);
/*
* Ignore scsi_ret for now. target_complete_cmd
* drops it.
@@ -1462,13 +1445,11 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
* the queue
*/
list_splice_tail(&cmds, &udev->qfull_queue);
- drained = false;
break;
}
}
tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
- return drained;
}
static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
@@ -1652,6 +1633,8 @@ static void tcmu_dev_kref_release(struct kref *kref)
if (tcmu_check_and_free_pending_cmd(cmd) != 0)
all_expired = false;
}
+ if (!list_empty(&udev->qfull_queue))
+ all_expired = false;
idr_destroy(&udev->commands);
WARN_ON(!all_expired);
@@ -2037,9 +2020,6 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
mutex_lock(&udev->cmdr_lock);
idr_for_each_entry(&udev->commands, cmd, i) {
- if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags))
- continue;
-
pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
cmd->cmd_id, udev->name,
test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));
@@ -2077,6 +2057,8 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
del_timer(&udev->cmd_timer);
+ run_qfull_queue(udev, false);
+
mutex_unlock(&udev->cmdr_lock);
}
@@ -2698,6 +2680,7 @@ static void find_free_blocks(void)
static void check_timedout_devices(void)
{
struct tcmu_dev *udev, *tmp_dev;
+ struct tcmu_cmd *cmd, *tmp_cmd;
LIST_HEAD(devs);
spin_lock_bh(&timed_out_udevs_lock);
@@ -2708,9 +2691,24 @@ static void check_timedout_devices(void)
spin_unlock_bh(&timed_out_udevs_lock);
mutex_lock(&udev->cmdr_lock);
- idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
- tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
+ /*
+ * If cmd_time_out is disabled but qfull is set deadline
+ * will only reflect the qfull timeout. Ignore it.
+ */
+ if (udev->cmd_time_out) {
+ list_for_each_entry_safe(cmd, tmp_cmd,
+ &udev->inflight_queue,
+ queue_entry) {
+ tcmu_check_expired_ring_cmd(cmd);
+ }
+ tcmu_set_next_deadline(&udev->inflight_queue,
+ &udev->cmd_timer);
+ }
+ list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue,
+ queue_entry) {
+ tcmu_check_expired_queue_cmd(cmd);
+ }
tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
mutex_unlock(&udev->cmdr_lock);