author	Roland Dreier <roland@purestorage.com>	2011-09-28 22:12:07 -0700
committer	Greg Kroah-Hartman <gregkh@suse.de>	2011-11-11 09:42:32 -0800
commit	4013ba5c21af68a4257729ff04b0441165ffe215 (patch)
tree	b0828015b0d9002ca5c9a1579a34ecda67e857b5 /drivers/target
parent	1c146e2bbcbac885a02e90e0614d7287268b45d1 (diff)
target: Prevent cmd->se_queue_node double add
commit 79a7fef26431830e22e282053d050af790117db8 upstream.

This patch addresses a bug with the lio-core-2.6.git conversion of transport_add_cmd_to_queue() to use a single embedded list_head, instead of individual struct se_queue_req allocations that allowed a single se_cmd to be added to the queue multiple times. This was changed in the following:

commit 2a9e4d5ca5d99f4c600578d6285d45142e7e5208
Author: Andy Grover <agrover@redhat.com>
Date: Tue Apr 26 17:45:51 2011 -0700

    target: Embed qr in struct se_cmd

The problem is that some target code still assumes performing multiple adds is allowed via transport_add_cmd_to_queue(), which ends up causing list corruption in the qobj->qobj_list code. This patch addresses this by removing an existing struct se_cmd from the list before the add, and removes an unnecessary list walk in transport_remove_cmd_from_queue().

It also changes cmd->t_transport_queue_active to use explicit sets instead of increment/decrement to prevent confusion during exception path handling.

Signed-off-by: Roland Dreier <roland@purestorage.com>
Cc: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@risingtidesystems.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
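The pattern the patch converges on (a guarded re-add in the queueing path, plus list_del_init() on every removal path so a dequeued node satisfies list_empty() and can safely be added again) is sketched below in a minimal, self-contained userspace form. The miniature list helpers only mimic the semantics of <linux/list.h>, locking is omitted, and the cmd/queue_obj types and queue_add()/queue_remove() names are simplified stand-ins for the driver's structures, not the actual code; note also that the real kernel's plain list_del() poisons the node's pointers, which is why re-addable nodes must be removed with list_del_init().

#include <stdio.h>
#include <stdbool.h>

/* Miniature stand-ins for the kernel's struct list_head helpers. */
struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h->prev = h;
}

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

/*
 * Like the kernel's list_del_init(): unlink the node and re-initialize it,
 * so list_empty(node) is true afterwards and the node may be added again.
 */
static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	INIT_LIST_HEAD(entry);
}

static bool list_empty(const struct list_head *h)
{
	return h->next == h;
}

/* Simplified stand-ins for struct se_cmd and struct se_queue_obj. */
struct cmd {
	int queue_active;		/* set to 0/1 rather than inc/dec */
	struct list_head queue_node;	/* like se_cmd->se_queue_node */
};

struct queue_obj {
	int queue_cnt;
	struct list_head list;		/* like qobj->qobj_list */
};

/* Guarded add: safe even if @c is already on the queue. */
static void queue_add(struct queue_obj *q, struct cmd *c)
{
	if (!list_empty(&c->queue_node))
		list_del_init(&c->queue_node);	/* drop the old position */
	else
		q->queue_cnt++;			/* count the cmd only once */

	list_add_tail(&c->queue_node, &q->list);
	c->queue_active = 1;
}

static void queue_remove(struct queue_obj *q, struct cmd *c)
{
	if (list_empty(&c->queue_node))
		return;
	c->queue_active = 0;
	q->queue_cnt--;
	list_del_init(&c->queue_node);
}

int main(void)
{
	struct queue_obj q = { .queue_cnt = 0 };
	struct cmd c = { .queue_active = 0 };

	INIT_LIST_HEAD(&q.list);
	INIT_LIST_HEAD(&c.queue_node);	/* done once, as in transport_init_se_cmd() */

	queue_add(&q, &c);
	queue_add(&q, &c);	/* double add: no list corruption, counted once */
	printf("queue_cnt after double add: %d\n", q.queue_cnt);	/* 1 */

	queue_remove(&q, &c);
	printf("queue_cnt after remove: %d, list empty: %d\n",
	       q.queue_cnt, list_empty(&q.list));			/* 0, 1 */
	return 0;
}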
Diffstat (limited to 'drivers/target')
-rw-r--r--	drivers/target/target_core_tmr.c	|  2
-rw-r--r--	drivers/target/target_core_transport.c	| 31
2 files changed, 16 insertions, 17 deletions
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 27d4925e51c3..7bce92fc9de6 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -340,7 +340,7 @@ int core_tmr_lun_reset(
atomic_dec(&cmd->t_transport_queue_active);
atomic_dec(&qobj->queue_cnt);
- list_del(&cmd->se_queue_node);
+ list_del_init(&cmd->se_queue_node);
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index a4b0a8d27f25..df64f8b9d3bf 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -621,8 +621,6 @@ static void transport_add_cmd_to_queue(
struct se_queue_obj *qobj = &dev->dev_queue_obj;
unsigned long flags;
- INIT_LIST_HEAD(&cmd->se_queue_node);
-
if (t_state) {
spin_lock_irqsave(&cmd->t_state_lock, flags);
cmd->t_state = t_state;
@@ -631,15 +629,21 @@ static void transport_add_cmd_to_queue(
}
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+
+ /* If the cmd is already on the list, remove it before we add it */
+ if (!list_empty(&cmd->se_queue_node))
+ list_del(&cmd->se_queue_node);
+ else
+ atomic_inc(&qobj->queue_cnt);
+
if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) {
cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL;
list_add(&cmd->se_queue_node, &qobj->qobj_list);
} else
list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
- atomic_inc(&cmd->t_transport_queue_active);
+ atomic_set(&cmd->t_transport_queue_active, 1);
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
- atomic_inc(&qobj->queue_cnt);
wake_up_interruptible(&qobj->thread_wq);
}
@@ -656,9 +660,9 @@ transport_get_cmd_from_queue(struct se_queue_obj *qobj)
}
cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
- atomic_dec(&cmd->t_transport_queue_active);
+ atomic_set(&cmd->t_transport_queue_active, 0);
- list_del(&cmd->se_queue_node);
+ list_del_init(&cmd->se_queue_node);
atomic_dec(&qobj->queue_cnt);
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
@@ -668,7 +672,6 @@ transport_get_cmd_from_queue(struct se_queue_obj *qobj)
static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
struct se_queue_obj *qobj)
{
- struct se_cmd *t;
unsigned long flags;
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
@@ -676,14 +679,9 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
return;
}
-
- list_for_each_entry(t, &qobj->qobj_list, se_queue_node)
- if (t == cmd) {
- atomic_dec(&cmd->t_transport_queue_active);
- atomic_dec(&qobj->queue_cnt);
- list_del(&cmd->se_queue_node);
- break;
- }
+ atomic_set(&cmd->t_transport_queue_active, 0);
+ atomic_dec(&qobj->queue_cnt);
+ list_del_init(&cmd->se_queue_node);
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
if (atomic_read(&cmd->t_transport_queue_active)) {
@@ -1067,7 +1065,7 @@ static void transport_release_all_cmds(struct se_device *dev)
list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list,
se_queue_node) {
t_state = cmd->t_state;
- list_del(&cmd->se_queue_node);
+ list_del_init(&cmd->se_queue_node);
spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock,
flags);
@@ -1598,6 +1596,7 @@ void transport_init_se_cmd(
INIT_LIST_HEAD(&cmd->se_delayed_node);
INIT_LIST_HEAD(&cmd->se_ordered_node);
INIT_LIST_HEAD(&cmd->se_qf_node);
+ INIT_LIST_HEAD(&cmd->se_queue_node);
INIT_LIST_HEAD(&cmd->t_task_list);
init_completion(&cmd->transport_lun_fe_stop_comp);