author     Mike Christie <mchristi@redhat.com>              2017-03-01 23:13:26 -0600
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2017-12-20 10:07:26 +0100
commit     0d34f4770ea11c92af2c6a15c73aaebc75ea94ed (patch)
tree       eebdd4463253e591e0d8fd5aa3b204a280b04524 /drivers/target
parent     8f60ef94477cb0dd3023b337640ea634886ac1d1 (diff)
target: Use system workqueue for ALUA transitions
[ Upstream commit 207ee84133c00a8a2a5bdec94df4a5b37d78881c ]

If tcmu-runner is processing a STPG and needs to change the kernel's ALUA
state, then we cannot use the same workqueue for task management requests
and ALUA transitions, because we could deadlock. The problem occurs when a
STPG times out before tcmu-runner is able to call into

    target_tg_pt_gp_alua_access_state_store ->
        core_alua_do_port_transition ->
            core_alua_do_transition_tg_pt ->
                queue_work

In this case, the tmr is on the work queue waiting for the STPG to
complete, but the STPG transition is now queued behind the waiting tmr.

Note: This bug will also be fixed by this patch:
http://www.spinics.net/lists/target-devel/msg14560.html
which switches the tmr code to use the system workqueues.

For both, I am not sure if we need a dedicated workqueue, since it is not
a performance path and I do not think we need WQ_MEM_RECLAIM to make
forward progress to free up memory like the block layer does.

Signed-off-by: Mike Christie <mchristi@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
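The changelog describes a same-workqueue dependency deadlock: a work item that blocks waiting for a result can never be unblocked if the item that would produce that result is queued behind it on the same serialized queue. Below is a minimal, hedged sketch of that pattern, not the in-tree target code; example_wq, tmr_work_fn and transition_work_fn are hypothetical names, and the ordered queue merely stands in for the dedicated per-device queue (the role tmr_wq plays in the changelog). It also shows the fix mirrored by this patch: running the second item on the system workqueue via schedule_delayed_work().

/* Hedged sketch only -- hypothetical names, not the in-tree target code. */
#include <linux/workqueue.h>
#include <linux/completion.h>

static struct workqueue_struct *example_wq;	/* stands in for a dedicated per-device queue */
static DECLARE_COMPLETION(transition_done);

/* Produces the result the blocked item below is waiting for. */
static void transition_work_fn(struct work_struct *work)
{
	/* ... perform the state transition ... */
	complete(&transition_done);
}
static DECLARE_DELAYED_WORK(transition_work, transition_work_fn);

/* Blocks until the transition above has finished. */
static void tmr_work_fn(struct work_struct *work)
{
	wait_for_completion(&transition_done);
}
static DECLARE_WORK(tmr_work, tmr_work_fn);

static void demo(void)
{
	/* Ordered queue: items run one at a time, in queueing order. */
	example_wq = alloc_ordered_workqueue("example_wq", 0);

	queue_work(example_wq, &tmr_work);	/* starts running, then blocks */

	/*
	 * Deadlock: transition_work could only run after tmr_work returns,
	 * but tmr_work only returns once transition_work has completed:
	 *
	 *	queue_delayed_work(example_wq, &transition_work, 0);
	 *
	 * Fix mirrored by this patch: put the transition on the system
	 * workqueue instead, where it can run concurrently.
	 */
	schedule_delayed_work(&transition_work, 0);
}

As the changelog notes, the shared system workqueue is adequate here because ALUA transitions are not a performance path and do not need the WQ_MEM_RECLAIM forward-progress guarantee a dedicated rescuer-backed queue would provide.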
Diffstat (limited to 'drivers/target')
-rw-r--r--  drivers/target/target_core_alua.c | 8
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 4c82bbe19003..60891241858e 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -1118,13 +1118,11 @@ static int core_alua_do_transition_tg_pt(
 		unsigned long transition_tmo;
 
 		transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
-		queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
-				   &tg_pt_gp->tg_pt_gp_transition_work,
-				   transition_tmo);
+		schedule_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work,
+				      transition_tmo);
 	} else {
 		tg_pt_gp->tg_pt_gp_transition_complete = &wait;
-		queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
-				   &tg_pt_gp->tg_pt_gp_transition_work, 0);
+		schedule_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work, 0);
 		wait_for_completion(&wait);
 		tg_pt_gp->tg_pt_gp_transition_complete = NULL;
 	}
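For reference, schedule_delayed_work() does not change the delayed-work semantics: it is a thin inline wrapper that queues onto the shared system workqueue, roughly as follows (simplified from include/linux/workqueue.h):

static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}

So the patch only changes which worker pool executes tg_pt_gp_transition_work, decoupling it from the per-device tmr_wq that the waiting task management request occupies.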