author    Jeff Garzik <jeff@garzik.org>    2012-07-25 15:58:48 -0400
committer Jeff Garzik <jgarzik@redhat.com>    2012-07-25 15:58:48 -0400
commit    8407884dd9164ec18ed2afc00f56b87e36c51fcf (patch)
tree      b6ea42c231d7d39f454de28a068d78ce11709770 /drivers/scsi/scsi_lib.c
parent    dc7f71f486f4f5fa96f6dcf86833da020cde8a11 (diff)
parent    bdc0077af574800d24318b6945cf2344e8dbb050 (diff)
Merge branch 'master' [vanilla Linus master] into libata-dev.git/upstream
Two bits were appended to the end of the bitfield list in struct
scsi_device.  Resolve that conflict by including both bits.

Conflicts:
	include/scsi/scsi_device.h
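A conflict of this shape is resolved by keeping both newly appended flag bits at the tail of the bitfield list. A minimal sketch of the resolved region; the bit names are placeholders, since the actual flags from the two branches are not shown on this page:

struct scsi_device {
	/* ... existing members and flag bits ... */
	unsigned new_bit_from_branch_a:1;	/* placeholder name */
	unsigned new_bit_from_branch_b:1;	/* placeholder name */
	/* resolution: keep both additions; their relative order does not matter */
};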
Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r--	drivers/scsi/scsi_lib.c	104
1 file changed, 50 insertions(+), 54 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 08f1e297c735..ffd77739ae3e 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -126,7 +126,7 @@ static void scsi_unprep_request(struct request *req)
* for a requeue after completion, which should only occur in this
* file.
*/
-static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
+static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
struct Scsi_Host *host = cmd->device->host;
struct scsi_device *device = cmd->device;
@@ -172,15 +172,14 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
/*
* Requeue this command. It will go before all other commands
- * that are already in the queue.
+ * that are already in the queue. Schedule requeue work under
+ * lock such that the kblockd_schedule_work() call happens
+ * before blk_cleanup_queue() finishes.
*/
spin_lock_irqsave(q->queue_lock, flags);
blk_requeue_request(q, cmd->request);
- spin_unlock_irqrestore(q->queue_lock, flags);
-
kblockd_schedule_work(q, &device->requeue_work);
-
- return 0;
+ spin_unlock_irqrestore(q->queue_lock, flags);
}
/*
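The reordering in this hunk is subtle enough to merit spelling out the race it closes. The interleaving below is illustrative, assuming blk_cleanup_queue() serializes on the same queue_lock while shutting the queue down:

/*
 * Illustrative interleaving (not from the tree):
 *
 *   CPU0: __scsi_queue_insert()       CPU1: blk_cleanup_queue()
 *   ---------------------------       -------------------------
 *   spin_lock_irqsave(queue_lock)
 *   blk_requeue_request(q, req)       spin_lock_irq(queue_lock)  ...blocks
 *   kblockd_schedule_work(q, work)
 *   spin_unlock_irqrestore(...)       ...acquires the lock; the work is
 *                                     already queued, so teardown flushes it
 *
 * Scheduling the work after the unlock would let blk_cleanup_queue()
 * finish first, leaving the requeue work to run against a dead queue.
 */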
@@ -202,9 +201,9 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
* Notes: This could be called either from an interrupt context or a
* normal process context.
*/
-int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
+void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
- return __scsi_queue_insert(cmd, reason, 1);
+ __scsi_queue_insert(cmd, reason, 1);
}
/**
* scsi_execute - insert request and wait for the result
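Callers notice the int-to-void change only in that there is no longer a status to propagate. A hedged sketch of an adjusted call site; the function and its surrounding logic are invented for illustration, while SCSI_MLQUEUE_DEVICE_BUSY is the existing midlayer reason code:

/* Illustrative only: requeue a command on a transient busy condition. */
static void example_requeue(struct scsi_cmnd *cmd)
{
	/*
	 * Before: ret = scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
	 * After: the call cannot fail, so there is nothing to check.
	 */
	scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
}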
@@ -423,10 +422,6 @@ static void scsi_run_queue(struct request_queue *q)
LIST_HEAD(starved_list);
unsigned long flags;
- /* if the device is dead, sdev will be NULL, so no queue to run */
- if (!sdev)
- return;
-
shost = sdev->host;
if (scsi_target(sdev)->single_lun)
scsi_single_lun_run(sdev);
@@ -500,15 +495,26 @@ void scsi_requeue_run_queue(struct work_struct *work)
*/
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
+ struct scsi_device *sdev = cmd->device;
struct request *req = cmd->request;
unsigned long flags;
+ /*
+ * We need to hold a reference on the device to avoid the queue being
+ * killed after the unlock and before scsi_run_queue is invoked which
+ * may happen because scsi_unprep_request() puts the command which
+ * releases its reference on the device.
+ */
+ get_device(&sdev->sdev_gendev);
+
spin_lock_irqsave(q->queue_lock, flags);
scsi_unprep_request(req);
blk_requeue_request(q, req);
spin_unlock_irqrestore(q->queue_lock, flags);
scsi_run_queue(q);
+
+ put_device(&sdev->sdev_gendev);
}
void scsi_next_command(struct scsi_cmnd *cmd)
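The get_device()/put_device() pair above pins the scsi_device across the window where its last reference could otherwise drop. The failure mode it closes, sketched as a comment (illustrative, not code from the tree):

/*
 * Without the pinned reference:
 *
 *   scsi_unprep_request(req);      releases the command, which drops its
 *                                  reference on the device; if that was
 *                                  the last one, release runs now
 *   spin_unlock_irqrestore(...);   the device and its queue may be freed
 *   scsi_run_queue(q);             use-after-free on the dead queue
 */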
@@ -1190,6 +1196,7 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
switch (sdev->sdev_state) {
case SDEV_OFFLINE:
+ case SDEV_TRANSPORT_OFFLINE:
/*
* If the device is offline we refuse to process any
* commands. The device must be brought online
@@ -1387,16 +1394,16 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
* may be changed after request stacking drivers call the function,
* regardless of taking lock or not.
*
- * When scsi can't dispatch I/Os anymore and needs to kill I/Os
- * (e.g. !sdev), scsi needs to return 'not busy'.
- * Otherwise, request stacking drivers may hold requests forever.
+ * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi
+ * needs to return 'not busy'. Otherwise, request stacking drivers
+ * may hold requests forever.
*/
static int scsi_lld_busy(struct request_queue *q)
{
struct scsi_device *sdev = q->queuedata;
struct Scsi_Host *shost;
- if (!sdev)
+ if (blk_queue_dead(q))
return 0;
shost = sdev->host;
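The blk_queue_dead() test replaces the old convention of signalling teardown by clearing q->queuedata. A hedged sketch of the consumer side, in the style of a request-stacking driver such as dm-multipath; the helper name here is invented:

/* Illustrative: a stacking driver deciding whether to dispatch now. */
static int stacking_may_dispatch(struct request_queue *lower_q)
{
	/*
	 * blk_lld_busy() lands in scsi_lld_busy(); a dead queue reports
	 * "not busy", so the stacking driver submits, the I/O fails fast,
	 * and requests are not held forever.
	 */
	return !blk_lld_busy(lower_q);
}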
@@ -1507,12 +1514,6 @@ static void scsi_request_fn(struct request_queue *q)
struct scsi_cmnd *cmd;
struct request *req;
- if (!sdev) {
- while ((req = blk_peek_request(q)) != NULL)
- scsi_kill_request(req, q);
- return;
- }
-
if(!get_device(&sdev->sdev_gendev))
/* We must be tearing the block queue down already */
return;
@@ -1714,20 +1715,6 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
return q;
}
-void scsi_free_queue(struct request_queue *q)
-{
- unsigned long flags;
-
- WARN_ON(q->queuedata);
-
- /* cause scsi_request_fn() to kill all non-finished requests */
- spin_lock_irqsave(q->queue_lock, flags);
- q->request_fn(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-
- blk_cleanup_queue(q);
-}
-
/*
* Function: scsi_block_requests()
*
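With scsi_free_queue() gone, teardown is expected to call blk_cleanup_queue() directly: blk_cleanup_queue() marks the queue dead and drains outstanding requests, which is what makes the removed !sdev escape hatches unnecessary. A sketch of the resulting teardown ordering, with an invented call-site shape:

/* Illustrative teardown after this change: */
static void example_remove_device(struct scsi_device *sdev)
{
	/* marks the queue dead and fails/drains outstanding requests */
	blk_cleanup_queue(sdev->request_queue);
	/* the queue user no longer pins the device */
	put_device(&sdev->sdev_gendev);
}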
@@ -2098,6 +2085,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
switch (oldstate) {
case SDEV_CREATED:
case SDEV_OFFLINE:
+ case SDEV_TRANSPORT_OFFLINE:
case SDEV_QUIESCE:
case SDEV_BLOCK:
break;
@@ -2110,6 +2098,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
switch (oldstate) {
case SDEV_RUNNING:
case SDEV_OFFLINE:
+ case SDEV_TRANSPORT_OFFLINE:
break;
default:
goto illegal;
@@ -2117,6 +2106,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
break;
case SDEV_OFFLINE:
+ case SDEV_TRANSPORT_OFFLINE:
switch (oldstate) {
case SDEV_CREATED:
case SDEV_RUNNING:
@@ -2153,6 +2143,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
case SDEV_RUNNING:
case SDEV_QUIESCE:
case SDEV_OFFLINE:
+ case SDEV_TRANSPORT_OFFLINE:
case SDEV_BLOCK:
break;
default:
@@ -2165,6 +2156,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
case SDEV_CREATED:
case SDEV_RUNNING:
case SDEV_OFFLINE:
+ case SDEV_TRANSPORT_OFFLINE:
case SDEV_CANCEL:
break;
default:
@@ -2422,7 +2414,6 @@ EXPORT_SYMBOL(scsi_target_resume);
* (which must be a legal transition). When the device is in this
* state, all commands are deferred until the scsi lld reenables
* the device with scsi_device_unblock or device_block_tmo fires.
- * This routine assumes the host_lock is held on entry.
*/
int
scsi_internal_device_block(struct scsi_device *sdev)
@@ -2455,6 +2446,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block);
/**
* scsi_internal_device_unblock - resume a device after a block request
* @sdev: device to resume
+ * @new_state: state to set devices to after unblocking
*
* Called by scsi lld's or the midlayer to restart the device queue
* for the previously suspended scsi device. Called from interrupt or
@@ -2464,25 +2456,29 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block);
*
* Notes:
* This routine transitions the device to the SDEV_RUNNING state
- * (which must be a legal transition) allowing the midlayer to
- * goose the queue for this device. This routine assumes the
- * host_lock is held upon entry.
+ * or to one of the offline states (which must be a legal transition)
+ * allowing the midlayer to goose the queue for this device.
*/
int
-scsi_internal_device_unblock(struct scsi_device *sdev)
+scsi_internal_device_unblock(struct scsi_device *sdev,
+ enum scsi_device_state new_state)
{
struct request_queue *q = sdev->request_queue;
unsigned long flags;
-
- /*
- * Try to transition the scsi device to SDEV_RUNNING
- * and goose the device queue if successful.
+
+ /*
+ * Try to transition the scsi device to SDEV_RUNNING or one of the
+ * offlined states and goose the device queue if successful.
*/
if (sdev->sdev_state == SDEV_BLOCK)
- sdev->sdev_state = SDEV_RUNNING;
- else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
- sdev->sdev_state = SDEV_CREATED;
- else if (sdev->sdev_state != SDEV_CANCEL &&
+ sdev->sdev_state = new_state;
+ else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
+ if (new_state == SDEV_TRANSPORT_OFFLINE ||
+ new_state == SDEV_OFFLINE)
+ sdev->sdev_state = new_state;
+ else
+ sdev->sdev_state = SDEV_CREATED;
+ } else if (sdev->sdev_state != SDEV_CANCEL &&
sdev->sdev_state != SDEV_OFFLINE)
return -EINVAL;
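The new_state argument lets the caller decide where an unblocked device lands. A hedged usage sketch; the recovery-decision logic is invented, while the state names are the real enum values:

/* Illustrative: the target state is the caller's policy. */
static int example_unblock(struct scsi_device *sdev, bool transport_alive)
{
	return scsi_internal_device_unblock(sdev,
			transport_alive ? SDEV_RUNNING
					: SDEV_TRANSPORT_OFFLINE);
}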
@@ -2523,26 +2519,26 @@ EXPORT_SYMBOL_GPL(scsi_target_block);
static void
device_unblock(struct scsi_device *sdev, void *data)
{
- scsi_internal_device_unblock(sdev);
+ scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
}
static int
target_unblock(struct device *dev, void *data)
{
if (scsi_is_target_device(dev))
- starget_for_each_device(to_scsi_target(dev), NULL,
+ starget_for_each_device(to_scsi_target(dev), data,
device_unblock);
return 0;
}
void
-scsi_target_unblock(struct device *dev)
+scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
{
if (scsi_is_target_device(dev))
- starget_for_each_device(to_scsi_target(dev), NULL,
+ starget_for_each_device(to_scsi_target(dev), &new_state,
device_unblock);
else
- device_for_each_child(dev, NULL, target_unblock);
+ device_for_each_child(dev, &new_state, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);
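Transport classes are the natural callers of the widened interface. A hedged example in the FC remote-port style; rport is assumed context, not code from this commit:

/* Remote port came back: resume every device under the target. */
scsi_target_unblock(&rport->dev, SDEV_RUNNING);

/* dev_loss_tmo fired: move the devices to transport-offline instead. */
scsi_target_unblock(&rport->dev, SDEV_TRANSPORT_OFFLINE);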