path: root/drivers/scsi/scsi_transport_fc.c
author    Tejun Heo <tj@kernel.org>    2012-06-04 20:40:54 -0700
committer Jens Axboe <axboe@kernel.dk>    2012-06-25 11:53:48 +0200
commit    86072d8112595ea1b6beeb33f578e7c2839e014e (patch)
tree      ad49aefc355232e099c07c4a371b437e5241981f /drivers/scsi/scsi_transport_fc.c
parent    a91a5ac6858fbf7477131e1210cb3e897b668e6f (diff)
block: drop custom queue draining used by scsi_transport_{iscsi|fc}
iscsi_remove_host() uses bsg_remove_queue(), which implements custom queue draining. fc_bsg_remove() open-codes mostly identical logic.

The draining logic isn't correct: blk_stop_queue() doesn't prevent new requests from being queued, it only stops processing, so new requests can still be queued after the logic has decided that the queue is drained.

blk_cleanup_queue() now implements proper queue draining, so these custom draining implementations are no longer necessary. Drop them and use bsg_unregister_queue() + blk_cleanup_queue() instead.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Mike Christie <michaelc@cs.wisc.edu>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: James Smart <james.smart@emulex.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
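With the custom drain gone, the whole teardown reduces to unregistering the bsg node and letting blk_cleanup_queue() do the draining. A minimal sketch of the resulting fc_bsg_remove(), reconstructed from the lines the hunk below leaves untouched:

static void
fc_bsg_remove(struct request_queue *q)
{
	if (q) {
		/* remove the bsg device node so user space can no longer
		 * open it and submit new requests against this queue */
		bsg_unregister_queue(q);
		/* blk_cleanup_queue() marks the queue dead, drains any
		 * requests already queued and releases the queue */
		blk_cleanup_queue(q);
	}
}

The ordering follows the patch: unregistering first closes the submission path, so by the time blk_cleanup_queue() drains there is no producer left racing against the teardown.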
Diffstat (limited to 'drivers/scsi/scsi_transport_fc.c')
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 38
1 file changed, 0 insertions, 38 deletions
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 579760420d53..a9617ad05f33 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -4130,45 +4130,7 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
static void
fc_bsg_remove(struct request_queue *q)
{
- struct request *req; /* block request */
- int counts; /* totals for request_list count and starved */
-
if (q) {
- /* Stop taking in new requests */
- spin_lock_irq(q->queue_lock);
- blk_stop_queue(q);
-
- /* drain all requests in the queue */
- while (1) {
- /* need the lock to fetch a request
- * this may fetch the same reqeust as the previous pass
- */
- req = blk_fetch_request(q);
- /* save requests in use and starved */
- counts = q->rq.count[0] + q->rq.count[1] +
- q->rq.starved[0] + q->rq.starved[1];
- spin_unlock_irq(q->queue_lock);
- /* any requests still outstanding? */
- if (counts == 0)
- break;
-
- /* This may be the same req as the previous iteration,
- * always send the blk_end_request_all after a prefetch.
- * It is not okay to not end the request because the
- * prefetch started the request.
- */
- if (req) {
- /* return -ENXIO to indicate that this queue is
- * going away
- */
- req->errors = -ENXIO;
- blk_end_request_all(req, -ENXIO);
- }
-
- msleep(200); /* allow bsg to possibly finish */
- spin_lock_irq(q->queue_lock);
- }
-
bsg_unregister_queue(q);
blk_cleanup_queue(q);
}