Diffstat (limited to 'block')
-rw-r--r--  block/blktrace.c      24
-rw-r--r--  block/compat_ioctl.c   5
-rw-r--r--  block/elevator.c      26
-rw-r--r--  block/ll_rw_blk.c     49
4 files changed, 92 insertions, 12 deletions
diff --git a/block/blktrace.c b/block/blktrace.c
index 9b4da4ae3c7d..568588cd16b2 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -235,7 +235,7 @@ static void blk_trace_cleanup(struct blk_trace *bt)
kfree(bt);
}
-static int blk_trace_remove(struct request_queue *q)
+int blk_trace_remove(struct request_queue *q)
{
struct blk_trace *bt;
@@ -249,6 +249,7 @@ static int blk_trace_remove(struct request_queue *q)
return 0;
}
+EXPORT_SYMBOL_GPL(blk_trace_remove);
static int blk_dropped_open(struct inode *inode, struct file *filp)
{
@@ -316,18 +317,17 @@ static struct rchan_callbacks blk_relay_callbacks = {
/*
* Setup everything required to start tracing
*/
-int do_blk_trace_setup(struct request_queue *q, struct block_device *bdev,
+int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
struct blk_user_trace_setup *buts)
{
struct blk_trace *old_bt, *bt = NULL;
struct dentry *dir = NULL;
- char b[BDEVNAME_SIZE];
int ret, i;
if (!buts->buf_size || !buts->buf_nr)
return -EINVAL;
- strcpy(buts->name, bdevname(bdev, b));
+ strcpy(buts->name, name);
/*
* some device names have larger paths - convert the slashes
@@ -352,7 +352,7 @@ int do_blk_trace_setup(struct request_queue *q, struct block_device *bdev,
goto err;
bt->dir = dir;
- bt->dev = bdev->bd_dev;
+ bt->dev = dev;
atomic_set(&bt->dropped, 0);
ret = -EIO;
@@ -399,8 +399,8 @@ err:
return ret;
}
-static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
- char __user *arg)
+int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+ char __user *arg)
{
struct blk_user_trace_setup buts;
int ret;
@@ -409,7 +409,7 @@ static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
if (ret)
return -EFAULT;
- ret = do_blk_trace_setup(q, bdev, &buts);
+ ret = do_blk_trace_setup(q, name, dev, &buts);
if (ret)
return ret;
@@ -418,8 +418,9 @@ static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
return 0;
}
+EXPORT_SYMBOL_GPL(blk_trace_setup);
-static int blk_trace_startstop(struct request_queue *q, int start)
+int blk_trace_startstop(struct request_queue *q, int start)
{
struct blk_trace *bt;
int ret;
@@ -452,6 +453,7 @@ static int blk_trace_startstop(struct request_queue *q, int start)
return ret;
}
+EXPORT_SYMBOL_GPL(blk_trace_startstop);
/**
* blk_trace_ioctl: - handle the ioctls associated with tracing
@@ -464,6 +466,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
struct request_queue *q;
int ret, start = 0;
+ char b[BDEVNAME_SIZE];
q = bdev_get_queue(bdev);
if (!q)
@@ -473,7 +476,8 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
switch (cmd) {
case BLKTRACESETUP:
- ret = blk_trace_setup(q, bdev, arg);
+ strcpy(b, bdevname(bdev, b));
+ ret = blk_trace_setup(q, b, bdev->bd_dev, arg);
break;
case BLKTRACESTART:
start = 1;
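The three exports above exist so that a driver which owns a request_queue but has no struct block_device (the SCSI generic driver is the likely consumer) can service the blktrace ioctls itself, supplying a name and dev_t directly. A minimal sketch of such a handler follows; everything prefixed my_ is a hypothetical assumption, and only blk_trace_setup(), blk_trace_startstop() and blk_trace_remove() come from this patch.

#include <linux/blktrace_api.h>
#include <linux/fs.h>

struct my_dev {				/* hypothetical driver state */
	struct request_queue *queue;
	char name[32];
	dev_t devt;
};

/* Hedged sketch: a character-device ioctl handler reusing the newly
 * exported blktrace entry points. */
static int my_trace_ioctl(struct my_dev *dev, unsigned int cmd,
			  char __user *arg)
{
	switch (cmd) {
	case BLKTRACESETUP:
		/* the name and dev_t now come from the caller, not a bdev */
		return blk_trace_setup(dev->queue, dev->name, dev->devt, arg);
	case BLKTRACESTART:
		return blk_trace_startstop(dev->queue, 1);
	case BLKTRACESTOP:
		return blk_trace_startstop(dev->queue, 0);
	case BLKTRACETEARDOWN:
		return blk_trace_remove(dev->queue);
	default:
		return -ENOTTY;
	}
}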
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index cae0a852619e..b73373216b0e 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -545,6 +545,7 @@ static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
struct blk_user_trace_setup buts;
struct compat_blk_user_trace_setup cbuts;
struct request_queue *q;
+ char b[BDEVNAME_SIZE];
int ret;
q = bdev_get_queue(bdev);
@@ -554,6 +555,8 @@ static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
return -EFAULT;
+ strcpy(b, bdevname(bdev, b));
+
buts = (struct blk_user_trace_setup) {
.act_mask = cbuts.act_mask,
.buf_size = cbuts.buf_size,
@@ -565,7 +568,7 @@ static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
memcpy(&buts.name, &cbuts.name, 32);
mutex_lock(&bdev->bd_mutex);
- ret = do_blk_trace_setup(q, bdev, &buts);
+ ret = do_blk_trace_setup(q, b, bdev->bd_dev, &buts);
mutex_unlock(&bdev->bd_mutex);
if (ret)
return ret;
diff --git a/block/elevator.c b/block/elevator.c
index f9736fbdab03..8cd5775acd7a 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -741,7 +741,21 @@ struct request *elv_next_request(struct request_queue *q)
q->boundary_rq = NULL;
}
- if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn)
+ if (rq->cmd_flags & REQ_DONTPREP)
+ break;
+
+ if (q->dma_drain_size && rq->data_len) {
+ /*
+ * make sure space for the drain appears we
+ * know we can do this because max_hw_segments
+ * has been adjusted to be one fewer than the
+ * device can handle
+ */
+ rq->nr_phys_segments++;
+ rq->nr_hw_segments++;
+ }
+
+ if (!q->prep_rq_fn)
break;
ret = q->prep_rq_fn(q, rq);
@@ -754,6 +768,16 @@ struct request *elv_next_request(struct request_queue *q)
* avoid resource deadlock. REQ_STARTED will
* prevent other fs requests from passing this one.
*/
+ if (q->dma_drain_size && rq->data_len &&
+ !(rq->cmd_flags & REQ_DONTPREP)) {
+ /*
+ * remove the space for the drain we added
+ * so that we don't add it again
+ */
+ --rq->nr_phys_segments;
+ --rq->nr_hw_segments;
+ }
+
rq = NULL;
break;
} else if (ret == BLKPREP_KILL) {
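The symmetry between the two hunks above matters: the segment counts are bumped exactly once per dispatch attempt (requests marked REQ_DONTPREP break out early) and are unwound when the prep function defers, so a request that bounces with BLKPREP_DEFER is not double-counted on its next pass. The driver never handles the drain explicitly; it arrives as one more scatterlist entry. A minimal sketch of a dispatch path under that assumption, where the my_* names and segment limit are hypothetical:

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#define MY_HW_MAX_SEGMENTS 16		/* hypothetical HBA limit */

static int my_hw_program_sg(struct scatterlist *sg, int nsegs); /* hypothetical */

/* Hedged sketch: the drain element appended by the blk_rq_map_sg()
 * hunk below arrives as an ordinary scatterlist entry, so nsegs
 * already accounts for it whenever q->dma_drain_size is set. */
static int my_dispatch(struct request_queue *q, struct request *rq)
{
	struct scatterlist sg[MY_HW_MAX_SEGMENTS];
	int nsegs;

	sg_init_table(sg, MY_HW_MAX_SEGMENTS);
	nsegs = blk_rq_map_sg(q, rq, sg);	/* includes the drain, if any */

	return my_hw_program_sg(sg, nsegs);
}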
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index c16fdfed8c62..1932a56f5e4b 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -721,6 +721,45 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
EXPORT_SYMBOL(blk_queue_stack_limits);
/**
+ * blk_queue_dma_drain - Set up a drain buffer for excess DMA.
+ *
+ * @q: the request queue for the device
+ * @buf: physically contiguous buffer
+ * @size: size of the buffer in bytes
+ *
+ * Some devices have excess DMA problems and can't simply discard (or
+ * zero fill) the unwanted piece of the transfer. They have to have a
+ * real area of memory to transfer it into. The use case for this is
+ * ATAPI devices in DMA mode. If the packet command causes a transfer
+ * bigger than the transfer size, some HBAs will lock up if there
+ * aren't DMA elements to contain the excess transfer. This API
+ * adjusts the queue so that buf is always silently appended to the
+ * scatterlist.
+ *
+ * Note: This routine adjusts max_hw_segments to make room for
+ * appending the drain buffer. If you call
+ * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
+ * calling this routine, you must set the limit to one fewer than your
+ * device can support; otherwise there won't be room for the drain
+ * buffer.
+ */
+int blk_queue_dma_drain(struct request_queue *q, void *buf,
+ unsigned int size)
+{
+ if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
+ return -EINVAL;
+ /* make room for appending the drain */
+ --q->max_hw_segments;
+ --q->max_phys_segments;
+ q->dma_drain_buffer = buf;
+ q->dma_drain_size = size;
+
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
+
+/**
* blk_queue_segment_boundary - set boundary rules for segment merging
* @q: the request queue for the device
* @mask: the memory boundary mask
@@ -1374,6 +1413,16 @@ new_segment:
bvprv = bvec;
} /* segments in rq */
+ if (q->dma_drain_size) {
+ sg->page_link &= ~0x02;
+ sg = sg_next(sg);
+ sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
+ q->dma_drain_size,
+ ((unsigned long)q->dma_drain_buffer) &
+ (PAGE_SIZE - 1));
+ nsegs++;
+ }
+
if (sg)
sg_mark_end(sg);
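In the final hunk, sg->page_link &= ~0x02 clears the end-of-list marker on what was the last scatterlist entry so the drain element can be appended behind it, before sg_mark_end() terminates the list again. From a driver's side, wiring this up is a one-time queue configuration. A minimal sketch, assuming a hypothetical my_dev holding the buffer and an arbitrary worst-case size; only blk_queue_dma_drain() itself comes from this patch:

#include <linux/blkdev.h>
#include <linux/slab.h>

#define MY_DRAIN_SIZE	(32 * 1024)	/* hypothetical worst-case overshoot */

struct my_dev {				/* hypothetical driver state */
	void *drain_buf;
};

/* Hedged sketch: one-time queue configuration for an ATAPI-style
 * device that needs a drain buffer. */
static int my_setup_drain(struct my_dev *dev, struct request_queue *q)
{
	/* physically contiguous, and allocated with the queue's bounce
	 * mask so the HBA can actually DMA into it */
	dev->drain_buf = kmalloc(MY_DRAIN_SIZE, q->bounce_gfp | GFP_KERNEL);
	if (!dev->drain_buf)
		return -ENOMEM;

	if (blk_queue_dma_drain(q, dev->drain_buf, MY_DRAIN_SIZE)) {
		/* the queue had no spare segment to reserve */
		kfree(dev->drain_buf);
		dev->drain_buf = NULL;
		return -EINVAL;
	}
	return 0;
}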