path: root/drivers/usb/core/message.c
author     Alan Stern <stern@rowland.harvard.edu>    2010-04-02 13:27:28 -0400
committer  Greg Kroah-Hartman <gregkh@suse.de>       2010-05-20 13:21:37 -0700
commit     ff9c895f07d36193c75533bda8193bde8ca99d02
tree       386ca8e37734c4810e59a55eaba92e4e88275d14  /drivers/usb/core/message.c
parent     0ff8d1b3c858ea7c8daa54f7577971a76d04d283
USB: fix usbmon and DMA mapping for scatter-gather URBs
This patch (as1368) fixes a rather obscure bug in usbmon: When tracing
URBs sent by the scatter-gather library, it accesses the data buffers
while they are still mapped for DMA.

The solution is to move the mapping and unmapping out of the s-g
library and into the usual place in hcd.c. This requires the addition
of new URB flag bits to describe the kind of mapping needed, since we
have to call dma_map_sg() if the HCD supports native scatter-gather
operation and dma_map_page() if it doesn't. The nice thing about
having the new flags is that they simplify the testing for unmapping.

The patch removes the only caller of usb_buffer_[un]map_sg(), so those
functions are #if'ed out. A later patch will remove them entirely.

As a result of this change, urb->sg will be set in situations where it
wasn't set previously. Hence the xhci and whci drivers are adjusted to
test urb->num_sgs instead, which retains its original meaning and is
nonzero only when the HCD has to handle a scatterlist.

Finally, even when a submission error occurs we don't want to hand URBs
to usbmon before they are unmapped. The submission path is rearranged
so that map_urb_for_dma() is called only for non-root-hub URBs and
unmap_urb_for_dma() is called immediately after a submission error.
This simplifies the error handling.

Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
CC: <stable@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
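The following is a minimal, illustrative sketch of the two mapping strategies the message describes and of how per-URB mapping-state flags make unmapping trivial. It is not the hcd.c code added by this patch: the helper names and the exact use of the URB_DMA_MAP_SG / URB_DMA_MAP_PAGE bits are assumptions taken from the description above; only the dma_map_sg()/dma_map_page() calls themselves are standard kernel DMA API.

/*
 * Illustrative sketch only -- not the hcd.c code added by this patch.
 * hcd_supports_sg and the way the URB_DMA_MAP_* flags are applied here
 * are assumptions based on the commit message above.
 */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/usb.h>

static int sketch_map_for_dma(struct device *dma_dev, struct urb *urb,
			      struct scatterlist *sg, int nents,
			      bool hcd_supports_sg)
{
	enum dma_data_direction dir = usb_urb_dir_in(urb) ?
			DMA_FROM_DEVICE : DMA_TO_DEVICE;

	if (hcd_supports_sg) {
		/* HCD walks the scatterlist itself: map the whole list */
		if (dma_map_sg(dma_dev, sg, nents, dir) <= 0)
			return -EAGAIN;
		urb->transfer_flags |= URB_DMA_MAP_SG;
	} else {
		/* one URB per entry: map only this entry's page */
		urb->transfer_dma = dma_map_page(dma_dev, sg_page(sg),
						 sg->offset, sg->length, dir);
		if (dma_mapping_error(dma_dev, urb->transfer_dma))
			return -EAGAIN;
		urb->transfer_flags |= URB_DMA_MAP_PAGE;
	}
	return 0;
}

/* Unmapping no longer has to guess: the flag records which call undoes it. */
static void sketch_unmap_for_dma(struct device *dma_dev, struct urb *urb,
				 struct scatterlist *sg, int nents)
{
	enum dma_data_direction dir = usb_urb_dir_in(urb) ?
			DMA_FROM_DEVICE : DMA_TO_DEVICE;

	if (urb->transfer_flags & URB_DMA_MAP_SG)
		dma_unmap_sg(dma_dev, sg, nents, dir);
	else if (urb->transfer_flags & URB_DMA_MAP_PAGE)
		dma_unmap_page(dma_dev, urb->transfer_dma, sg->length, dir);
	urb->transfer_flags &= ~(URB_DMA_MAP_SG | URB_DMA_MAP_PAGE);
}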
Diffstat (limited to 'drivers/usb/core/message.c')
-rw-r--r--  drivers/usb/core/message.c  45
1 file changed, 10 insertions(+), 35 deletions(-)
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 619c44fb8a96..79d1cdf4a635 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -259,9 +259,6 @@ static void sg_clean(struct usb_sg_request *io)
 		kfree(io->urbs);
 		io->urbs = NULL;
 	}
-	if (io->dev->dev.dma_mask != NULL)
-		usb_buffer_unmap_sg(io->dev, usb_pipein(io->pipe),
-				io->sg, io->nents);
 	io->dev = NULL;
 }
@@ -364,7 +361,6 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
 {
 	int i;
 	int urb_flags;
-	int dma;
 	int use_sg;
 
 	if (!io || !dev || !sg
@@ -378,21 +374,9 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
 	io->pipe = pipe;
 	io->sg = sg;
 	io->nents = nents;
-
-	/* not all host controllers use DMA (like the mainstream pci ones);
-	 * they can use PIO (sl811) or be software over another transport.
-	 */
-	dma = (dev->dev.dma_mask != NULL);
-	if (dma)
-		io->entries = usb_buffer_map_sg(dev, usb_pipein(pipe),
-				sg, nents);
-	else
-		io->entries = nents;
+	io->entries = nents;
 
 	/* initialize all the urbs we'll use */
-	if (io->entries <= 0)
-		return io->entries;
-
 	if (dev->bus->sg_tablesize > 0) {
 		io->urbs = kmalloc(sizeof *io->urbs, mem_flags);
 		use_sg = true;
@@ -404,8 +388,6 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
 		goto nomem;
 
 	urb_flags = 0;
-	if (dma)
-		urb_flags |= URB_NO_TRANSFER_DMA_MAP;
 	if (usb_pipein(pipe))
 		urb_flags |= URB_SHORT_NOT_OK;
@@ -423,12 +405,13 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
 		io->urbs[0]->complete = sg_complete;
 		io->urbs[0]->context = io;
+
 		/* A length of zero means transfer the whole sg list */
 		io->urbs[0]->transfer_buffer_length = length;
 		if (length == 0) {
 			for_each_sg(sg, sg, io->entries, i) {
 				io->urbs[0]->transfer_buffer_length +=
-					sg_dma_len(sg);
+					sg->length;
 			}
 		}
 		io->urbs[0]->sg = io;
@@ -454,26 +437,16 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
 			io->urbs[i]->context = io;
 
 			/*
-			 * Some systems need to revert to PIO when DMA is temporarily
-			 * unavailable. For their sakes, both transfer_buffer and
-			 * transfer_dma are set when possible.
-			 *
-			 * Note that if IOMMU coalescing occurred, we cannot
-			 * trust sg_page anymore, so check if S/G list shrunk.
+			 * Some systems can't use DMA; they use PIO instead.
+			 * For their sakes, transfer_buffer is set whenever
+			 * possible.
 			 */
-			if (io->nents == io->entries && !PageHighMem(sg_page(sg)))
+			if (!PageHighMem(sg_page(sg)))
 				io->urbs[i]->transfer_buffer = sg_virt(sg);
 			else
 				io->urbs[i]->transfer_buffer = NULL;
 
-			if (dma) {
-				io->urbs[i]->transfer_dma = sg_dma_address(sg);
-				len = sg_dma_len(sg);
-			} else {
-				/* hc may use _only_ transfer_buffer */
-				len = sg->length;
-			}
-
+			len = sg->length;
 			if (length) {
 				len = min_t(unsigned, len, length);
 				length -= len;
@@ -481,6 +454,8 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
 					io->entries = i + 1;
 			}
 			io->urbs[i]->transfer_buffer_length = len;
+
+			io->urbs[i]->sg = (struct usb_sg_request *) sg;
 		}
 		io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;
 	}
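For context, here is a hedged usage sketch of the scatter-gather library whose internals are changed above. The caller-visible interface (usb_sg_init()/usb_sg_wait()) is unchanged by this patch; what changes is that the buffers are now DMA-mapped in hcd.c at URB submission time rather than inside usb_sg_init(). The endpoint number and buffer layout below are invented for illustration and are not taken from this patch.

/*
 * Hypothetical caller of the s-g library.  Endpoint 1 and the two
 * buffers are made-up example values; buf1/buf2 must be DMA-able
 * memory (e.g. kmalloc'ed), as usual for USB transfer buffers.
 */
#include <linux/scatterlist.h>
#include <linux/usb.h>

static int example_bulk_in_sg(struct usb_device *udev,
			      void *buf1, void *buf2, size_t len)
{
	struct usb_sg_request io;
	struct scatterlist sg[2];
	int ret;

	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], buf1, len);
	sg_set_buf(&sg[1], buf2, len);

	/* length == 0 means "transfer the whole scatterlist" */
	ret = usb_sg_init(&io, udev, usb_rcvbulkpipe(udev, 1), 0,
			  sg, 2, 0, GFP_KERNEL);
	if (ret)
		return ret;

	usb_sg_wait(&io);	/* blocks; URBs are mapped/unmapped in hcd.c */
	return io.status;	/* 0 on success; io.bytes holds the count */
}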