Diffstat (limited to 'lib')
-rw-r--r--  lib/dma-debug.c | 115
-rw-r--r--  lib/swiotlb.c   | 119
2 files changed, 167 insertions(+), 67 deletions(-)
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 69da09a085a1..8fcc09c91e1b 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -85,6 +85,7 @@ static u32 show_num_errors = 1;
static u32 num_free_entries;
static u32 min_free_entries;
+static u32 nr_total_entries;
/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;
@@ -185,15 +186,50 @@ static void put_hash_bucket(struct hash_bucket *bucket,
static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
struct dma_debug_entry *ref)
{
- struct dma_debug_entry *entry;
+ struct dma_debug_entry *entry, *ret = NULL;
+ int matches = 0, match_lvl, last_lvl = 0;
list_for_each_entry(entry, &bucket->list, list) {
- if ((entry->dev_addr == ref->dev_addr) &&
- (entry->dev == ref->dev))
+ if ((entry->dev_addr != ref->dev_addr) ||
+ (entry->dev != ref->dev))
+ continue;
+
+ /*
+ * Some drivers map the same physical address multiple
+ * times. Without a hardware IOMMU this results in the
+ * same device addresses being put into the dma-debug
+ * hash multiple times too. This can result in false
+ * positives being reported. Therefore we implement a
+ * best-fit algorithm here which returns the entry from
+ * the hash which fits best to the reference value
+ * instead of the first-fit.
+ */
+ matches += 1;
+ match_lvl = 0;
+ entry->size == ref->size ? ++match_lvl : match_lvl;
+ entry->type == ref->type ? ++match_lvl : match_lvl;
+ entry->direction == ref->direction ? ++match_lvl : match_lvl;
+
+ if (match_lvl == 3) {
+ /* perfect-fit - return the result */
return entry;
+ } else if (match_lvl > last_lvl) {
+ /*
+ * We found an entry that fits better than the
+ * previous one
+ */
+ last_lvl = match_lvl;
+ ret = entry;
+ }
}
- return NULL;
+ /*
+ * If we have multiple matches but no perfect-fit, just return
+ * NULL.
+ */
+ ret = (matches == 1) ? ret : NULL;
+
+ return ret;
}
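
To make the best-fit logic above concrete, here is a minimal stand-alone sketch of the scoring step, using a hypothetical reduced struct rather than the kernel's struct dma_debug_entry: each of size, type and direction contributes one match level, a level of 3 is a perfect fit, and anything less is only returned when it is the sole candidate.

	/* Hypothetical reduced entry type, for illustration only. */
	struct dbg_ref {
		unsigned long size;
		int type;
		int direction;
	};

	/* Score a candidate against the reference: 0..3, 3 == perfect fit. */
	static int match_level(const struct dbg_ref *entry,
			       const struct dbg_ref *ref)
	{
		int lvl = 0;

		if (entry->size == ref->size)
			lvl += 1;
		if (entry->type == ref->type)
			lvl += 1;
		if (entry->direction == ref->direction)
			lvl += 1;

		return lvl;
	}
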
/*
@@ -257,6 +293,21 @@ static void add_dma_entry(struct dma_debug_entry *entry)
put_hash_bucket(bucket, &flags);
}
+static struct dma_debug_entry *__dma_entry_alloc(void)
+{
+ struct dma_debug_entry *entry;
+
+ entry = list_entry(free_entries.next, struct dma_debug_entry, list);
+ list_del(&entry->list);
+ memset(entry, 0, sizeof(*entry));
+
+ num_free_entries -= 1;
+ if (num_free_entries < min_free_entries)
+ min_free_entries = num_free_entries;
+
+ return entry;
+}
+
/* struct dma_entry allocator
*
* The next two functions implement the allocator for
@@ -276,9 +327,7 @@ static struct dma_debug_entry *dma_entry_alloc(void)
goto out;
}
- entry = list_entry(free_entries.next, struct dma_debug_entry, list);
- list_del(&entry->list);
- memset(entry, 0, sizeof(*entry));
+ entry = __dma_entry_alloc();
#ifdef CONFIG_STACKTRACE
entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
@@ -286,9 +335,6 @@ static struct dma_debug_entry *dma_entry_alloc(void)
entry->stacktrace.skip = 2;
save_stack_trace(&entry->stacktrace);
#endif
- num_free_entries -= 1;
- if (num_free_entries < min_free_entries)
- min_free_entries = num_free_entries;
out:
spin_unlock_irqrestore(&free_entries_lock, flags);
@@ -310,6 +356,53 @@ static void dma_entry_free(struct dma_debug_entry *entry)
spin_unlock_irqrestore(&free_entries_lock, flags);
}
+int dma_debug_resize_entries(u32 num_entries)
+{
+ int i, delta, ret = 0;
+ unsigned long flags;
+ struct dma_debug_entry *entry;
+ LIST_HEAD(tmp);
+
+ spin_lock_irqsave(&free_entries_lock, flags);
+
+ if (nr_total_entries < num_entries) {
+ delta = num_entries - nr_total_entries;
+
+ spin_unlock_irqrestore(&free_entries_lock, flags);
+
+ for (i = 0; i < delta; i++) {
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ break;
+
+ list_add_tail(&entry->list, &tmp);
+ }
+
+ spin_lock_irqsave(&free_entries_lock, flags);
+
+ list_splice(&tmp, &free_entries);
+ nr_total_entries += i;
+ num_free_entries += i;
+ } else {
+ delta = nr_total_entries - num_entries;
+
+ for (i = 0; i < delta && !list_empty(&free_entries); i++) {
+ entry = __dma_entry_alloc();
+ kfree(entry);
+ }
+
+ nr_total_entries -= i;
+ }
+
+ if (nr_total_entries != num_entries)
+ ret = 1;
+
+ spin_unlock_irqrestore(&free_entries_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(dma_debug_resize_entries);
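
A hedged usage sketch for the new export: a hypothetical IOMMU driver that preallocates a large number of mappings could grow the debug-entry pool at init time. The function name and count below are invented for illustration; the return convention of 0 on success follows the code above.

	/* Illustration only: grow the dma-debug pool for a busy IOMMU. */
	static void my_iommu_setup_dma_debug(u32 nr_expected_mappings)
	{
		if (dma_debug_resize_entries(nr_expected_mappings) != 0)
			printk(KERN_WARNING
			       "dma-debug: could not resize entry pool\n");
	}
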
+
/*
* DMA-API debugging init code
*
@@ -439,6 +532,8 @@ void dma_debug_init(u32 num_entries)
return;
}
+ nr_total_entries = num_free_entries;
+
printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
}
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 2b0b5a7d2ced..bffe6d7ef9d9 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -60,8 +60,8 @@ enum dma_sync_target {
int swiotlb_force;
/*
- * Used to do a quick range check in swiotlb_unmap_single and
- * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
+ * Used to do a quick range check in unmap_single and
+ * sync_single_*, to see if the memory was in fact allocated by this
* API.
*/
static char *io_tlb_start, *io_tlb_end;
@@ -129,7 +129,7 @@ dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
return paddr;
}
-phys_addr_t __weak swiotlb_bus_to_phys(dma_addr_t baddr)
+phys_addr_t __weak swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
{
return baddr;
}
@@ -140,9 +140,15 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
return swiotlb_phys_to_bus(hwdev, virt_to_phys(address));
}
-static void *swiotlb_bus_to_virt(dma_addr_t address)
+void * __weak swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t address)
{
- return phys_to_virt(swiotlb_bus_to_phys(address));
+ return phys_to_virt(swiotlb_bus_to_phys(hwdev, address));
+}
+
+int __weak swiotlb_arch_address_needs_mapping(struct device *hwdev,
+ dma_addr_t addr, size_t size)
+{
+ return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
}
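
Since the helper is __weak, an architecture can override it with a strong definition that the linker picks instead of the default. A minimal sketch, assuming an invented platform policy that bounces every buffer ending above 4 GiB:

	/* Hypothetical arch override; the strong symbol replaces the
	 * __weak default at link time. Policy is invented for illustration. */
	int swiotlb_arch_address_needs_mapping(struct device *hwdev,
					       dma_addr_t addr, size_t size)
	{
		return (addr + size - 1) > DMA_BIT_MASK(32);
	}
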
int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
@@ -309,10 +315,10 @@ cleanup1:
return -ENOMEM;
}
-static int
+static inline int
address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
{
- return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
+ return swiotlb_arch_address_needs_mapping(hwdev, addr, size);
}
static inline int range_needs_mapping(phys_addr_t paddr, size_t size)
@@ -341,7 +347,7 @@ static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
unsigned long flags;
while (size) {
- sz = min(PAGE_SIZE - offset, size);
+ sz = min_t(size_t, PAGE_SIZE - offset, size);
local_irq_save(flags);
buffer = kmap_atomic(pfn_to_page(pfn),
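
On the min_t() change: the kernel's min() macro emits a warning when its two arguments have different types, and PAGE_SIZE - offset is unsigned long here while size is size_t, so min_t(size_t, ...) casts both operands first. A simplified sketch of the macro (close to, but not verbatim from, linux/kernel.h):

	#define min_t(type, x, y) ({		\
		type __min1 = (x);		\
		type __min2 = (y);		\
		__min1 < __min2 ? __min1 : __min2; })
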
@@ -476,7 +482,7 @@ found:
* dma_addr is the kernel virtual address of the bounce buffer to unmap.
*/
static void
-unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
+do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
unsigned long flags;
int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
@@ -560,7 +566,6 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
size)) {
/*
* The allocated memory isn't reachable by the device.
- * Fall back on swiotlb_map_single().
*/
free_pages((unsigned long) ret, order);
ret = NULL;
@@ -568,9 +573,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
if (!ret) {
/*
* We are either out of memory or the device can't DMA
- * to GFP_DMA memory; fall back on
- * swiotlb_map_single(), which will grab memory from
- * the lowest available address range.
+ * to GFP_DMA memory; fall back on map_single(), which
+ * will grab memory from the lowest available address range.
*/
ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
if (!ret)
@@ -587,7 +591,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
(unsigned long long)dev_addr);
/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
- unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
+ do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
return NULL;
}
*dma_handle = dev_addr;
@@ -604,7 +608,7 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
free_pages((unsigned long) vaddr, get_order(size));
else
/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
- unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
+ do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
}
EXPORT_SYMBOL(swiotlb_free_coherent);
@@ -634,7 +638,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
* physical address to use is returned.
*
* Once the device is given the dma address, the device owns this memory until
- * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
+ * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
*/
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
@@ -642,18 +646,17 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
struct dma_attrs *attrs)
{
phys_addr_t phys = page_to_phys(page) + offset;
- void *ptr = page_address(page) + offset;
dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys);
void *map;
BUG_ON(dir == DMA_NONE);
/*
- * If the pointer passed in happens to be in the device's DMA window,
+ * If the address happens to be in the device's DMA window,
* we can safely return the device addr and not worry about bounce
* buffering it.
*/
if (!address_needs_mapping(dev, dev_addr, size) &&
- !range_needs_mapping(virt_to_phys(ptr), size))
+ !range_needs_mapping(phys, size))
return dev_addr;
/*
@@ -679,23 +682,35 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
/*
* Unmap a single streaming mode DMA translation. The dma_addr and size must
- * match what was provided for in a previous swiotlb_map_single call. All
+ * match what was provided for in a previous swiotlb_map_page call. All
* other usages are undefined.
*
* After this call, reads by the cpu to the buffer are guaranteed to see
* whatever the device wrote there.
*/
+static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, int dir)
+{
+ char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);
+
+ BUG_ON(dir == DMA_NONE);
+
+ if (is_swiotlb_buffer(dma_addr)) {
+ do_unmap_single(hwdev, dma_addr, size, dir);
+ return;
+ }
+
+ if (dir != DMA_FROM_DEVICE)
+ return;
+
+ dma_mark_clean(dma_addr, size);
+}
+
void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- char *dma_addr = swiotlb_bus_to_virt(dev_addr);
-
- BUG_ON(dir == DMA_NONE);
- if (is_swiotlb_buffer(dma_addr))
- unmap_single(hwdev, dma_addr, size, dir);
- else if (dir == DMA_FROM_DEVICE)
- dma_mark_clean(dma_addr, size);
+ unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
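
For orientation, a hedged caller-side sketch of the map/unmap pairing described above. This is illustrative only: real drivers normally reach swiotlb through the generic dma_map_page()/dma_unmap_page() wrappers, and dev, page and len are assumed to exist in the caller.

	/* Illustration only: map, let the device DMA, then unmap. */
	dma_addr_t handle;

	handle = swiotlb_map_page(dev, page, 0, len, DMA_TO_DEVICE, NULL);
	/* ... device reads from 'handle' ... */
	swiotlb_unmap_page(dev, handle, len, DMA_TO_DEVICE, NULL);
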
@@ -703,7 +718,7 @@ EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
* Make physical memory consistent for a single streaming mode DMA translation
* after a transfer.
*
- * If you perform a swiotlb_map_single() but wish to interrogate the buffer
+ * If you perform a swiotlb_map_page() but wish to interrogate the buffer
* using the cpu, yet do not wish to teardown the dma mapping, you must
* call this function before doing so. At the next point you give the dma
* address back to the card, you must first perform a
@@ -713,13 +728,19 @@ static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
size_t size, int dir, int target)
{
- char *dma_addr = swiotlb_bus_to_virt(dev_addr);
+ char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);
BUG_ON(dir == DMA_NONE);
- if (is_swiotlb_buffer(dma_addr))
+
+ if (is_swiotlb_buffer(dma_addr)) {
sync_single(hwdev, dma_addr, size, dir, target);
- else if (dir == DMA_FROM_DEVICE)
- dma_mark_clean(dma_addr, size);
+ return;
+ }
+
+ if (dir != DMA_FROM_DEVICE)
+ return;
+
+ dma_mark_clean(dma_addr, size);
}
void
@@ -746,13 +767,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
unsigned long offset, size_t size,
int dir, int target)
{
- char *dma_addr = swiotlb_bus_to_virt(dev_addr) + offset;
-
- BUG_ON(dir == DMA_NONE);
- if (is_swiotlb_buffer(dma_addr))
- sync_single(hwdev, dma_addr, size, dir, target);
- else if (dir == DMA_FROM_DEVICE)
- dma_mark_clean(dma_addr, size);
+ swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target);
}
void
@@ -777,7 +792,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
/*
* Map a set of buffers described by scatterlist in streaming mode for DMA.
- * This is the scatter-gather version of the above swiotlb_map_single
+ * This is the scatter-gather version of the above swiotlb_map_page
* interface. Here the scatter gather list elements are each tagged with the
* appropriate dma address and length. They are obtained via
* sg_dma_{address,length}(SG).
@@ -788,7 +803,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
* The routine returns the number of addr/length pairs actually
* used, at most nents.
*
- * Device ownership issues as mentioned above for swiotlb_map_single are the
+ * Device ownership issues as mentioned above for swiotlb_map_page are the
* same here.
*/
int
@@ -836,7 +851,7 @@ EXPORT_SYMBOL(swiotlb_map_sg);
/*
* Unmap a set of streaming mode DMA translations. Again, cpu read rules
- * concerning calls here are the same as for swiotlb_unmap_single() above.
+ * concerning calls here are the same as for swiotlb_unmap_page() above.
*/
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
@@ -847,13 +862,9 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
BUG_ON(dir == DMA_NONE);
- for_each_sg(sgl, sg, nelems, i) {
- if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg)))
- unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
- sg->dma_length, dir);
- else if (dir == DMA_FROM_DEVICE)
- dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
- }
+ for_each_sg(sgl, sg, nelems, i)
+ unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
+
}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
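
And the matching scatter-gather sketch, again illustrative: hwdev, sgl and nents are assumed, and drivers would normally go through dma_map_sg()/dma_unmap_sg() rather than calling swiotlb directly.

	/* Illustration only: map a scatterlist, then unmap every element. */
	int nents_mapped;

	nents_mapped = swiotlb_map_sg(hwdev, sgl, nents, DMA_TO_DEVICE);
	/* ... device DMA to/from the mapped elements ... */
	swiotlb_unmap_sg(hwdev, sgl, nents_mapped, DMA_TO_DEVICE);
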
@@ -879,15 +890,9 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
struct scatterlist *sg;
int i;
- BUG_ON(dir == DMA_NONE);
-
- for_each_sg(sgl, sg, nelems, i) {
- if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg)))
- sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
+ for_each_sg(sgl, sg, nelems, i)
+ swiotlb_sync_single(hwdev, sg->dma_address,
sg->dma_length, dir, target);
- else if (dir == DMA_FROM_DEVICE)
- dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
- }
}
void