author		Russell King <rmk+kernel@arm.linux.org.uk>	2014-05-03 11:06:55 +0100
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2014-05-22 16:33:14 +0100
commit		deace4a6b440f2e05f3e073338b28901d02a15c9 (patch)
tree		f6e739ac0a7f193042b1cadf10fbfd863717e593 /arch/arm/mm/dma-mapping.c
parent		86f40622af7329375e38f282f6c0aab95f3e5f72 (diff)
ARM: dma-mapping: avoid calling dma_cache_maint_page() on dev=>cpu
Avoid calling dma_cache_maint_page() when unmapping a DMA_TO_DEVICE
buffer.  The L1 cache ops never do anything in this circumstance, nor do
they ever need to - all that matters for this case is that the data
written is visible to the device before DMA starts.  What happens during
the transfer (provided the buffer is not written to) is of no real
consequence.

We already do this optimisation for the L2 cache.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
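For context, a sketch (not part of this patch) of the kind of streaming-DMA
transmit path that benefits: the driver maps a buffer it has just written with
DMA_TO_DEVICE and unmaps it once the transfer completes.  The unmap is what
reaches __dma_page_dev_to_cpu(); with this change it no longer walks the buffer
doing L1 maintenance.  dma_map_single()/dma_unmap_single() are the real kernel
API; the function example_start_tx() and its arguments are hypothetical.

/* Illustrative only - assumes <linux/dma-mapping.h>; example_start_tx() is made up. */
static int example_start_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* CPU has filled buf; mapping cleans the caches so the device sees the data */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... hand 'handle' to the device and wait for the transfer to finish ... */

	/*
	 * Unmap after DMA.  For DMA_TO_DEVICE nothing needs invalidating:
	 * the L2 path already skipped this, and the patch below makes the
	 * L1 dmac_unmap_area pass conditional as well.
	 */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}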
Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
 arch/arm/mm/dma-mapping.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f62aa0677e5c..137463bcbeac 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -904,11 +904,12 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
 	unsigned long paddr = page_to_phys(page) + off;
 
 	/* FIXME: non-speculating: not required */
-	/* don't bother invalidating if DMA to device */
-	if (dir != DMA_TO_DEVICE)
+	/* in any case, don't bother invalidating if DMA to device */
+	if (dir != DMA_TO_DEVICE) {
 		outer_inv_range(paddr, paddr + size);
 
-	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
+		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
+	}
 
 	/*
 	 * Mark the D-cache clean for these pages to avoid extra flushing.