path: root/arch/arm/include/asm/dma-mapping.h
author    Manoj Chourasia <mchourasia@nvidia.com>    2012-06-04 17:25:43 +0530
committer Rohan Somvanshi <rsomvanshi@nvidia.com>    2012-06-13 02:48:30 -0700
commit    017d8bd2c4bd03afe04721476dd26388a4bfe7f6 (patch)
tree      7b09ed282288a302c0a9f43cd43115bc03989ff6 /arch/arm/include/asm/dma-mapping.h
parent    6a42ac50db6ff2832ff616f586fe7217b885df14 (diff)
Avoid aliasing mappings in DMA coherent allocator
Avoid multiple mappings with the DMA coherent/writecombine allocator by preallocating the mappings and removing that memory from the system memory mapping. (See previous discussions on linux-arm-kernel as to why aliased mappings are bad.)

NB1: By default we preallocate 2MB for DMA coherent memory and 2MB for write-combine memory, rather than 1MB each, in case 1MB is not sufficient for existing platform usage. Platforms may shrink this to 1MB DMA / 1MB WC (or even 2MB DMA / 0MB WC) if they wish. The DMA pool must be a multiple of 1MB, the write-combine pool must be a multiple of 1MB, and the two together must be a multiple of 2MB.

NB2: On ARMv6/7, where 'normal uncacheable' memory is used for both DMA and WC, the two pools are combined into one, as was the case with the previous implementation.

The downside of this change is that the memory is permanently set aside for DMA purposes, but I believe that to be unavoidable if we are to rule out the cache getting in the way on VIPT CPUs. This removes the last known offender (at this time) from the kernel.

Since this patch makes DMA memory fully coherent, cache invalidation/clean is not required, so we skip cache maintenance for memory managed by the DMA layer. Otherwise, the bus-address-to-virtual-address conversion normally used in the calling path, combined with the removal of the kernel static mapping for the DMA buffers, would lead to exceptions.

bug 876019
bug 965047
bug 987589

Change-Id: I72beb386605aafe1a301494a95a67d094ea6b2e4
Signed-off-by: Russell King <rmk@arm.linux.org.uk>
Signed-off-by: Manoj Chourasia <mchourasia@nvidia.com>
Reviewed-on: http://git-master/r/106212
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Varun Wadekar <vwadekar@nvidia.com>
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-by: Venkat Moganty <vmoganty@nvidia.com>
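The size rules in NB1 lend themselves to a build-time check. Below is a minimal sketch of such a check; the macro names COHERENT_DMA_SIZE and COHERENT_WC_SIZE are illustrative assumptions, not symbols defined by this patch:

	/* Hypothetical pool sizes; the defaults described in NB1 are 2MB each. */
	#define SZ_1M			0x00100000
	#define SZ_2M			0x00200000
	#define COHERENT_DMA_SIZE	SZ_2M	/* DMA coherent pool */
	#define COHERENT_WC_SIZE	SZ_2M	/* writecombine pool */

	/*
	 * NB1's constraints: each pool is a multiple of 1MB, and the two
	 * together are a multiple of 2MB.
	 */
	#if (COHERENT_DMA_SIZE % SZ_1M) || (COHERENT_WC_SIZE % SZ_1M) || \
		((COHERENT_DMA_SIZE + COHERENT_WC_SIZE) % SZ_2M)
	#error "DMA and WC pools must be 1MB multiples whose sum is a 2MB multiple"
	#endif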
Diffstat (limited to 'arch/arm/include/asm/dma-mapping.h')
-rw-r--r--  arch/arm/include/asm/dma-mapping.h  36
1 file changed, 36 insertions, 0 deletions
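For orientation, a typical caller of the writecombine API this diff touches looks roughly like the sketch below. The device pointer, buffer size, and function names here are illustrative assumptions; only dma_alloc_writecombine()/dma_free_writecombine() come from the header itself. With CONFIG_NON_ALIASED_COHERENT_MEM set, the free call binds to the new out-of-line function rather than the old macro that aliased dma_free_coherent().

	#include <linux/dma-mapping.h>
	#include <linux/device.h>

	#define BUF_SIZE (1024 * 1024)	/* 1MB; arbitrary example size */

	static void *wc_buf;
	static dma_addr_t wc_handle;

	static int my_buf_alloc(struct device *my_dev)
	{
		/* Backed by the preallocated, non-aliased writecombine pool. */
		wc_buf = dma_alloc_writecombine(my_dev, BUF_SIZE, &wc_handle,
						GFP_KERNEL);
		return wc_buf ? 0 : -ENOMEM;
	}

	static void my_buf_free(struct device *my_dev)
	{
		/*
		 * Resolves to a real function, not the dma_free_coherent()
		 * macro, when CONFIG_NON_ALIASED_COHERENT_MEM is enabled
		 * (see the diff below).
		 */
		dma_free_writecombine(my_dev, BUF_SIZE, wc_buf, wc_handle);
	}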
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 7a21d0bf7134..19e3c8c77f01 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -75,6 +75,27 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
* Private support functions: these are not part of the API and are
* liable to change. Drivers must not use these.
*/
+#if defined(CONFIG_NON_ALIASED_COHERENT_MEM)
+static inline void __dma_single_cpu_to_dev(struct device *dev,
+ const void *kaddr, size_t size, enum dma_data_direction dir)
+{
+ extern void ___dma_single_cpu_to_dev(struct device *dev,
+ const void *, size_t, enum dma_data_direction);
+
+ if (!arch_is_coherent())
+ ___dma_single_cpu_to_dev(dev, kaddr, size, dir);
+}
+
+static inline void __dma_single_dev_to_cpu(struct device *dev,
+ const void *kaddr, size_t size, enum dma_data_direction dir)
+{
+ extern void ___dma_single_dev_to_cpu(struct device *dev,
+ const void *, size_t, enum dma_data_direction);
+
+ if (!arch_is_coherent())
+ ___dma_single_dev_to_cpu(dev, kaddr, size, dir);
+}
+#else
static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
enum dma_data_direction dir)
{
@@ -94,6 +115,7 @@ static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
if (!arch_is_coherent())
___dma_single_dev_to_cpu(kaddr, size, dir);
}
+#endif

static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
size_t size, enum dma_data_direction dir)
@@ -199,8 +221,12 @@ int dma_mmap_coherent(struct device *, struct vm_area_struct *,
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
gfp_t);
+#if defined(CONFIG_NON_ALIASED_COHERENT_MEM)
+extern void dma_free_writecombine(struct device *, size_t, void *, dma_addr_t);
+#else
#define dma_free_writecombine(dev,size,cpu_addr,handle) \
dma_free_coherent(dev,size,cpu_addr,handle)
+#endif

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
void *, dma_addr_t, size_t);
@@ -421,7 +447,12 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
return;
+#if defined(CONFIG_NON_ALIASED_COHERENT_MEM)
+ __dma_single_dev_to_cpu(dev, dma_to_virt(dev, handle) + offset,
+ size, dir);
+#else
__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
+#endif
}

static inline void dma_sync_single_range_for_device(struct device *dev,
@@ -435,7 +466,12 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
return;
+#if defined(CONFIG_NON_ALIASED_COHERENT_MEM)
+ __dma_single_cpu_to_dev(dev, dma_to_virt(dev, handle) + offset,
+ size, dir);
+#else
__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
+#endif
}

static inline void dma_sync_single_for_cpu(struct device *dev,