author		Marek Szyprowski <m.szyprowski@samsung.com>	2012-05-30 10:48:29 +0200
committer	Marek Szyprowski <m.szyprowski@samsung.com>	2012-06-04 08:01:24 +0200
commit		f1ae98da8525c6b8b1c301c3a2b0bd2b6515cca2 (patch)
tree		f73e377f98bbb452612a1f53b3d399cff6cac1fa /arch
parent		f8f5701bdaf9134b1f90e5044a82c66324d2073f (diff)
ARM: dma-mapping: remove unconditional dependency on CMA
CMA has been enabled unconditionally on all ARMv6+ systems to solve the
long-standing issue of double kernel mappings for all DMA coherent buffers.
This, however, created a dependency on CONFIG_EXPERIMENTAL for the whole ARM
architecture, which should really be avoided. This patch removes that
dependency and lets one use the old, well-tested dma-mapping implementation
on ARMv6+ systems as well, without having to enable EXPERIMENTAL features.

Reported-by: Russell King <linux@arm.linux.org.uk>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
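The mechanism the patch switches to is IS_ENABLED(): the Kconfig symbol is
evaluated as a compile-time constant, so a plain C branch replaces both the
#ifdef blocks and the runtime cpu_architecture() checks, and the compiler
simply drops the unused path. Below is a minimal, stand-alone sketch of that
pattern; it is not kernel code, and USE_CMA plus the two allocator stubs are
hypothetical stand-ins for CONFIG_CMA and the real __alloc_* helpers.

/*
 * Stand-alone sketch of the compile-time-guard pattern used by the patch.
 * USE_CMA is a made-up stand-in for CONFIG_CMA; build with -DUSE_CMA=1 or
 * -DUSE_CMA=0 to select the path at compile time.
 */
#include <stdio.h>

#ifndef USE_CMA
#define USE_CMA 0			/* assume CMA is disabled by default */
#endif

static void *alloc_remap_buffer(void)
{
	puts("old remap path (CMA disabled)");
	return (void *)1;
}

static void *alloc_from_contiguous(void)
{
	puts("CMA-backed contiguous path");
	return (void *)1;
}

static void *dma_alloc_demo(void)
{
	/*
	 * Same shape as __dma_alloc() after the patch: an ordinary branch on
	 * a compile-time constant instead of #ifdef or a runtime CPU check.
	 * The dead branch is eliminated by the compiler, but it is still
	 * parsed and type-checked, which #ifdef'ed code is not.
	 */
	if (!USE_CMA)
		return alloc_remap_buffer();
	return alloc_from_contiguous();
}

int main(void)
{
	dma_alloc_demo();
	return 0;
}

In the kernel itself, IS_ENABLED(CONFIG_CMA) from <linux/kconfig.h> plays the
role of USE_CMA here.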
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Kconfig          |  1 -
-rw-r--r--  arch/arm/mm/dma-mapping.c | 10 ++++------
2 files changed, 4 insertions(+), 7 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index b649c5904a4f..84449dd8f031 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -7,7 +7,6 @@ config ARM
 	select HAVE_IDE if PCI || ISA || PCMCIA
 	select HAVE_DMA_ATTRS
 	select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
-	select CMA if (CPU_V6 || CPU_V6K || CPU_V7)
 	select HAVE_MEMBLOCK
 	select RTC_LIB
 	select SYS_SUPPORTS_APM_EMULATION
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ea6b43154090..106c4c0ebccd 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -268,10 +268,8 @@ static int __init consistent_init(void)
 	unsigned long base = consistent_base;
 	unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
 
-#ifndef CONFIG_ARM_DMA_USE_IOMMU
-	if (cpu_architecture() >= CPU_ARCH_ARMv6)
+	if (IS_ENABLED(CONFIG_CMA) && !IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
 		return 0;
-#endif
 
 	consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
 	if (!consistent_pte) {
@@ -342,7 +340,7 @@ static int __init coherent_init(void)
 	struct page *page;
 	void *ptr;
 
-	if (cpu_architecture() < CPU_ARCH_ARMv6)
+	if (!IS_ENABLED(CONFIG_CMA))
 		return 0;
 
 	ptr = __alloc_from_contiguous(NULL, size, prot, &page);
@@ -704,7 +702,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 
 	if (arch_is_coherent() || nommu())
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
-	else if (cpu_architecture() < CPU_ARCH_ARMv6)
+	else if (!IS_ENABLED(CONFIG_CMA))
 		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
 	else if (gfp & GFP_ATOMIC)
 		addr = __alloc_from_pool(dev, size, &page, caller);
@@ -773,7 +771,7 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 
 	if (arch_is_coherent() || nommu()) {
 		__dma_free_buffer(page, size);
-	} else if (cpu_architecture() < CPU_ARCH_ARMv6) {
+	} else if (!IS_ENABLED(CONFIG_CMA)) {
 		__dma_free_remap(cpu_addr, size);
 		__dma_free_buffer(page, size);
 	} else {