author	Manoj Chourasia <mchourasia@nvidia.com>	2012-06-04 17:25:43 +0530
committer	Rohan Somvanshi <rsomvanshi@nvidia.com>	2012-06-13 02:48:30 -0700
commit	017d8bd2c4bd03afe04721476dd26388a4bfe7f6
tree	7b09ed282288a302c0a9f43cd43115bc03989ff6 /arch/arm/mm/mmu.c
parent	6a42ac50db6ff2832ff616f586fe7217b885df14
Avoid aliasing mappings in DMA coherent allocator
Avoid multiple mappings with the DMA coherent/writecombine allocator by pre-allocating the mappings, and removing that memory from the system memory mapping. (See previous discussions on linux-arm-kernel as to why aliased mappings are bad.)

NB1: By default, we preallocate 2MB for DMA coherent and 2MB for write combine memory, rather than 1MB for each, in case 1MB is not sufficient for existing platform usage. Platforms have the option of shrinking this down to 1MB DMA / 1MB WC (or even 2MB DMA / 0MB WC) if they so wish. The DMA memory must be a multiple of 1MB, the write combine memory must also be a multiple of 1MB, and the two together must be a multiple of 2MB.

NB2: On ARMv6/7, where we use 'normal uncacheable' memory for both DMA and WC, the two pools are combined into one, as was the case with the previous implementation.

The downside to this change is that the memory is permanently set aside for DMA purposes, but I believe that to be unavoidable if we are to avoid the possibility of the cache getting in the way on VIPT CPUs. This removes the last known offender (at this time) from the kernel.

Given that DMA memory is fully coherent after this patch, cache invalidation/clean is not required, and so we skip cache-related activities for the memory managed by the DMA layer. The bus address -> virtual address conversion normally used in the calling path, together with the fact that we remove the kernel static mapping corresponding to the DMA buffers, would otherwise lead to exceptions.

bug 876019
bug 965047
bug 987589

Change-Id: I72beb386605aafe1a301494a95a67d094ea6b2e4
Signed-off-by: Russell King <rmk@arm.linux.org.uk>
Signed-off-by: Manoj Chourasia <mchourasia@nvidia.com>
Reviewed-on: http://git-master/r/106212
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Varun Wadekar <vwadekar@nvidia.com>
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-by: Venkat Moganty <vmoganty@nvidia.com>
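For context (not part of this patch): a minimal sketch of the driver-side view of the coherent allocator, using only the standard dma_alloc_coherent()/dma_free_coherent() API. The function name and buffer size are illustrative; the point is that, with this change, the returned CPU pointer comes from the preallocated MT_DMA_COHERENT pool and has no cacheable alias in the kernel's static mapping, so no dma_sync_*() cache maintenance is needed.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>

/* Illustrative only -- allocate a small coherent buffer for a device. */
static int example_coherent_alloc(struct device *dev)
{
	dma_addr_t bus_addr;	/* address to program into the device */
	void *cpu_addr;		/* uncached CPU-side mapping of the buffer */

	cpu_addr = dma_alloc_coherent(dev, 4096, &bus_addr, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/*
	 * The memory is fully coherent: no cache clean/invalidate is
	 * required before or after the device accesses it.
	 */
	memset(cpu_addr, 0, 4096);

	dma_free_coherent(dev, 4096, cpu_addr, bus_addr);
	return 0;
}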
Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--	arch/arm/mm/mmu.c	| 36
1 file changed, 36 insertions, 0 deletions
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4fa9c246ae93..bb80555edac9 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -273,6 +273,18 @@ static struct mem_type mem_types[] = {
.prot_l1 = PMD_TYPE_TABLE,
.domain = DOMAIN_KERNEL,
},
+#ifdef CONFIG_NON_ALIASED_COHERENT_MEM
+ [MT_DMA_COHERENT] = {
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE |
+ PMD_SECT_S,
+ .domain = DOMAIN_IO,
+ },
+ [MT_WC_COHERENT] = {
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE |
+ PMD_SECT_S,
+ .domain = DOMAIN_IO,
+ },
+#endif
};

const struct mem_type *get_mem_type(unsigned int type)
@@ -353,6 +365,9 @@ static void __init build_mem_type_table(void)
mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
+#ifdef CONFIG_NON_ALIASED_COHERENT_MEM
+ mem_types[MT_DMA_COHERENT].prot_sect |= PMD_SECT_XN;
+#endif
}

if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
/*
@@ -457,13 +472,30 @@ static void __init build_mem_type_table(void)
/* Non-cacheable Normal is XCB = 001 */
mem_types[MT_MEMORY_NONCACHED].prot_sect |=
PMD_SECT_BUFFERED;
+#ifdef CONFIG_NON_ALIASED_COHERENT_MEM
+ mem_types[MT_WC_COHERENT].prot_sect |=
+ PMD_SECT_BUFFERED;
+ mem_types[MT_DMA_COHERENT].prot_sect |=
+ PMD_SECT_BUFFERED;
+#endif
} else {
/* For both ARMv6 and non-TEX-remapping ARMv7 */
mem_types[MT_MEMORY_NONCACHED].prot_sect |=
PMD_SECT_TEX(1);
+#ifdef CONFIG_NON_ALIASED_COHERENT_MEM
+ mem_types[MT_WC_COHERENT].prot_sect |=
+ PMD_SECT_TEX(1);
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+ mem_types[MT_DMA_COHERENT].prot_sect |=
+ PMD_SECT_TEX(1);
+#endif
+#endif
}
} else {
mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
+#ifdef CONFIG_NON_ALIASED_COHERENT_MEM
+ mem_types[MT_WC_COHERENT].prot_sect |= PMD_SECT_BUFFERED;
+#endif
}

for (i = 0; i < 16; i++) {
@@ -986,6 +1018,10 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
create_mapping(&map);
}

+#ifdef CONFIG_NON_ALIASED_COHERENT_MEM
+ dma_coherent_mapping();
+#endif
+
/*
* Ask the machine support to map in the statically mapped devices.
*/
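
The body of dma_coherent_mapping() is not part of this hunk; it lives in the ARM DMA mapping code. As a hypothetical reconstruction of what the call added to devicemaps_init() plausibly does: section-map the reserved pool with the new MT_DMA_COHERENT type, so that the uncached mapping created here is the only kernel mapping of that memory. The symbols coherent_pool_phys, coherent_pool_size and COHERENT_POOL_VIRT below are illustrative names, not the real ones.

/*
 * Hypothetical sketch -- assumes the pool's physical range was
 * reserved earlier in boot and a fixed virtual window exists for it.
 */
static void __init dma_coherent_mapping(void)
{
	struct map_desc map;

	map.pfn = __phys_to_pfn(coherent_pool_phys);	/* reserved at boot */
	map.virtual = COHERENT_POOL_VIRT;		/* fixed VM window */
	map.length = coherent_pool_size;		/* multiple of 1MB */
	map.type = MT_DMA_COHERENT;

	create_mapping(&map);
}

Because map.type is MT_DMA_COHERENT, the section inherits the prot_sect bits configured in build_mem_type_table() above (XN where supported, shared, and BUFFERED or TEX(1) depending on the CPU), i.e. 'normal uncacheable' memory on ARMv6/7.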