author	Marek Szyprowski <m.szyprowski@samsung.com>	2015-04-23 12:46:16 +0100
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2015-05-13 05:14:26 -0700
commit	41e45229f0a528a17ea7e36e9164b04fd071b751 (patch)
tree	282604c489c9e8b84dd40d1a5f15ec653a052ff8 /arch
parent	9b19c50bff3747129752fa7883cf9a8a7d9e96b2 (diff)
arm64: dma-mapping: always clear allocated buffers
commit 6829e274a623187c24f7cfc0e3d35f25d087fcc5 upstream.

Buffers allocated by dma_alloc_coherent() are always zeroed on Alpha,
ARM (32-bit), MIPS, PowerPC, x86/x86_64 and probably other architectures.
It turned out that some drivers rely on this 'feature'. An allocated buffer
might also be exposed to userspace with a dma_mmap() call, so clearing it
is desirable from a security point of view, to avoid exposing random kernel
memory to userspace.

This patch unifies dma_alloc_coherent() behavior on the ARM64 architecture
with the other implementations by unconditionally zeroing the allocated
buffer.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
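For context only (not part of the commit), below is a minimal driver-side sketch of the pattern the message describes: a coherent buffer allocated without __GFP_ZERO and later exposed to userspace through an mmap handler. The struct and function names (foo_dev, foo_alloc_buffer, foo_mmap) are hypothetical; only dma_alloc_coherent() and dma_mmap_coherent() are real kernel APIs. With this patch applied, the arm64 behaviour matches the other architectures, so the buffer mapped to userspace is already zeroed.

#include <linux/dma-mapping.h>
#include <linux/mm.h>

struct foo_dev {				/* hypothetical driver state */
	struct device *dev;
	void *cpu_addr;
	dma_addr_t dma_addr;
	size_t buf_size;
};

static int foo_alloc_buffer(struct foo_dev *foo)
{
	/* No __GFP_ZERO here: after this patch the allocation is cleared
	 * unconditionally on arm64, as it already is elsewhere. */
	foo->cpu_addr = dma_alloc_coherent(foo->dev, foo->buf_size,
					   &foo->dma_addr, GFP_KERNEL);
	return foo->cpu_addr ? 0 : -ENOMEM;
}

static int foo_mmap(struct foo_dev *foo, struct vm_area_struct *vma)
{
	/* Userspace only ever sees zero-initialised (or driver-written)
	 * data, never stale kernel memory. */
	return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
				 foo->dma_addr, foo->buf_size);
}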
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm64/mm/dma-mapping.c | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index ef7d112f5ce0..e0f14ee26b68 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -67,8 +67,7 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
 		*ret_page = phys_to_page(phys);
 		ptr = (void *)val;
-		if (flags & __GFP_ZERO)
-			memset(ptr, 0, size);
+		memset(ptr, 0, size);
 	}
 
 	return ptr;
@@ -113,8 +112,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 		*dma_handle = phys_to_dma(dev, page_to_phys(page));
 		addr = page_address(page);
-		if (flags & __GFP_ZERO)
-			memset(addr, 0, size);
+		memset(addr, 0, size);
 		return addr;
 	} else {
 		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
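A short before/after note, reconstructed from the hunks above rather than taken from the commit itself (dev, size, handle and buf are placeholder names): previously an arm64 caller had to request zeroing explicitly via __GFP_ZERO; after this patch plain GFP_KERNEL is sufficient, matching the other architectures listed in the commit message.

	/* Before (arm64 only): the buffer was cleared only with __GFP_ZERO. */
	buf = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL | __GFP_ZERO);

	/* After this patch: GFP_KERNEL alone yields a zeroed buffer. */
	buf = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);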