Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
-rw-r--r--	arch/arm/mm/dma-mapping.c	98
1 file changed, 72 insertions(+), 26 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f5e1a8471714..f61a5707823a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -9,6 +9,7 @@
*
* DMA uncached mapping support.
*/
+#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
@@ -157,9 +158,47 @@ struct dma_map_ops arm_coherent_dma_ops = {
};
EXPORT_SYMBOL(arm_coherent_dma_ops);
+static int __dma_supported(struct device *dev, u64 mask, bool warn)
+{
+	unsigned long max_dma_pfn;
+
+	/*
+	 * If the mask allows for more memory than we can address,
+	 * and we actually have that much memory, then we must
+	 * indicate that DMA to this device is not supported.
+	 */
+	if (sizeof(mask) != sizeof(dma_addr_t) &&
+	    mask > (dma_addr_t)~0 &&
+	    dma_to_pfn(dev, ~0) < max_pfn) {
+		if (warn) {
+			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
+				 mask);
+			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
+		}
+		return 0;
+	}
+
+	max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
+
+	/*
+	 * Translate the device's DMA mask to a PFN limit.  This
+	 * PFN number includes the page which we can DMA to.
+	 */
+	if (dma_to_pfn(dev, mask) < max_dma_pfn) {
+		if (warn)
+			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
+				 mask,
+				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
+				 max_dma_pfn + 1);
+		return 0;
+	}
+
+	return 1;
+}
+
static u64 get_coherent_dma_mask(struct device *dev)
{
-	u64 mask = (u64)arm_dma_limit;
+	u64 mask = (u64)DMA_BIT_MASK(32);
if (dev) {
mask = dev->coherent_dma_mask;
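
A note on the two checks in __dma_supported() above: the first catches a 64-bit coherent mask being silently truncated where dma_addr_t is 32-bit on a machine that really has memory beyond 4GiB; the second requires the mask to reach at least as far as the DMA zone limit. Below is a minimal userspace sketch of the truncation check, assuming a 1:1 bus-to-physical mapping and 4KiB pages; fake_dma_addr_t and fake_max_pfn are illustrative stand-ins, not kernel symbols.

/*
 * Userspace sketch of the mask-truncation check, not kernel code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t fake_dma_addr_t;		/* 32-bit bus addresses */
#define FAKE_PAGE_SHIFT	12
static unsigned long fake_max_pfn = 0x140000;	/* RAM up to 5 GiB */

static bool mask_fits(uint64_t mask)
{
	/* A wide mask is only fatal if dma_addr_t would truncate it
	 * and memory really extends past what dma_addr_t can reach. */
	if (sizeof(mask) != sizeof(fake_dma_addr_t) &&
	    mask > (fake_dma_addr_t)~0 &&
	    (unsigned long)((fake_dma_addr_t)~0 >> FAKE_PAGE_SHIFT) < fake_max_pfn)
		return false;
	return true;
}

int main(void)
{
	printf("64-bit mask usable: %d\n", mask_fits(~0ULL));
	printf("32-bit mask usable: %d\n", mask_fits(0xffffffffULL));
	return 0;
}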
@@ -173,12 +212,8 @@ static u64 get_coherent_dma_mask(struct device *dev)
return 0;
}
-		if ((~mask) & (u64)arm_dma_limit) {
-			dev_warn(dev, "coherent DMA mask %#llx is smaller "
-				 "than system GFP_DMA mask %#llx\n",
-				 mask, (u64)arm_dma_limit);
+		if (!__dma_supported(dev, mask, true))
			return 0;
-		}
}
return mask;
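
With the default coherent mask now DMA_BIT_MASK(32) instead of arm_dma_limit, a driver that needs a different mask must negotiate it explicitly and honour the result; the second dev_warn() in __dma_supported() is aimed at drivers that skip that check. A hedged sketch of the expected pattern follows; my_probe() is hypothetical, while dma_set_coherent_mask() and DMA_BIT_MASK() are the standard kernel API.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical probe fragment: try the wide mask first and fall
 * back, checking each return value as the warning asks for. */
static int my_probe(struct device *dev)
{
	if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) &&
	    dma_set_coherent_mask(dev, DMA_BIT_MASK(32)))
		return -EIO;		/* no usable coherent mask */
	return 0;
}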
@@ -687,7 +722,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp_t gfp, struct dma_attrs *attrs)
{
-	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
void *memory;
if (dma_alloc_from_coherent(dev, size, handle, &memory))
@@ -700,7 +735,7 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
-	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
void *memory;
if (dma_alloc_from_coherent(dev, size, handle, &memory))
@@ -1007,9 +1042,7 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
*/
int dma_supported(struct device *dev, u64 mask)
{
-	if (mask < (u64)arm_dma_limit)
-		return 0;
-	return 1;
+	return __dma_supported(dev, mask, false);
}
EXPORT_SYMBOL(dma_supported);
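
dma_supported() and the coherent-mask path now share a single predicate; only the coherent path passes warn=true, so probing the streaming mask stays quiet. A short sketch of that path, with example_setup_dma() as a hypothetical caller:

#include <linux/dma-mapping.h>

/* Hypothetical: dma_set_mask() ends up in dma_supported() above,
 * which now returns the same verdict as the coherent check, just
 * without the warnings. */
static int example_setup_dma(struct device *dev)
{
	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
		return -ENODEV;	/* mask rejected by __dma_supported() */
	return 0;
}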
@@ -1232,7 +1265,8 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
break;
len = (j - i) << PAGE_SHIFT;
-		ret = iommu_map(mapping->domain, iova, phys, len, 0);
+		ret = iommu_map(mapping->domain, iova, phys, len,
+				IOMMU_READ|IOMMU_WRITE);
if (ret < 0)
goto fail;
iova += len;
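
A prot of 0 leaves both permission bits clear, which IOMMUs that enforce them turn into a fault on any device access; the buffers built by __iommu_create_mapping() back coherent allocations that devices must both read and write. A hedged sketch of the call shape with explicit permissions; example_map_one() and its arguments are stand-ins for the function's locals:

#include <linux/iommu.h>
#include <linux/mm.h>

/* Illustrative only: map one page into a device's IOVA space with
 * both permission bits set, as the hunk above now does. */
static int example_map_one(struct iommu_domain *domain,
			   unsigned long iova, struct page *page)
{
	return iommu_map(domain, iova, page_to_phys(page), PAGE_SIZE,
			 IOMMU_READ | IOMMU_WRITE);
}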
@@ -1431,6 +1465,27 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
GFP_KERNEL);
}
+static int __dma_direction_to_prot(enum dma_data_direction dir)
+{
+	int prot;
+
+	switch (dir) {
+	case DMA_BIDIRECTIONAL:
+		prot = IOMMU_READ | IOMMU_WRITE;
+		break;
+	case DMA_TO_DEVICE:
+		prot = IOMMU_READ;
+		break;
+	case DMA_FROM_DEVICE:
+		prot = IOMMU_WRITE;
+		break;
+	default:
+		prot = 0;
+	}
+
+	return prot;
+}
+
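
The new helper centralises the direction-to-permission translation that was previously open-coded in arm_coherent_iommu_map_page() (removed further down) and missing entirely from the scatter-gather path. A userspace mirror with stand-in enum and flag values, purely to make the semantics concrete:

/* Stand-in values; only the mapping logic mirrors the kernel. */
#include <stdio.h>

enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE,
			  DMA_FROM_DEVICE, DMA_NONE };
#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)

static int dir_to_prot(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:	return IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:	return IOMMU_READ;	/* device reads RAM */
	case DMA_FROM_DEVICE:	return IOMMU_WRITE;	/* device writes RAM */
	default:		return 0;		/* no access */
	}
}

int main(void)
{
	for (int d = DMA_BIDIRECTIONAL; d <= DMA_NONE; d++)
		printf("dir %d -> prot %#x\n", d, dir_to_prot(d));
	return 0;
}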
/*
* Map a part of the scatter-gather list into contiguous io address space
*/
@@ -1444,6 +1499,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
int ret = 0;
unsigned int count;
struct scatterlist *s;
+	int prot;
size = PAGE_ALIGN(size);
*handle = DMA_ERROR_CODE;
@@ -1460,7 +1516,9 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
-		ret = iommu_map(mapping->domain, iova, phys, len, 0);
+		prot = __dma_direction_to_prot(dir);
+
+		ret = iommu_map(mapping->domain, iova, phys, len, prot);
if (ret < 0)
goto fail;
count += len >> PAGE_SHIFT;
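
With this hunk, the direction a driver passes to dma_map_sg() finally reaches the IOMMU as a permission rather than the previous hard-coded 0. A hedged usage sketch; example_map_for_read() and its sg_table are hypothetical:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* DMA_TO_DEVICE -> IOMMU_READ only: the device may fetch the
 * buffer through the IOMMU but can no longer scribble over it. */
static int example_map_for_read(struct device *dev, struct sg_table *sgt)
{
	int nents = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);

	return nents ? nents : -EIO;
}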
@@ -1665,19 +1723,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
if (dma_addr == DMA_ERROR_CODE)
return dma_addr;
-	switch (dir) {
-	case DMA_BIDIRECTIONAL:
-		prot = IOMMU_READ | IOMMU_WRITE;
-		break;
-	case DMA_TO_DEVICE:
-		prot = IOMMU_READ;
-		break;
-	case DMA_FROM_DEVICE:
-		prot = IOMMU_WRITE;
-		break;
-	default:
-		prot = 0;
-	}
+	prot = __dma_direction_to_prot(dir);
ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
if (ret < 0)