From a6df13631781815752638547a49222610929f89e Mon Sep 17 00:00:00 2001
From: Stefano Stabellini
Date: Fri, 21 Nov 2014 11:07:39 +0000
Subject: xen/arm/arm64: introduce xen_arch_need_swiotlb

commit a4dba130891271084344c12537731542ec77cb85 upstream.

Introduce an arch specific function to find out whether a particular
dma mapping operation needs to bounce on the swiotlb buffer.

On ARM and ARM64, if the page involved is a foreign page and the device
is not coherent, we need to bounce because at unmap time we cannot
execute any required cache maintenance operations (we don't know how to
find the pfn from the mfn).

No change of behaviour for x86.

Signed-off-by: Stefano Stabellini
Reviewed-by: David Vrabel
Reviewed-by: Catalin Marinas
Acked-by: Ian Campbell
Acked-by: Konrad Rzeszutek Wilk
Signed-off-by: Greg Kroah-Hartman
---
 arch/arm/include/asm/xen/page.h | 4 ++++
 arch/arm/xen/mm.c               | 7 +++++++
 arch/x86/include/asm/xen/page.h | 7 +++++++
 drivers/xen/swiotlb-xen.c       | 5 ++++-
 4 files changed, 22 insertions(+), 1 deletion(-)

diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 135c24a5ba26..68c739b3fdf4 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -107,4 +107,8 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 #define xen_remap(cookie, size) ioremap_cache((cookie), (size))
 #define xen_unmap(cookie) iounmap((cookie))
 
+bool xen_arch_need_swiotlb(struct device *dev,
+                           unsigned long pfn,
+                           unsigned long mfn);
+
 #endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index b0e77de99148..f8a576b1d9bb 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -16,6 +16,13 @@
 #include
 #include
 
+bool xen_arch_need_swiotlb(struct device *dev,
+                           unsigned long pfn,
+                           unsigned long mfn)
+{
+	return (pfn != mfn);
+}
+
 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
 				 unsigned int address_bits,
 				 dma_addr_t *dma_handle)
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index c949923a5668..f58ef6c0613b 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -236,4 +236,11 @@ void make_lowmem_page_readwrite(void *vaddr);
 #define xen_remap(cookie, size) ioremap((cookie), (size));
 #define xen_unmap(cookie) iounmap((cookie))
 
+static inline bool xen_arch_need_swiotlb(struct device *dev,
+                                         unsigned long pfn,
+                                         unsigned long mfn)
+{
+	return false;
+}
+
 #endif /* _ASM_X86_XEN_PAGE_H */
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index f3a9d831d0f9..c9d0d5a0e662 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -397,7 +397,9 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	 * buffering it.
 	 */
 	if (dma_capable(dev, dev_addr, size) &&
-	    !range_straddles_page_boundary(phys, size) && !swiotlb_force) {
+	    !range_straddles_page_boundary(phys, size) &&
+	    !xen_arch_need_swiotlb(dev, PFN_DOWN(phys), PFN_DOWN(dev_addr)) &&
+	    !swiotlb_force) {
 		/* we are not interested in the dma_addr returned by
 		 * xen_dma_map_page, only in the potential cache flushes executed
 		 * by the function. */
@@ -555,6 +557,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 		dma_addr_t dev_addr = xen_phys_to_bus(paddr);
 
 		if (swiotlb_force ||
+		    xen_arch_need_swiotlb(hwdev, PFN_DOWN(paddr), PFN_DOWN(dev_addr)) ||
 		    !dma_capable(hwdev, dev_addr, sg->length) ||
 		    range_straddles_page_boundary(paddr, sg->length)) {
 			phys_addr_t map = swiotlb_tbl_map_single(hwdev,
-- 
cgit v1.2.3
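
For reference, a minimal user-space sketch of the decision the patch adds. It assumes nothing beyond what the diff shows; the helper names and the sample pfn/mfn values are illustrative and this is not kernel code. On ARM/ARM64 a mapping bounces through the swiotlb whenever the guest pfn and the machine mfn differ (a foreign page), while the x86 stub keeps the old behaviour and never requests a bounce for this reason.

/* Illustrative sketch only: models the xen_arch_need_swiotlb() checks
 * introduced by the patch as plain user-space C. */
#include <stdbool.h>
#include <stdio.h>

/* ARM/ARM64 variant (arch/arm/xen/mm.c): bounce iff the page is
 * foreign, i.e. the guest pfn does not match the machine mfn. */
static bool arm_need_swiotlb(unsigned long pfn, unsigned long mfn)
{
	return pfn != mfn;
}

/* x86 variant (arch/x86/include/asm/xen/page.h): no change of
 * behaviour, never bounce for this reason. */
static bool x86_need_swiotlb(unsigned long pfn, unsigned long mfn)
{
	(void)pfn;
	(void)mfn;
	return false;
}

int main(void)
{
	/* Local page: pfn == mfn, no bounce needed on either architecture. */
	printf("arm, local page  : %d\n", arm_need_swiotlb(0x1000, 0x1000));
	/* Foreign (grant-mapped) page: pfn != mfn, ARM must bounce. */
	printf("arm, foreign page: %d\n", arm_need_swiotlb(0x1000, 0x9f00));
	printf("x86, foreign page: %d\n", x86_need_swiotlb(0x1000, 0x9f00));
	return 0;
}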