path: root/arch/arm/xen/mm32.c
#include <linux/cpu.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/highmem.h>

#include <xen/features.h>


/* functions called by SWIOTLB */

static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
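	/*
	 * Walk the buffer by pfn: highmem pages are handled one page at a
	 * time under a temporary kernel mapping, lowmem pages through the
	 * linear mapping; pfns without a struct page are skipped for now
	 * (see the TODO below).
	 */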
	unsigned long pfn;
	size_t left = size;

	pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	do {
		size_t len = left;
		void *vaddr;

		if (!pfn_valid(pfn)) {
			/* TODO: cache flush */
		} else {
			struct page *page = pfn_to_page(pfn);

			if (PageHighMem(page)) {
				if (len + offset > PAGE_SIZE)
					len = PAGE_SIZE - offset;

				if (cache_is_vipt_nonaliasing()) {
					vaddr = kmap_atomic(page);
					op(vaddr + offset, len, dir);
					kunmap_atomic(vaddr);
				} else {
					vaddr = kmap_high_get(page);
					if (vaddr) {
						op(vaddr + offset, len, dir);
						kunmap_high(page);
					}
				}
			} else {
				vaddr = page_address(page) + offset;
				op(vaddr, len, dir);
			}
		}

		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}

static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	/*
	 * Cannot use __dma_page_dev_to_cpu() because we don't have a
	 * struct page for the handle.
	 */

	if (dir != DMA_TO_DEVICE)
		outer_inv_range(handle, handle + size);

	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir,
			dmac_unmap_area);
}

static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	/*
	 * Inner (CPU) cache maintenance first, then the outer cache:
	 * invalidate for DMA_FROM_DEVICE, clean for the other directions.
	 */
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir,
			dmac_map_area);

	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(handle, handle + size);
	} else {
		outer_clean_range(handle, handle + size);
	}
}

void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
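	/*
	 * Skip the cache maintenance when the generic DMA ops have no
	 * unmap_page callback or when the caller asked for
	 * DMA_ATTR_SKIP_CPU_SYNC.
	 */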
	if (!__generic_dma_ops(hwdev)->unmap_page)
		return;
	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		return;

	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
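	/* Return ownership of the buffer to the CPU (dev-to-cpu maintenance). */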
	if (!__generic_dma_ops(hwdev)->sync_single_for_cpu)
		return;
	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
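	/* Pass ownership of the buffer to the device (cpu-to-dev maintenance). */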
	if (!__generic_dma_ops(hwdev)->sync_single_for_device)
		return;
	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
}

int __init xen_mm32_init(void)
{
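	/* No 32-bit specific setup is currently required for dom0 or domU. */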
	if (!xen_initial_domain())
		return 0;

	return 0;
}
arch_initcall(xen_mm32_init);