path: root/include/asm-s390/hugetlb.h
/*
 *  IBM System z Huge TLB Page Support for Kernel.
 *
 *    Copyright IBM Corp. 2008
 *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#ifndef _ASM_S390_HUGETLB_H
#define _ASM_S390_HUGETLB_H

#include <asm/page.h>
#include <asm/pgtable.h>

#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range			free_pgd_range

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte);

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}
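
/*
 * Illustration (not part of the kernel source): s390 huge pages are
 * 1 MB segments (HPAGE_SHIFT == 20 in asm/page.h), so both addr and
 * len must be 1 MB aligned:
 *
 *	prepare_hugepage_range(file, 0x40100000UL, 0x200000UL) == 0
 *	prepare_hugepage_range(file, 0x40123000UL, 0x200000UL) == -EINVAL
 */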

#define hugetlb_prefault_arch_hook(mm)		do { } while (0)

/*
 * These hooks prepare and release the page table that emulates a huge
 * page on machines without hardware large-page support; see
 * arch/s390/mm/hugetlbpage.c.
 */
int arch_prepare_hugepage(struct page *page);
void arch_release_hugepage(struct page *page);

static inline pte_t pte_mkhuge(pte_t pte)
{
	/*
	 * PROT_NONE needs to be remapped from the pte type to the ste type.
	 * The HW invalid bit is also different for pte and ste. The pte
	 * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
	 * bit, so we don't have to clear it.
	 */
	if (pte_val(pte) & _PAGE_INVALID) {
		if (pte_val(pte) & _PAGE_SWT)
			pte_val(pte) |= _HPAGE_TYPE_NONE;
		pte_val(pte) |= _SEGMENT_ENTRY_INV;
	}
	/*
	 * Clear SW pte bits SWT and SWX, there are no SW bits in a segment
	 * table entry.
	 */
	pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX);
	/*
	 * Also set the change-override bit because we don't need dirty bit
	 * tracking for hugetlbfs pages.
	 */
	pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
	return pte;
}
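
/*
 * For illustration, generic mm code builds a huge pte roughly like
 * this (a simplified sketch of make_huge_pte() in mm/hugetlb.c):
 *
 *	entry = pte_mkhuge(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
 *	set_huge_pte_at(mm, addr, ptep, entry);
 *
 * so the pte-to-ste remapping above happens once, when the mapping
 * is established.
 */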

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline int huge_pte_none(pte_t pte)
{
	/*
	 * An empty entry is invalid without the read-only bit; invalid
	 * plus read-only is the PROT_NONE encoding (see pte_mkhuge()).
	 */
	return (pte_val(pte) & _SEGMENT_ENTRY_INV) &&
		!(pte_val(pte) & _SEGMENT_ENTRY_RO);
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	pte_t pte = *ptep;
	unsigned long mask;

	if (!MACHINE_HAS_HPAGE) {
		/*
		 * Without hardware large-page support the entry is
		 * really a segment-table entry: follow its origin to
		 * the emulating page table, rebuild a huge pte from
		 * the first real pte and carry over the invalid and
		 * read-only bits of the segment entry.
		 */
		ptep = (pte_t *) (pte_val(pte) & _SEGMENT_ENTRY_ORIGIN);
		if (ptep) {
			mask = pte_val(pte) &
				(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
			pte = pte_mkhuge(*ptep);
			pte_val(pte) |= mask;
		}
	}
	return pte;
}

static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = huge_ptep_get(ptep);

	/* A huge pte lives in a pmd slot, so clear it as a pmd. */
	pmd_clear((pmd_t *) ptep);
	return pte;
}

static inline void __pmd_csp(pmd_t *pmdp)
{
	/*
	 * csp works on an even/odd register pair: reg2 holds the
	 * expected old entry, reg3 the invalidated replacement.
	 */
	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
					       _SEGMENT_ENTRY_INV;
	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;

	/*
	 * COMPARE AND SWAP AND PURGE replaces the entry and purges the
	 * TLB; it is the fallback for machines without idte.
	 */
	asm volatile(
		"	csp %1,%3"
		: "=m" (*pmdp)
		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
}

static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
{
	/* Compute the segment-table origin from the entry address. */
	unsigned long sto = (unsigned long) pmdp -
				pmd_index(address) * sizeof(pmd_t);

	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
		/*
		 * idte (opcode 0xb98e, INVALIDATE DAT TABLE ENTRY)
		 * invalidates the segment-table entry for this address
		 * and clears the matching TLB entries.
		 */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%2,%3,0,0"
			: "=m" (*pmdp)
			: "m" (*pmdp), "a" (sto),
			  "a" ((address & HPAGE_MASK))
		);
	}
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
}

static inline void huge_ptep_invalidate(struct mm_struct *mm,
					unsigned long address, pte_t *ptep)
{
	pmd_t *pmdp = (pmd_t *) ptep;

	/*
	 * Invalidate the segment-table entry with idte if the machine
	 * has it, with csp otherwise. Under execute-protection
	 * emulation (mm->context.noexec) a shadow segment table exists
	 * and must be invalidated as well.
	 */
	if (!MACHINE_HAS_IDTE) {
		__pmd_csp(pmdp);
		if (mm->context.noexec) {
			pmdp = get_shadow_table(pmdp);
			__pmd_csp(pmdp);
		}
		return;
	}

	__pmd_idte(address, pmdp);
	if (mm->context.noexec) {
		pmdp = get_shadow_table(pmdp);
		__pmd_idte(address, pmdp);
	}
}

#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
({									    \
	int __changed = !pte_same(huge_ptep_get(__ptep), __entry);	    \
	if (__changed) {						    \
		huge_ptep_invalidate((__vma)->vm_mm, __addr, __ptep);	    \
		set_huge_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);   \
	}								    \
	__changed;							    \
})
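
/*
 * Caller-side sketch (simplified from the hugetlb fault path in
 * mm/hugetlb.c): set the access bits on a copy of the entry and let
 * the macro above flush only if the entry really changed:
 *
 *	entry = pte_mkyoung(entry);
 *	if (dirty)
 *		entry = pte_mkdirty(entry);
 *	huge_ptep_set_access_flags(vma, address, ptep, entry, dirty);
 */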

/*
 * Write-protect a huge pte for copy-on-write. The tlb flush is
 * skipped when no other context can hold a valid translation, i.e.
 * the mm is single threaded and is the current active mm.
 */
#define huge_ptep_set_wrprotect(__mm, __addr, __ptep)			\
({									\
	pte_t __pte = huge_ptep_get(__ptep);				\
	if (pte_write(__pte)) {						\
		if (atomic_read(&(__mm)->mm_users) > 1 ||		\
		    (__mm) != current->active_mm)			\
			huge_ptep_invalidate(__mm, __addr, __ptep);	\
		set_huge_pte_at(__mm, __addr, __ptep,			\
				huge_pte_wrprotect(__pte));		\
	}								\
})
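
/*
 * Fork-path sketch (cf. copy_hugetlb_page_range() in mm/hugetlb.c):
 * for private mappings the parent pte is write-protected before it
 * is copied into the child, so both sides fault for copy-on-write:
 *
 *	if (cow)
 *		huge_ptep_set_wrprotect(src, addr, src_pte);
 *	entry = huge_ptep_get(src_pte);
 *	set_huge_pte_at(dst, addr, dst_pte, entry);
 */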

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	huge_ptep_invalidate(vma->vm_mm, address, ptep);
}

#endif /* _ASM_S390_HUGETLB_H */