Diffstat (limited to 'arch/s390/mm/pgtable.c')
-rw-r--r--  arch/s390/mm/pgtable.c | 226
1 file changed, 122 insertions(+), 104 deletions(-)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 1b79ca67392f..be99357d238c 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -18,6 +18,8 @@
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
+#include <linux/ksm.h>
+#include <linux/mman.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
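The two new includes are needed later in this patch: <linux/ksm.h> provides ksm_madvise() and <linux/mman.h> provides MADV_UNMERGEABLE, both used by the new s390_enable_skey() to unmerge KSM pages before storage keys are enabled.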
@@ -750,8 +752,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
break;
/* Walk the process page table, lock and get pte pointer */
ptep = get_locked_pte(gmap->mm, addr, &ptl);
- if (unlikely(!ptep))
- continue;
+ VM_BUG_ON(!ptep);
/* Set notification bit in the pgste of the pte */
entry = *ptep;
if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
@@ -761,7 +762,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
gaddr += PAGE_SIZE;
len -= PAGE_SIZE;
}
- spin_unlock(ptl);
+ pte_unmap_unlock(ptep, ptl);
}
up_read(&gmap->mm->mmap_sem);
return rc;
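A minimal sketch (illustrative only, not part of the patch) of the locking contract the second hunk restores: get_locked_pte() maps the pte and takes the page table lock in one step, so the release path must be pte_unmap_unlock(), which undoes both; a bare spin_unlock() skips the pte_unmap() half of the pairing (a no-op on s390, but keeping the pair matched keeps the code correct and annotation-clean). The function name below is invented for illustration.

static int example_update_pte(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *ptep;

	ptep = get_locked_pte(mm, addr, &ptl);	/* maps pte + takes ptl */
	if (unlikely(!ptep))
		return -EFAULT;
	/* ... inspect or modify *ptep while holding ptl ... */
	pte_unmap_unlock(ptep, ptl);		/* unmap + unlock together */
	return 0;
}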
@@ -834,99 +835,6 @@ static inline void page_table_free_pgste(unsigned long *table)
__free_page(page);
}
-static inline unsigned long page_table_reset_pte(struct mm_struct *mm, pmd_t *pmd,
- unsigned long addr, unsigned long end, bool init_skey)
-{
- pte_t *start_pte, *pte;
- spinlock_t *ptl;
- pgste_t pgste;
-
- start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
- pte = start_pte;
- do {
- pgste = pgste_get_lock(pte);
- pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
- if (init_skey) {
- unsigned long address;
-
- pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
- PGSTE_GR_BIT | PGSTE_GC_BIT);
-
- /* skip invalid and not writable pages */
- if (pte_val(*pte) & _PAGE_INVALID ||
- !(pte_val(*pte) & _PAGE_WRITE)) {
- pgste_set_unlock(pte, pgste);
- continue;
- }
-
- address = pte_val(*pte) & PAGE_MASK;
- page_set_storage_key(address, PAGE_DEFAULT_KEY, 1);
- }
- pgste_set_unlock(pte, pgste);
- } while (pte++, addr += PAGE_SIZE, addr != end);
- pte_unmap_unlock(start_pte, ptl);
-
- return addr;
-}
-
-static inline unsigned long page_table_reset_pmd(struct mm_struct *mm, pud_t *pud,
- unsigned long addr, unsigned long end, bool init_skey)
-{
- unsigned long next;
- pmd_t *pmd;
-
- pmd = pmd_offset(pud, addr);
- do {
- next = pmd_addr_end(addr, end);
- if (pmd_none_or_clear_bad(pmd))
- continue;
- next = page_table_reset_pte(mm, pmd, addr, next, init_skey);
- } while (pmd++, addr = next, addr != end);
-
- return addr;
-}
-
-static inline unsigned long page_table_reset_pud(struct mm_struct *mm, pgd_t *pgd,
- unsigned long addr, unsigned long end, bool init_skey)
-{
- unsigned long next;
- pud_t *pud;
-
- pud = pud_offset(pgd, addr);
- do {
- next = pud_addr_end(addr, end);
- if (pud_none_or_clear_bad(pud))
- continue;
- next = page_table_reset_pmd(mm, pud, addr, next, init_skey);
- } while (pud++, addr = next, addr != end);
-
- return addr;
-}
-
-void page_table_reset_pgste(struct mm_struct *mm, unsigned long start,
- unsigned long end, bool init_skey)
-{
- unsigned long addr, next;
- pgd_t *pgd;
-
- down_write(&mm->mmap_sem);
- if (init_skey && mm_use_skey(mm))
- goto out_up;
- addr = start;
- pgd = pgd_offset(mm, addr);
- do {
- next = pgd_addr_end(addr, end);
- if (pgd_none_or_clear_bad(pgd))
- continue;
- next = page_table_reset_pud(mm, pgd, addr, next, init_skey);
- } while (pgd++, addr = next, addr != end);
- if (init_skey)
- current->mm->context.use_skey = 1;
-out_up:
- up_write(&mm->mmap_sem);
-}
-EXPORT_SYMBOL(page_table_reset_pgste);
-
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
unsigned long key, bool nq)
{
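The ~90 lines removed above were a hand-rolled pgd->pud->pmd->pte descent used only to update pgstes. The rest of the patch replaces it with the kernel's generic page table walker; a condensed sketch of that pattern follows (callback and walker names invented for illustration, API as of this kernel version):

static int example_pte_entry(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	/*
	 * Called for every pte in the range whose pmd is present;
	 * a nonzero return value aborts the walk.
	 */
	return 0;
}

static void example_walk_all(struct mm_struct *mm)
{
	struct mm_walk walk = { .pte_entry = example_pte_entry };

	down_write(&mm->mmap_sem);
	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
	up_write(&mm->mmap_sem);
}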
@@ -936,7 +844,7 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
down_read(&mm->mmap_sem);
retry:
- ptep = get_locked_pte(current->mm, addr, &ptl);
+ ptep = get_locked_pte(mm, addr, &ptl);
if (unlikely(!ptep)) {
up_read(&mm->mmap_sem);
return -EFAULT;
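This one-liner is a genuine fix rather than a cleanup: the function takes mmap_sem on its mm argument (see the down_read() above), so resolving the pte through current->mm would touch the wrong address space whenever the caller passes an mm other than its own.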
@@ -980,6 +888,45 @@ retry:
}
EXPORT_SYMBOL(set_guest_storage_key);
+unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
+{
+ spinlock_t *ptl;
+ pgste_t pgste;
+ pte_t *ptep;
+ uint64_t physaddr;
+ unsigned long key = 0;
+
+ down_read(&mm->mmap_sem);
+ ptep = get_locked_pte(mm, addr, &ptl);
+ if (unlikely(!ptep)) {
+ up_read(&mm->mmap_sem);
+ return -EFAULT;
+ }
+ pgste = pgste_get_lock(ptep);
+
+ if (pte_val(*ptep) & _PAGE_INVALID) {
+ key |= (pgste_val(pgste) & PGSTE_ACC_BITS) >> 56;
+ key |= (pgste_val(pgste) & PGSTE_FP_BIT) >> 56;
+ key |= (pgste_val(pgste) & PGSTE_GR_BIT) >> 48;
+ key |= (pgste_val(pgste) & PGSTE_GC_BIT) >> 48;
+ } else {
+ physaddr = pte_val(*ptep) & PAGE_MASK;
+ key = page_get_storage_key(physaddr);
+
+ /* Reflect guest's logical view, not physical */
+ if (pgste_val(pgste) & PGSTE_GR_BIT)
+ key |= _PAGE_REFERENCED;
+ if (pgste_val(pgste) & PGSTE_GC_BIT)
+ key |= _PAGE_CHANGED;
+ }
+
+ pgste_set_unlock(ptep, pgste);
+ pte_unmap_unlock(ptep, ptl);
+ up_read(&mm->mmap_sem);
+ return key;
+}
+EXPORT_SYMBOL(get_guest_storage_key);
+
#else /* CONFIG_PGSTE */
static inline int page_table_with_pgste(struct page *page)
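One caveat for callers of the new get_guest_storage_key(): it returns the key in an unsigned long, so the -EFAULT error comes back cast into the same type and cannot be caught with a plain signed comparison. A hypothetical caller sketch (names invented; assumes <linux/err.h> for IS_ERR_VALUE()):

static int example_query_skey(struct mm_struct *mm, unsigned long useraddr,
			      unsigned char *key)
{
	unsigned long ret = get_guest_storage_key(mm, useraddr);

	if (IS_ERR_VALUE(ret))		/* catches -EFAULT in disguise */
		return (int) ret;
	*key = (unsigned char) ret;	/* ACC | F | R | C in the low byte */
	return 0;
}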
@@ -992,11 +939,6 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
return NULL;
}
-void page_table_reset_pgste(struct mm_struct *mm, unsigned long start,
- unsigned long end, bool init_skey)
-{
-}
-
static inline void page_table_free_pgste(unsigned long *table)
{
}
@@ -1347,13 +1289,89 @@ EXPORT_SYMBOL_GPL(s390_enable_sie);
* Enable storage key handling from now on and initialize the storage
* keys with the default key.
*/
-void s390_enable_skey(void)
+static int __s390_enable_skey(pte_t *pte, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
{
- page_table_reset_pgste(current->mm, 0, TASK_SIZE, true);
+ unsigned long ptev;
+ pgste_t pgste;
+
+ pgste = pgste_get_lock(pte);
+	/*
+	 * Remove all zero page mappings; once the policy forbidding
+	 * zero page mappings is established, subsequent faults on
+	 * these addresses will get fresh anonymous pages instead.
+	 */
+ if (is_zero_pfn(pte_pfn(*pte))) {
+ ptep_flush_direct(walk->mm, addr, pte);
+ pte_val(*pte) = _PAGE_INVALID;
+ }
+ /* Clear storage key */
+ pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
+ PGSTE_GR_BIT | PGSTE_GC_BIT);
+ ptev = pte_val(*pte);
+ if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
+ page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
+ pgste_set_unlock(pte, pgste);
+ return 0;
+}
+
+int s390_enable_skey(void)
+{
+ struct mm_walk walk = { .pte_entry = __s390_enable_skey };
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ int rc = 0;
+
+ down_write(&mm->mmap_sem);
+ if (mm_use_skey(mm))
+ goto out_up;
+
+ mm->context.use_skey = 1;
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
+ MADV_UNMERGEABLE, &vma->vm_flags)) {
+ mm->context.use_skey = 0;
+ rc = -ENOMEM;
+ goto out_up;
+ }
+ }
+ mm->def_flags &= ~VM_MERGEABLE;
+
+ walk.mm = mm;
+ walk_page_range(0, TASK_SIZE, &walk);
+
+out_up:
+ up_write(&mm->mmap_sem);
+ return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);
/*
+ * Reset CMMA state, make all pages stable again.
+ */
+static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ pgste_t pgste;
+
+ pgste = pgste_get_lock(pte);
+ pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
+ pgste_set_unlock(pte, pgste);
+ return 0;
+}
+
+void s390_reset_cmma(struct mm_struct *mm)
+{
+ struct mm_walk walk = { .pte_entry = __s390_reset_cmma };
+
+ down_write(&mm->mmap_sem);
+ walk.mm = mm;
+ walk_page_range(0, TASK_SIZE, &walk);
+ up_write(&mm->mmap_sem);
+}
+EXPORT_SYMBOL_GPL(s390_reset_cmma);
+
+/*
* Test and reset if a guest page is dirty
*/
bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap)
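Note also the signature change above: s390_enable_skey() went from void to int so the -ENOMEM from the KSM unmerge loop can propagate. A hypothetical call-site sketch (the real callers live in the KVM code):

static int example_enable_guest_skeys(void)
{
	int rc;

	rc = s390_enable_skey();	/* may fail with -ENOMEM */
	if (rc)
		return rc;
	/* from here on, guest storage keys are interpreted */
	return 0;
}

The helper sets mm->context.use_skey before the unmerge loop and rolls it back on failure, so pages freshly instantiated while unmerging are already covered by the storage-key policy.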