author	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-16 14:01:03 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-16 14:01:03 -0500
commit	497a5df7bf6ffd136ae21c49d1a01292930d7ca2 (patch)
tree	774215d43cdc9fbd8660086d1801400163dcbd52 /arch/x86/xen/mmu.c
parent	714d8e7e27197dd39b2550e762a6a6fcf397a471 (diff)
parent	0b97b03d88b40bfbd7ff0e069186a137d9786d43 (diff)
Merge tag 'stable/for-linus-4.1-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull xen features and fixes from David Vrabel:

 - use a single source list of hypercalls, generating other tables etc.
   at build time.

 - add a "Xen PV" APIC driver to support >255 VCPUs in PV guests.

 - significant performance improvements to guest save/restore/migration.

 - scsiback/front save/restore support.

 - infrastructure for multi-page xenbus rings.

 - misc fixes.

* tag 'stable/for-linus-4.1-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/pci: Try harder to get PXM information for Xen
  xenbus_client: Extend interface to support multi-page ring
  xen-pciback: also support disabling of bus-mastering and memory-write-invalidate
  xen: support suspend/resume in pvscsi frontend
  xen: scsiback: add LUN of restored domain
  xen-scsiback: define a pr_fmt macro with xen-pvscsi
  xen/mce: fix up xen_late_init_mcelog() error handling
  xen/privcmd: improve performance of MMAPBATCH_V2
  xen: unify foreign GFN map/unmap for auto-xlated physmap guests
  x86/xen/apic: WARN with details.
  x86/xen: Provide a "Xen PV" APIC driver to support >255 VCPUs
  xen/pciback: Don't print scary messages when unsupported by hypervisor.
  xen: use generated hypercall symbols in arch/x86/xen/xen-head.S
  xen: use generated hypervisor symbols in arch/x86/xen/trace.c
  xen: synchronize include/xen/interface/xen.h with xen
  xen: build infrastructure for generating hypercall depending symbols
  xen: balloon: Use static attribute groups for sysfs entries
  xen: pcpu: Use static attribute groups for sysfs entry
Diffstat (limited to 'arch/x86/xen/mmu.c')
-rw-r--r--	arch/x86/xen/mmu.c	207
1 file changed, 83 insertions(+), 124 deletions(-)
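
For context before the diff: after this change, do_remap_mfn() backs two entry points. xen_remap_domain_mfn_range() keeps the old contiguous semantics (one starting MFN, no per-frame errors), while the new xen_remap_domain_mfn_array() takes one MFN per frame plus an err_ptr array and records a per-frame error instead of aborting on the first failure. A minimal caller-side sketch of the array variant (a hypothetical helper, not part of this patch; it assumes the declarations exported via include/xen/xen-ops.h):

#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <xen/xen-ops.h>

/*
 * Hypothetical example (not in this patch): map @nr foreign frames
 * listed in @mfns into @vma at @addr, collecting per-frame status.
 * xen_remap_domain_mfn_array() returns the number of frames mapped
 * (or a negative error); individual failures land in the errs array.
 */
static int example_map_foreign_frames(struct vm_area_struct *vma,
				      unsigned long addr,
				      xen_pfn_t *mfns, int nr,
				      unsigned int domid,
				      struct page **pages)
{
	int *errs;
	int mapped, i;

	errs = kcalloc(nr, sizeof(*errs), GFP_KERNEL);
	if (!errs)
		return -ENOMEM;

	/* err_ptr must be non-NULL here; the array variant BUG_ONs on NULL. */
	mapped = xen_remap_domain_mfn_array(vma, addr, mfns, nr, errs,
					    vma->vm_page_prot, domid, pages);
	if (mapped >= 0) {
		for (i = 0; i < nr; i++)
			if (errs[i])
				pr_warn("frame %d failed: %d\n", i, errs[i]);
	}

	kfree(errs);
	return mapped < 0 ? mapped : 0;
}

The real consumer of this shape is privcmd's MMAPBATCH_V2 path (see "xen/privcmd: improve performance of MMAPBATCH_V2" in the list above), where @err_ptr may even alias @mfn; that is why the loop in the diff below clears err_ptr entries only after the corresponding mfn slots have been consumed.
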
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 65083ad63b6f..dd151b2045b0 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2436,99 +2436,11 @@ void __init xen_hvm_init_mmu_ops(void)
}
#endif
-#ifdef CONFIG_XEN_PVH
-/*
- * Map foreign gfn (fgfn), to local pfn (lpfn). This for the user
- * space creating new guest on pvh dom0 and needing to map domU pages.
- */
-static int xlate_add_to_p2m(unsigned long lpfn, unsigned long fgfn,
- unsigned int domid)
-{
- int rc, err = 0;
- xen_pfn_t gpfn = lpfn;
- xen_ulong_t idx = fgfn;
-
- struct xen_add_to_physmap_range xatp = {
- .domid = DOMID_SELF,
- .foreign_domid = domid,
- .size = 1,
- .space = XENMAPSPACE_gmfn_foreign,
- };
- set_xen_guest_handle(xatp.idxs, &idx);
- set_xen_guest_handle(xatp.gpfns, &gpfn);
- set_xen_guest_handle(xatp.errs, &err);
-
- rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
- if (rc < 0)
- return rc;
- return err;
-}
-
-static int xlate_remove_from_p2m(unsigned long spfn, int count)
-{
- struct xen_remove_from_physmap xrp;
- int i, rc;
-
- for (i = 0; i < count; i++) {
- xrp.domid = DOMID_SELF;
- xrp.gpfn = spfn+i;
- rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
- if (rc)
- break;
- }
- return rc;
-}
-
-struct xlate_remap_data {
- unsigned long fgfn; /* foreign domain's gfn */
- pgprot_t prot;
- domid_t domid;
- int index;
- struct page **pages;
-};
-
-static int xlate_map_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
- void *data)
-{
- int rc;
- struct xlate_remap_data *remap = data;
- unsigned long pfn = page_to_pfn(remap->pages[remap->index++]);
- pte_t pteval = pte_mkspecial(pfn_pte(pfn, remap->prot));
-
- rc = xlate_add_to_p2m(pfn, remap->fgfn, remap->domid);
- if (rc)
- return rc;
- native_set_pte(ptep, pteval);
-
- return 0;
-}
-
-static int xlate_remap_gfn_range(struct vm_area_struct *vma,
- unsigned long addr, unsigned long mfn,
- int nr, pgprot_t prot, unsigned domid,
- struct page **pages)
-{
- int err;
- struct xlate_remap_data pvhdata;
-
- BUG_ON(!pages);
-
- pvhdata.fgfn = mfn;
- pvhdata.prot = prot;
- pvhdata.domid = domid;
- pvhdata.index = 0;
- pvhdata.pages = pages;
- err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
- xlate_map_pte_fn, &pvhdata);
- flush_tlb_all();
- return err;
-}
-#endif
-
#define REMAP_BATCH_SIZE 16
struct remap_data {
- unsigned long mfn;
+ xen_pfn_t *mfn;
+ bool contiguous;
pgprot_t prot;
struct mmu_update *mmu_update;
};
@@ -2537,7 +2449,14 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
unsigned long addr, void *data)
{
struct remap_data *rmd = data;
- pte_t pte = pte_mkspecial(mfn_pte(rmd->mfn++, rmd->prot));
+ pte_t pte = pte_mkspecial(mfn_pte(*rmd->mfn, rmd->prot));
+
+ /* If we have a contiguous range, just update the mfn itself,
+ else update the pointer to be "next mfn". */
+ if (rmd->contiguous)
+ (*rmd->mfn)++;
+ else
+ rmd->mfn++;
rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
rmd->mmu_update->val = pte_val_ma(pte);
@@ -2546,26 +2465,26 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
return 0;
}
-int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
- unsigned long addr,
- xen_pfn_t mfn, int nr,
- pgprot_t prot, unsigned domid,
- struct page **pages)
-
+static int do_remap_mfn(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t *mfn, int nr,
+ int *err_ptr, pgprot_t prot,
+ unsigned domid,
+ struct page **pages)
{
+ int err = 0;
struct remap_data rmd;
struct mmu_update mmu_update[REMAP_BATCH_SIZE];
- int batch;
unsigned long range;
- int err = 0;
+ int mapped = 0;
BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
if (xen_feature(XENFEAT_auto_translated_physmap)) {
#ifdef CONFIG_XEN_PVH
/* We need to update the local page tables and the xen HAP */
- return xlate_remap_gfn_range(vma, addr, mfn, nr, prot,
- domid, pages);
+ return xen_xlate_remap_gfn_array(vma, addr, mfn, nr, err_ptr,
+ prot, domid, pages);
#else
return -EINVAL;
#endif
@@ -2573,9 +2492,15 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
rmd.mfn = mfn;
rmd.prot = prot;
+ /* We use err_ptr to indicate whether we are doing a contiguous
+ * mapping or a discontiguous mapping. */
+ rmd.contiguous = !err_ptr;
while (nr) {
- batch = min(REMAP_BATCH_SIZE, nr);
+ int index = 0;
+ int done = 0;
+ int batch = min(REMAP_BATCH_SIZE, nr);
+ int batch_left = batch;
range = (unsigned long)batch << PAGE_SHIFT;
rmd.mmu_update = mmu_update;
@@ -2584,23 +2509,72 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
if (err)
goto out;
- err = HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid);
- if (err < 0)
- goto out;
+ /* We record an error for each page that fails, but continue
+ * mapping until the whole set is done */
+ do {
+ int i;
+
+ err = HYPERVISOR_mmu_update(&mmu_update[index],
+ batch_left, &done, domid);
+
+ /*
+ * @err_ptr may be the same buffer as @mfn, so
+ * only clear it after each chunk of @mfn is
+ * used.
+ */
+ if (err_ptr) {
+ for (i = index; i < index + done; i++)
+ err_ptr[i] = 0;
+ }
+ if (err < 0) {
+ if (!err_ptr)
+ goto out;
+ err_ptr[i] = err;
+ done++; /* Skip failed frame. */
+ } else
+ mapped += done;
+ batch_left -= done;
+ index += done;
+ } while (batch_left);
nr -= batch;
addr += range;
+ if (err_ptr)
+ err_ptr += batch;
}
-
- err = 0;
out:
xen_flush_tlb_all();
- return err;
+ return err < 0 ? err : mapped;
+}
+
+int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t mfn, int nr,
+ pgprot_t prot, unsigned domid,
+ struct page **pages)
+{
+ return do_remap_mfn(vma, addr, &mfn, nr, NULL, prot, domid, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
+int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t *mfn, int nr,
+ int *err_ptr, pgprot_t prot,
+ unsigned domid, struct page **pages)
+{
+ /* We BUG_ON because it's a programmer error to pass a NULL err_ptr;
+ * without it, a later failure is reduced to a hard-to-diagnose
+ * "wrong memory was mapped in" symptom.
+ */
+ BUG_ON(err_ptr == NULL);
+ return do_remap_mfn(vma, addr, mfn, nr, err_ptr, prot, domid, pages);
+}
+EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
+
+
/* Returns: 0 success */
int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
int numpgs, struct page **pages)
@@ -2609,22 +2583,7 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
return 0;
#ifdef CONFIG_XEN_PVH
- while (numpgs--) {
- /*
- * The mmu has already cleaned up the process mmu
- * resources at this point (lookup_address will return
- * NULL).
- */
- unsigned long pfn = page_to_pfn(pages[numpgs]);
-
- xlate_remove_from_p2m(pfn, 1);
- }
- /*
- * We don't need to flush tlbs because as part of
- * xlate_remove_from_p2m, the hypervisor will do tlb flushes
- * after removing the p2m entries from the EPT/NPT
- */
- return 0;
+ return xen_xlate_unmap_gfn_range(vma, numpgs, pages);
#else
return -EINVAL;
#endif
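
The batched error handling in do_remap_mfn() is the subtle part of this patch, and it is hard to read interleaved with diff markers. Below is a standalone userspace-C sketch of the same control flow, with fake_mmu_update() standing in for HYPERVISOR_mmu_update(); everything in it is illustrative, not kernel code:

#include <errno.h>
#include <stdio.h>

/*
 * fake_mmu_update() stands in for HYPERVISOR_mmu_update(): it
 * "updates" frames until it hits a failing one (negative value),
 * reports how many succeeded via *done, and returns -EFAULT.
 */
static int fake_mmu_update(const int *frames, int count, int *done)
{
	int i;

	for (i = 0; i < count; i++) {
		if (frames[i] < 0) {
			*done = i;
			return -EFAULT;
		}
	}
	*done = count;
	return 0;
}

int main(void)
{
	int frames[] = { 1, 2, -1, 4, -1, 6 };	/* frames 2 and 4 fail */
	int err_ptr[6] = { 0 };
	int batch = 6, batch_left = batch;
	int index = 0, done = 0, mapped = 0;
	int err, i;

	/* Same control flow as the do/while loop in do_remap_mfn(). */
	do {
		err = fake_mmu_update(&frames[index], batch_left, &done);

		/* Clear the error slot of every frame that did map. */
		for (i = index; i < index + done; i++)
			err_ptr[i] = 0;

		if (err < 0) {
			err_ptr[i] = err;	/* record the failure... */
			done++;			/* ...and skip that frame */
		} else {
			mapped += done;
		}
		batch_left -= done;
		index += done;
	} while (batch_left);

	printf("mapped %d frame(s); per-frame errors:", mapped);
	for (i = 0; i < batch; i++)
		printf(" %d", err_ptr[i]);
	printf("\n");
	return 0;
}

With this sample input the program prints per-frame errors of 0 0 -14 0 -14 0. Note that, mirroring the kernel loop, a chunk that ends in a failure only records the error and advances index past the bad frame; only a chunk that completes without error adds its done count to mapped, which is what the function ultimately returns to the caller.
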