Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kernel/traps.c	20
-rw-r--r--	arch/x86/kvm/x86.c	5
-rw-r--r--	arch/x86/xen/setup.c	65
3 files changed, 41 insertions, 49 deletions
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index ade185a46b1d..679302c312f8 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -109,6 +109,12 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
 	preempt_count_dec();
 }
 
+/*
+ * In IST context, we explicitly disable preemption. This serves two
+ * purposes: it makes it much less likely that we would accidentally
+ * schedule in IST context and it will force a warning if we somehow
+ * manage to schedule by accident.
+ */
 void ist_enter(struct pt_regs *regs)
 {
 	if (user_mode(regs)) {
@@ -123,13 +129,7 @@ void ist_enter(struct pt_regs *regs)
 		rcu_nmi_enter();
 	}
 
-	/*
-	 * We are atomic because we're on the IST stack; or we're on
-	 * x86_32, in which case we still shouldn't schedule; or we're
-	 * on x86_64 and entered from user mode, in which case we're
-	 * still atomic unless ist_begin_non_atomic is called.
-	 */
-	preempt_count_add(HARDIRQ_OFFSET);
+	preempt_disable();
 
 	/* This code is a bit fragile. Test it. */
 	RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
@@ -137,7 +137,7 @@ void ist_enter(struct pt_regs *regs)
 
 void ist_exit(struct pt_regs *regs)
 {
-	preempt_count_sub(HARDIRQ_OFFSET);
+	preempt_enable_no_resched();
 
 	if (!user_mode(regs))
 		rcu_nmi_exit();
@@ -168,7 +168,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
 	BUG_ON((unsigned long)(current_top_of_stack() -
 			       current_stack_pointer()) >= THREAD_SIZE);
 
-	preempt_count_sub(HARDIRQ_OFFSET);
+	preempt_enable_no_resched();
 }
 
 /**
@@ -178,7 +178,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
  */
 void ist_end_non_atomic(void)
 {
-	preempt_count_add(HARDIRQ_OFFSET);
+	preempt_disable();
 }
 
 static nokprobe_inline int
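
Why the traps.c change matters: preempt_count_add(HARDIRQ_OFFSET) did not merely block scheduling, it also made in_interrupt() and in_irq() return true for the whole IST section, misleading any code that tests those predicates. A bare preempt_disable() still prevents scheduling and, as the new comment notes, still trips the scheduling-while-atomic warning if something sleeps anyway. A minimal sketch of how a handler pairs these helpers; the handler name and body are hypothetical, not part of this patch:

/* Hypothetical IST handler showing the intended enter/exit pairing. */
dotraplinkage void do_example_ist_trap(struct pt_regs *regs, long error_code)
{
	ist_enter(regs);		/* preemption is disabled from here on */

	/* ... work that must remain atomic: inspect regs, record state ... */

	if (user_mode(regs)) {
		/* Legal only when the exception came from user mode. */
		ist_begin_non_atomic(regs);	/* preemption re-enabled */
		/* ... sleepable work, e.g. delivering a signal ... */
		ist_end_non_atomic();		/* atomic again */
	}

	ist_exit(regs);
}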
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 605cea75eb0d..be222666b1c2 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3014,6 +3014,11 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
 	if (dbgregs->flags)
 		return -EINVAL;
 
+	if (dbgregs->dr6 & ~0xffffffffull)
+		return -EINVAL;
+	if (dbgregs->dr7 & ~0xffffffffull)
+		return -EINVAL;
+
 	memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
 	kvm_update_dr0123(vcpu);
 	vcpu->arch.dr6 = dbgregs->dr6;
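
The two new checks make kvm_vcpu_ioctl_x86_set_debugregs() reject dr6/dr7 values with any of bits 63:32 set before they reach vcpu state: on x86-64, moving such a value into the hardware DR6/DR7 raises #GP, so letting it through could crash the host when the registers are later loaded. A hedged userspace sketch of what now gets refused, assuming an already-created KVM vcpu file descriptor (all other KVM setup omitted):

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Returns 0 on success, -errno on failure; with this patch applied the
 * ioctl below should fail with -EINVAL instead of reaching the vcpu. */
static int set_bogus_dbgregs(int vcpu_fd)
{
	struct kvm_debugregs regs;

	memset(&regs, 0, sizeof(regs));
	regs.dr6 = 1ULL << 32;	/* upper half non-zero: now rejected */

	if (ioctl(vcpu_fd, KVM_SET_DEBUGREGS, &regs) < 0)
		return -errno;
	return 0;
}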
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 7ab29518a3b9..e345891450c3 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -393,6 +393,9 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
 	unsigned long i = 0;
 	unsigned long n = end_pfn - start_pfn;
 
+	if (remap_pfn == 0)
+		remap_pfn = nr_pages;
+
 	while (i < n) {
 		unsigned long cur_pfn = start_pfn + i;
 		unsigned long left = n - i;
@@ -438,17 +441,29 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
 	return remap_pfn;
 }
 
-static void __init xen_set_identity_and_remap(unsigned long nr_pages)
+static unsigned long __init xen_count_remap_pages(
+	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
+	unsigned long remap_pages)
+{
+	if (start_pfn >= nr_pages)
+		return remap_pages;
+
+	return remap_pages + min(end_pfn, nr_pages) - start_pfn;
+}
+
+static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
+	unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
+			      unsigned long nr_pages, unsigned long last_val))
 {
 	phys_addr_t start = 0;
-	unsigned long last_pfn = nr_pages;
+	unsigned long ret_val = 0;
 	const struct e820entry *entry = xen_e820_map;
 	int i;
 
 	/*
 	 * Combine non-RAM regions and gaps until a RAM region (or the
-	 * end of the map) is reached, then set the 1:1 map and
-	 * remap the memory in those non-RAM regions.
+	 * end of the map) is reached, then call the provided function
+	 * to perform its duty on the non-RAM region.
 	 *
 	 * The combined non-RAM regions are rounded to a whole number
 	 * of pages so any partial pages are accessible via the 1:1
@@ -466,14 +481,13 @@ static void __init xen_set_identity_and_remap(unsigned long nr_pages)
 			unsigned long end_pfn = PFN_UP(entry->addr);
 
 			if (start_pfn < end_pfn)
-				last_pfn = xen_set_identity_and_remap_chunk(
-						start_pfn, end_pfn, nr_pages,
-						last_pfn);
+				ret_val = func(start_pfn, end_pfn, nr_pages,
+					       ret_val);
 			start = end;
 		}
 	}
 
-	pr_info("Released %ld page(s)\n", xen_released_pages);
+	return ret_val;
 }
 
 /*
@@ -596,35 +610,6 @@ static void __init xen_ignore_unusable(void)
 	}
 }
 
-static unsigned long __init xen_count_remap_pages(unsigned long max_pfn)
-{
-	unsigned long extra = 0;
-	unsigned long start_pfn, end_pfn;
-	const struct e820entry *entry = xen_e820_map;
-	int i;
-
-	end_pfn = 0;
-	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
-		start_pfn = PFN_DOWN(entry->addr);
-		/* Adjacent regions on non-page boundaries handling! */
-		end_pfn = min(end_pfn, start_pfn);
-
-		if (start_pfn >= max_pfn)
-			return extra + max_pfn - end_pfn;
-
-		/* Add any holes in map to result. */
-		extra += start_pfn - end_pfn;
-
-		end_pfn = PFN_UP(entry->addr + entry->size);
-		end_pfn = min(end_pfn, max_pfn);
-
-		if (entry->type != E820_RAM)
-			extra += end_pfn - start_pfn;
-	}
-
-	return extra;
-}
-
 bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
 {
 	struct e820entry *entry;
@@ -804,7 +789,7 @@ char * __init xen_memory_setup(void)
 	max_pages = xen_get_max_pages();
 
 	/* How many extra pages do we need due to remapping? */
-	max_pages += xen_count_remap_pages(max_pfn);
+	max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);
 
 	if (max_pages > max_pfn)
 		extra_pages += max_pages - max_pfn;
@@ -922,7 +907,9 @@ char * __init xen_memory_setup(void)
 	 * Set identity map on non-RAM pages and prepare remapping the
 	 * underlying RAM.
 	 */
-	xen_set_identity_and_remap(max_pfn);
+	xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);
+
+	pr_info("Released %ld page(s)\n", xen_released_pages);
 
 	return "Xen";
 }
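
Taken together, the setup.c hunks fold two near-duplicate e820 walks (the old xen_set_identity_and_remap() and the old xen_count_remap_pages()) into one iterator, xen_foreach_remap_area(), which invokes a callback per combined non-RAM region and threads an accumulator through the calls. The accumulator starts at 0, which is why xen_set_identity_and_remap_chunk() now substitutes nr_pages when its first call passes remap_pfn == 0. A self-contained userspace sketch of the same pattern, with hypothetical names and a made-up region list rather than the kernel code itself:

#include <stdio.h>

struct region { unsigned long start_pfn, end_pfn; };

typedef unsigned long (*region_fn)(unsigned long start_pfn,
				   unsigned long end_pfn,
				   unsigned long nr_pages,
				   unsigned long last_val);

/* Analogue of the new xen_count_remap_pages(): accumulate a page count. */
static unsigned long count_pages(unsigned long start_pfn,
				 unsigned long end_pfn,
				 unsigned long nr_pages,
				 unsigned long last_val)
{
	if (start_pfn >= nr_pages)
		return last_val;
	return last_val + (end_pfn < nr_pages ? end_pfn : nr_pages) - start_pfn;
}

/* Analogue of xen_foreach_remap_area(): one walk, behavior injected. */
static unsigned long foreach_region(const struct region *r, int n,
				    unsigned long nr_pages, region_fn func)
{
	unsigned long ret_val = 0;
	int i;

	for (i = 0; i < n; i++)
		ret_val = func(r[i].start_pfn, r[i].end_pfn, nr_pages, ret_val);
	return ret_val;
}

int main(void)
{
	const struct region holes[] = { { 0xa0, 0x100 }, { 0x9000, 0x9800 } };

	printf("pages to remap: %lu\n",
	       foreach_region(holes, 2, 0x10000, count_pages));
	return 0;
}

The payoff is the usual one for de-duplicating traversal code: the e820 details live in one place, and the counting pass and the identity-map/remap pass differ only in the callback handed to the walk.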