author | Steven Rostedt (VMware) <rostedt@goodmis.org> | 2019-06-14 16:26:50 -0400
---|---|---
committer | Steven Rostedt (VMware) <rostedt@goodmis.org> | 2019-06-14 16:26:50 -0400
commit | ca699d88fdbba0b809f564d198323ab03d5b4fd5 (patch) |
tree | 8647bfcef73254a4ff84e708534e89f550b0f73f /arch/x86 |
parent | 5b4910e057b97b241219a42bc32bc1caa605f05a (diff) |
parent | 768292d053619b2725b846ed2bf556bf40f43de2 (diff) |
Merge tag 'v4.19.50' into v4.19-rt
This is the 4.19.50 stable release
Diffstat (limited to 'arch/x86')
-rw-r--r-- | arch/x86/Makefile | 2
-rw-r--r-- | arch/x86/entry/calling.h | 2
-rw-r--r-- | arch/x86/include/asm/cpufeature.h | 2
-rw-r--r-- | arch/x86/include/asm/jump_label.h | 13
-rw-r--r-- | arch/x86/include/asm/rmwcc.h | 6
-rw-r--r-- | arch/x86/kernel/Makefile | 3
-rw-r--r-- | arch/x86/kernel/ftrace.c | 49
-rw-r--r-- | arch/x86/kernel/ftrace_64.S | 8
-rw-r--r-- | arch/x86/kernel/jump_label.c | 4
-rw-r--r-- | arch/x86/kernel/kprobes/core.c | 24
-rw-r--r-- | arch/x86/kernel/vmlinux.lds.S | 6
-rw-r--r-- | arch/x86/kvm/emulate.c | 2
-rw-r--r-- | arch/x86/kvm/x86.c | 3
-rw-r--r-- | arch/x86/lib/insn-eval.c | 47
-rw-r--r-- | arch/x86/power/cpu.c | 10
-rw-r--r-- | arch/x86/power/hibernate_64.c | 33
16 files changed, 136 insertions, 78 deletions
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index ab2071e40efe..ce0d0424a53d 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -305,7 +305,7 @@ vdso_install:
 
 archprepare: checkbin
 checkbin:
-ifndef CC_HAVE_ASM_GOTO
+ifndef CONFIG_CC_HAS_ASM_GOTO
        @echo Compiler lacks asm-goto support.
        @exit 1
 endif
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 352e70cd33e8..e699b2041665 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -337,7 +337,7 @@ For 32-bit we have the following conventions - kernel is built with
  */
 .macro CALL_enter_from_user_mode
 #ifdef CONFIG_CONTEXT_TRACKING
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
        STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0
 #endif
        call enter_from_user_mode
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index aced6c9290d6..ce95b8cbd229 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -140,7 +140,7 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
 
 #define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
 
-#if defined(__clang__) && !defined(CC_HAVE_ASM_GOTO)
+#if defined(__clang__) && !defined(CONFIG_CC_HAS_ASM_GOTO)
 
 /*
  * Workaround for the sake of BPF compilation which utilizes kernel
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 8c0de4282659..7010e1c594c4 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -2,19 +2,6 @@
 #ifndef _ASM_X86_JUMP_LABEL_H
 #define _ASM_X86_JUMP_LABEL_H
 
-#ifndef HAVE_JUMP_LABEL
-/*
- * For better or for worse, if jump labels (the gcc extension) are missing,
- * then the entire static branch patching infrastructure is compiled out.
- * If that happens, the code in here will malfunction. Raise a compiler
- * error instead.
- *
- * In theory, jump labels and the static branch patching infrastructure
- * could be decoupled to fix this.
- */
-#error asm/jump_label.h included on a non-jump-label kernel
-#endif
-
 #define JUMP_LABEL_NOP_SIZE 5
 
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
index 4914a3e7c803..033dc7ca49e9 100644
--- a/arch/x86/include/asm/rmwcc.h
+++ b/arch/x86/include/asm/rmwcc.h
@@ -4,7 +4,7 @@
 
 #define __CLOBBERS_MEM(clb...)	"memory", ## clb
 
-#if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CC_HAVE_ASM_GOTO)
+#if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CONFIG_CC_HAS_ASM_GOTO)
 
 /* Use asm goto */
 
@@ -21,7 +21,7 @@ cc_label:						\
 
 #define __BINARY_RMWcc_ARG	" %1, "
 
-#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
+#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */
 
 /* Use flags output or a set instruction */
 
@@ -36,7 +36,7 @@ do {						\
 
 #define __BINARY_RMWcc_ARG	" %2, "
 
-#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
+#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */
 
 #define GEN_UNARY_RMWcc(op, var, arg0, cc)		\
        __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM())
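Several of the hunks above only rename the compiler-capability guard from CC_HAVE_ASM_GOTO to the Kconfig-provided CONFIG_CC_HAS_ASM_GOTO. For readers who have not met the feature being guarded, here is a minimal, stand-alone sketch of GCC's "asm goto" extension — user-space x86-64 code, not part of this patch — showing how inline assembly can branch straight to a C label, which is what lets rmwcc.h consume a CPU flag without first copying it into a register:

```c
/*
 * Stand-alone illustration of GCC's "asm goto" extension on x86-64 —
 * user-space code, not from this patch. The inline assembly tests a bit
 * and, if it is set, branches directly to a C label, so the carry flag
 * never has to be materialized in a register.
 */
#include <stdio.h>

static int bit_is_set(const unsigned long *word, unsigned long bit)
{
	asm goto("bt %1, %0\n\t"
		 "jc %l[yes]"
		 : /* asm goto takes no outputs */
		 : "m" (*word), "r" (bit)
		 : "cc"
		 : yes);
	return 0;
yes:
	return 1;
}

int main(void)
{
	unsigned long w = 1UL << 3;

	printf("bit 3: %d, bit 4: %d\n",
	       bit_is_set(&w, 3), bit_is_set(&w, 4));	/* prints 1, 0 */
	return 0;
}
```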
"memory", ## clb -#if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CC_HAVE_ASM_GOTO) +#if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CONFIG_CC_HAS_ASM_GOTO) /* Use asm goto */ @@ -21,7 +21,7 @@ cc_label: \ #define __BINARY_RMWcc_ARG " %1, " -#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */ +#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */ /* Use flags output or a set instruction */ @@ -36,7 +36,7 @@ do { \ #define __BINARY_RMWcc_ARG " %2, " -#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */ +#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */ #define GEN_UNARY_RMWcc(op, var, arg0, cc) \ __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM()) diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 8824d01c0c35..da0b6bc090f3 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -49,7 +49,8 @@ obj-$(CONFIG_COMPAT) += signal_compat.o obj-y += traps.o idt.o irq.o irq_$(BITS).o dumpstack_$(BITS).o obj-y += time.o ioport.o dumpstack.o nmi.o obj-$(CONFIG_MODIFY_LDT_SYSCALL) += ldt.o -obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o +obj-y += setup.o x86_init.o i8259.o irqinit.o +obj-$(CONFIG_JUMP_LABEL) += jump_label.o obj-$(CONFIG_IRQ_WORK) += irq_work.o obj-y += probe_roms.o obj-$(CONFIG_X86_64) += sys_x86_64.o diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 4d2a401c178b..9f033dfd2766 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -752,18 +752,21 @@ union ftrace_op_code_union { } __attribute__((packed)); }; +#define RET_SIZE 1 + static unsigned long create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) { - unsigned const char *jmp; unsigned long start_offset; unsigned long end_offset; unsigned long op_offset; unsigned long offset; + unsigned long npages; unsigned long size; - unsigned long ip; + unsigned long retq; unsigned long *ptr; void *trampoline; + void *ip; /* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */ unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 }; union ftrace_op_code_union op_ptr; @@ -783,27 +786,28 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) /* * Allocate enough size to store the ftrace_caller code, - * the jmp to ftrace_epilogue, as well as the address of - * the ftrace_ops this trampoline is used for. + * the iret , as well as the address of the ftrace_ops this + * trampoline is used for. 
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index 91b2cff4b79a..75f2b36b41a6 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -171,9 +171,6 @@ GLOBAL(ftrace_call)
        restore_mcount_regs
 
        /*
-        * The copied trampoline must call ftrace_epilogue as it
-        * still may need to call the function graph tracer.
-        *
         * The code up to this label is copied into trampolines so
         * think twice before adding any new code or changing the
         * layout here.
@@ -185,7 +182,10 @@ GLOBAL(ftrace_graph_call)
        jmp ftrace_stub
 #endif
 
-/* This is weak to keep gas from relaxing the jumps */
+/*
+ * This is weak to keep gas from relaxing the jumps.
+ * It is also used to copy the retq for trampolines.
+ */
 WEAK(ftrace_stub)
        retq
 ENDPROC(ftrace_caller)
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index eeea935e9bb5..4c3d9a3d45b2 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -16,8 +16,6 @@
 #include <asm/alternative.h>
 #include <asm/text-patching.h>
 
-#ifdef HAVE_JUMP_LABEL
-
 union jump_code_union {
        char code[JUMP_LABEL_NOP_SIZE];
        struct {
@@ -142,5 +140,3 @@ __init_or_module void arch_jump_label_transform_static(struct jump_entry *entry,
        if (jlstate == JL_STATE_UPDATE)
                __jump_label_transform(entry, type, text_poke_early, 1);
 }
-
-#endif
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 544bc2dfe408..e83a057564d1 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -431,8 +431,20 @@ void *alloc_insn_page(void)
        void *page;
 
        page = module_alloc(PAGE_SIZE);
-       if (page)
-               set_memory_ro((unsigned long)page & PAGE_MASK, 1);
+       if (!page)
+               return NULL;
+
+       /*
+        * First make the page read-only, and only then make it executable to
+        * prevent it from being W+X in between.
+        */
+       set_memory_ro((unsigned long)page, 1);
+
+       /*
+        * TODO: Once additional kernel code protection mechanisms are set, ensure
+        * that the page was not maliciously altered and it is still zeroed.
+        */
+       set_memory_x((unsigned long)page, 1);
 
        return page;
 }
@@ -440,8 +452,12 @@ void *alloc_insn_page(void)
 /* Recover page to RW mode before releasing it */
 void free_insn_page(void *page)
 {
-       set_memory_nx((unsigned long)page & PAGE_MASK, 1);
-       set_memory_rw((unsigned long)page & PAGE_MASK, 1);
+       /*
+        * First make the page non-executable, and only then make it writable to
+        * prevent it from being W+X in between.
+        */
+       set_memory_nx((unsigned long)page, 1);
+       set_memory_rw((unsigned long)page, 1);
        module_memfree(page);
 }
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 2fb152d813c1..85e6d5620188 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -151,10 +151,10 @@ SECTIONS
                *(.text.__x86.indirect_thunk)
                __indirect_thunk_end = .;
 #endif
-       } :text = 0x9090
 
-       /* End of text section */
-       _etext = .;
+               /* End of text section */
+               _etext = .;
+       } :text = 0x9090
 
        NOTES :text :note
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 860bd271619d..4a688ef9e448 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -456,7 +456,7 @@ FOP_END;
 
 /*
  * XXX: inoutclob user must know where the argument is being expanded.
- *      Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
+ *      Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault.
  */
 #define asm_safe(insn, inoutclob...) \
 ({ \
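The kprobes/core.c hunks above encode an ordering rule worth calling out: when an instruction page changes between writable and executable, the permission being dropped must go first, so there is no window in which the page is both writable and executable. A user-space sketch of the same rule, with mprotect() standing in for the kernel's set_memory_*() helpers and assuming 4 KiB pages and a page-aligned argument:

```c
/*
 * W^X transition ordering, restated in user space: always drop one
 * permission before granting its counterpart, so the region is never
 * writable and executable at the same time.
 */
#include <sys/mman.h>

#define PAGE_SZ 4096	/* assumes 4 KiB pages; page must be aligned */

/* RW -> R -> RX: write access is gone before exec is granted. */
static int make_executable(void *page)
{
	if (mprotect(page, PAGE_SZ, PROT_READ))
		return -1;
	return mprotect(page, PAGE_SZ, PROT_READ | PROT_EXEC);
}

/* RX -> R -> RW: exec access is gone before write is granted. */
static int make_writable(void *page)
{
	if (mprotect(page, PAGE_SZ, PROT_READ))
		return -1;
	return mprotect(page, PAGE_SZ, PROT_READ | PROT_WRITE);
}
```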
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 94066fbfea6e..b2b11374c663 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2987,6 +2987,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
+       case KVM_CAP_MAX_VCPU_ID:
+               r = KVM_MAX_VCPU_ID;
+               break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_USER_MEM_SLOTS;
                break;
diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c
index 9119d8e41f1f..87dcba101e56 100644
--- a/arch/x86/lib/insn-eval.c
+++ b/arch/x86/lib/insn-eval.c
@@ -555,7 +555,8 @@ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs,
 }
 
 /**
- * get_desc() - Obtain pointer to a segment descriptor
+ * get_desc() - Obtain contents of a segment descriptor
+ * @out:	Segment descriptor contents on success
  * @sel:	Segment selector
  *
  * Given a segment selector, obtain a pointer to the segment descriptor.
@@ -563,18 +564,18 @@ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs,
  *
  * Returns:
  *
- * Pointer to segment descriptor on success.
+ * True on success, false on failure.
  *
  * NULL on error.
  */
-static struct desc_struct *get_desc(unsigned short sel)
+static bool get_desc(struct desc_struct *out, unsigned short sel)
 {
        struct desc_ptr gdt_desc = {0, 0};
        unsigned long desc_base;
 
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
        if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) {
-               struct desc_struct *desc = NULL;
+               bool success = false;
                struct ldt_struct *ldt;
 
                /* Bits [15:3] contain the index of the desired entry. */
@@ -582,12 +583,14 @@ static struct desc_struct *get_desc(unsigned short sel)
 
                mutex_lock(&current->active_mm->context.lock);
                ldt = current->active_mm->context.ldt;
-               if (ldt && sel < ldt->nr_entries)
-                       desc = &ldt->entries[sel];
+               if (ldt && sel < ldt->nr_entries) {
+                       *out = ldt->entries[sel];
+                       success = true;
+               }
 
                mutex_unlock(&current->active_mm->context.lock);
 
-               return desc;
+               return success;
        }
 #endif
        native_store_gdt(&gdt_desc);
@@ -602,9 +605,10 @@ static struct desc_struct *get_desc(unsigned short sel)
        desc_base = sel & ~(SEGMENT_RPL_MASK | SEGMENT_TI_MASK);
 
        if (desc_base > gdt_desc.size)
-               return NULL;
+               return false;
 
-       return (struct desc_struct *)(gdt_desc.address + desc_base);
+       *out = *(struct desc_struct *)(gdt_desc.address + desc_base);
+       return true;
 }
 
 /**
@@ -626,7 +630,7 @@ static struct desc_struct *get_desc(unsigned short sel)
  */
 unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
 {
-       struct desc_struct *desc;
+       struct desc_struct desc;
        short sel;
 
        sel = get_segment_selector(regs, seg_reg_idx);
@@ -664,11 +668,10 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
        if (!sel)
                return -1L;
 
-       desc = get_desc(sel);
-       if (!desc)
+       if (!get_desc(&desc, sel))
                return -1L;
 
-       return get_desc_base(desc);
+       return get_desc_base(&desc);
 }
 
 /**
@@ -690,7 +693,7 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
  */
 static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
 {
-       struct desc_struct *desc;
+       struct desc_struct desc;
        unsigned long limit;
        short sel;
 
@@ -704,8 +707,7 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
        if (!sel)
                return 0;
 
-       desc = get_desc(sel);
-       if (!desc)
+       if (!get_desc(&desc, sel))
                return 0;
 
        /*
@@ -714,8 +716,8 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
         * not tested when checking the segment limits. In practice,
         * this means that the segment ends in (limit << 12) + 0xfff.
         */
-       limit = get_desc_limit(desc);
-       if (desc->g)
+       limit = get_desc_limit(&desc);
+       if (desc.g)
                limit = (limit << 12) + 0xfff;
 
        return limit;
@@ -739,7 +741,7 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
  */
 int insn_get_code_seg_params(struct pt_regs *regs)
 {
-       struct desc_struct *desc;
+       struct desc_struct desc;
        short sel;
 
        if (v8086_mode(regs))
@@ -750,8 +752,7 @@ int insn_get_code_seg_params(struct pt_regs *regs)
        if (sel < 0)
                return sel;
 
-       desc = get_desc(sel);
-       if (!desc)
+       if (!get_desc(&desc, sel))
                return -EINVAL;
 
        /*
@@ -759,10 +760,10 @@ int insn_get_code_seg_params(struct pt_regs *regs)
         * determines whether a segment contains data or code. If this is a data
         * segment, return error.
         */
-       if (!(desc->type & BIT(3)))
+       if (!(desc.type & BIT(3)))
                return -EINVAL;
 
-       switch ((desc->l << 1) | desc->d) {
+       switch ((desc.l << 1) | desc.d) {
        case 0:
                /*
                 * Legacy mode. CS.L=0, CS.D=0. Address and operand size are
                 * both 16-bit.
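The insn-eval.c rework above changes get_desc() from returning a pointer into the LDT to copying the descriptor out while current->active_mm->context.lock is held; the old pointer could be dereferenced after the mutex was dropped, when the LDT may already have been freed or resized — a use-after-free. The pattern in isolation, as a small sketch with made-up types and pthreads standing in for the kernel mutex:

```c
/*
 * Copy-out-under-lock: snapshot an entry from a shared table while its
 * lock is held instead of returning a pointer into the table.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct desc { unsigned int base, limit; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct desc *table;	/* may be freed/reallocated under table_lock */
static size_t table_entries;

static bool get_entry(struct desc *out, size_t idx)
{
	bool success = false;

	pthread_mutex_lock(&table_lock);
	if (table && idx < table_entries) {
		*out = table[idx];	/* snapshot while the table is pinned */
		success = true;
	}
	pthread_mutex_unlock(&table_lock);

	/* The caller works on its private copy; no dangling pointer. */
	return success;
}
```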
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index a7d966964c6f..513ce09e9950 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -299,7 +299,17 @@ int hibernate_resume_nonboot_cpu_disable(void)
         * address in its instruction pointer may not be possible to resolve
         * any more at that point (the page tables used by it previously may
         * have been overwritten by hibernate image data).
+        *
+        * First, make sure that we wake up all the potentially disabled SMT
+        * threads which have been initially brought up and then put into
+        * mwait/cpuidle sleep.
+        * Those will be put to proper (not interfering with hibernation
+        * resume) sleep afterwards, and the resumed kernel will decide itself
+        * what to do with them.
         */
+       ret = cpuhp_smt_enable();
+       if (ret)
+               return ret;
        smp_ops.play_dead = resume_play_dead;
        ret = disable_nonboot_cpus();
        smp_ops.play_dead = play_dead;
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index f8e3b668d20b..c9986041a5e1 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -13,6 +13,7 @@
 #include <linux/suspend.h>
 #include <linux/scatterlist.h>
 #include <linux/kdebug.h>
+#include <linux/cpu.h>
 
 #include <crypto/hash.h>
 
@@ -363,3 +364,35 @@ int arch_hibernation_header_restore(void *addr)
 
        return 0;
 }
+
+int arch_resume_nosmt(void)
+{
+       int ret = 0;
+       /*
+        * We reached this while coming out of hibernation. This means
+        * that SMT siblings are sleeping in hlt, as mwait is not safe
+        * against control transition during resume (see comment in
+        * hibernate_resume_nonboot_cpu_disable()).
+        *
+        * If the resumed kernel has SMT disabled, we have to take all the
+        * SMT siblings out of hlt, and offline them again so that they
+        * end up in mwait proper.
+        *
+        * Called with hotplug disabled.
+        */
+       cpu_hotplug_enable();
+       if (cpu_smt_control == CPU_SMT_DISABLED ||
+                       cpu_smt_control == CPU_SMT_FORCE_DISABLED) {
+               enum cpuhp_smt_control old = cpu_smt_control;
+
+               ret = cpuhp_smt_enable();
+               if (ret)
+                       goto out;
+               ret = cpuhp_smt_disable(old);
+               if (ret)
+                       goto out;
+       }
+out:
+       cpu_hotplug_disable();
+       return ret;
+}
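Finally, arch_resume_nosmt() above uses a save/toggle/restore bracket with a single exit label: enable SMT to pull the sibling threads out of hlt, then re-apply the previous policy, funnelling every failure through one cleanup path. Reduced to a sketch — every name below is invented, and the setter is a stand-in for cpuhp_smt_enable()/cpuhp_smt_disable():

```c
/*
 * Shape of the save/toggle/restore bracket: once the old state is
 * saved, every path re-converges on the "out" cleanup label.
 */
enum smt_state { SMT_ENABLED, SMT_DISABLED, SMT_FORCE_DISABLED };

static enum smt_state smt = SMT_DISABLED;	/* stands in for cpu_smt_control */

static int set_smt(enum smt_state s)		/* pretend this can fail */
{
	smt = s;
	return 0;
}

static int resume_fixup(void)
{
	int ret = 0;

	if (smt == SMT_DISABLED || smt == SMT_FORCE_DISABLED) {
		enum smt_state old = smt;

		ret = set_smt(SMT_ENABLED);	/* wake siblings out of hlt */
		if (ret)
			goto out;
		ret = set_smt(old);		/* park them again, properly */
		if (ret)
			goto out;
	}
out:
	return ret;
}
```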