Message-ID: <151785233719414@kroah.com>
Date: Mon, 05 Feb 2018 09:38:57 -0800
From: <gregkh@...uxfoundation.org>
To: luto@...nel.org, bp@...en8.de, gregkh@...uxfoundation.org,
	kernel-hardening@...ts.openwall.com, mingo@...nel.org,
	tglx@...utronix.de, torvalds@...ux-foundation.org
Cc: <stable@...r.kernel.org>, <stable-commits@...r.kernel.org>
Subject: Patch "x86/entry/64: Remove the SYSCALL64 fast path" has been added to the 4.15-stable tree

This is a note to let you know that I've just added the patch titled

    x86/entry/64: Remove the SYSCALL64 fast path

to the 4.15-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
    x86entry64_Remove_the_SYSCALL64_fast_path.patch
and it can be found in the queue-4.15 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@...r.kernel.org> know about it.


Subject: x86/entry/64: Remove the SYSCALL64 fast path
From: Andy Lutomirski <luto@...nel.org>
Date: Sun Jan 28 10:38:49 2018 -0800

From: Andy Lutomirski <luto@...nel.org>

commit 21d375b6b34ff511a507de27bf316b3dde6938d9

The SYSCALL64 fast path was a nice, if small, optimization back in the
good old days when syscalls were actually reasonably fast.  Now there is
PTI to slow everything down, and indirect branches are verboten, making
everything messier.  The retpoline code in the fast path is particularly
nasty.

Just get rid of the fast path.  The slow path is barely slower.

[ tglx: Split out the 'push all extra regs' part ]

Signed-off-by: Andy Lutomirski <luto@...nel.org>
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Acked-by: Ingo Molnar <mingo@...nel.org>
Cc: Borislav Petkov <bp@...en8.de>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Kernel Hardening <kernel-hardening@...ts.openwall.com>
Link: https://lkml.kernel.org/r/462dff8d4d64dfbfc851fbf3130641809d980ecd.1517164461.git.luto@kernel.org
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
 arch/x86/entry/entry_64.S   |  117 --------------------------------------------
 arch/x86/entry/syscall_64.c |    7 --
 2 files changed, 2 insertions(+), 122 deletions(-)

--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -241,86 +241,11 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)

 	TRACE_IRQS_OFF

-	/*
-	 * If we need to do entry work or if we guess we'll need to do
-	 * exit work, go straight to the slow path.
-	 */
-	movq	PER_CPU_VAR(current_task), %r11
-	testl	$_TIF_WORK_SYSCALL_ENTRY|_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
-	jnz	entry_SYSCALL64_slow_path
-
-entry_SYSCALL_64_fastpath:
-	/*
-	 * Easy case: enable interrupts and issue the syscall.  If the syscall
-	 * needs pt_regs, we'll call a stub that disables interrupts again
-	 * and jumps to the slow path.
-	 */
-	TRACE_IRQS_ON
-	ENABLE_INTERRUPTS(CLBR_NONE)
-#if __SYSCALL_MASK == ~0
-	cmpq	$__NR_syscall_max, %rax
-#else
-	andl	$__SYSCALL_MASK, %eax
-	cmpl	$__NR_syscall_max, %eax
-#endif
-	ja	1f			/* return -ENOSYS (already in pt_regs->ax) */
-	movq	%r10, %rcx
-
-	/*
-	 * This call instruction is handled specially in stub_ptregs_64.
-	 * It might end up jumping to the slow path.  If it jumps, RAX
-	 * and all argument registers are clobbered.
-	 */
-#ifdef CONFIG_RETPOLINE
-	movq	sys_call_table(, %rax, 8), %rax
-	call	__x86_indirect_thunk_rax
-#else
-	call	*sys_call_table(, %rax, 8)
-#endif
-.Lentry_SYSCALL_64_after_fastpath_call:
-
-	movq	%rax, RAX(%rsp)
-1:
-
-	/*
-	 * If we get here, then we know that pt_regs is clean for SYSRET64.
-	 * If we see that no exit work is required (which we are required
-	 * to check with IRQs off), then we can go straight to SYSRET64.
-	 */
-	DISABLE_INTERRUPTS(CLBR_ANY)
-	TRACE_IRQS_OFF
-	movq	PER_CPU_VAR(current_task), %r11
-	testl	$_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
-	jnz	1f
-
-	LOCKDEP_SYS_EXIT
-	TRACE_IRQS_ON		/* user mode is traced as IRQs on */
-	movq	RIP(%rsp), %rcx
-	movq	EFLAGS(%rsp), %r11
-	addq	$6*8, %rsp	/* skip extra regs -- they were preserved */
-	UNWIND_HINT_EMPTY
-	jmp	.Lpop_c_regs_except_rcx_r11_and_sysret
-
-1:
-	/*
-	 * The fast path looked good when we started, but something changed
-	 * along the way and we need to switch to the slow path.  Calling
-	 * raise(3) will trigger this, for example.  IRQs are off.
-	 */
-	TRACE_IRQS_ON
-	ENABLE_INTERRUPTS(CLBR_ANY)
-	SAVE_EXTRA_REGS
-	movq	%rsp, %rdi
-	call	syscall_return_slowpath	/* returns with IRQs disabled */
-	jmp	return_from_SYSCALL_64
-
-entry_SYSCALL64_slow_path:
 	/* IRQs are off. */
 	SAVE_EXTRA_REGS
 	movq	%rsp, %rdi
 	call	do_syscall_64		/* returns with IRQs disabled */

-return_from_SYSCALL_64:
 	TRACE_IRQS_IRETQ		/* we're about to change IF */

 	/*
@@ -393,7 +318,6 @@ syscall_return_via_sysret:
 	/* rcx and r11 are already restored (see code above) */
 	UNWIND_HINT_EMPTY
 	POP_EXTRA_REGS
-.Lpop_c_regs_except_rcx_r11_and_sysret:
 	popq	%rsi	/* skip r11 */
 	popq	%r10
 	popq	%r9
@@ -424,47 +348,6 @@ syscall_return_via_sysret:
 	USERGS_SYSRET64
 END(entry_SYSCALL_64)

-ENTRY(stub_ptregs_64)
-	/*
-	 * Syscalls marked as needing ptregs land here.
-	 * If we are on the fast path, we need to save the extra regs,
-	 * which we achieve by trying again on the slow path.  If we are on
-	 * the slow path, the extra regs are already saved.
-	 *
-	 * RAX stores a pointer to the C function implementing the syscall.
-	 * IRQs are on.
-	 */
-	cmpq	$.Lentry_SYSCALL_64_after_fastpath_call, (%rsp)
-	jne	1f
-
-	/*
-	 * Called from fast path -- disable IRQs again, pop return address
-	 * and jump to slow path
-	 */
-	DISABLE_INTERRUPTS(CLBR_ANY)
-	TRACE_IRQS_OFF
-	popq	%rax
-	UNWIND_HINT_REGS extra=0
-	jmp	entry_SYSCALL64_slow_path
-
-1:
-	JMP_NOSPEC %rax			/* Called from C */
-END(stub_ptregs_64)
-
-.macro ptregs_stub func
-ENTRY(ptregs_\func)
-	UNWIND_HINT_FUNC
-	leaq	\func(%rip), %rax
-	jmp	stub_ptregs_64
-END(ptregs_\func)
-.endm
-
-/* Instantiate ptregs_stub for each ptregs-using syscall */
-#define __SYSCALL_64_QUAL_(sym)
-#define __SYSCALL_64_QUAL_ptregs(sym) ptregs_stub sym
-#define __SYSCALL_64(nr, sym, qual) __SYSCALL_64_QUAL_##qual(sym)
-#include <asm/syscalls_64.h>
-
 /*
  * %rdi: prev task
  * %rsi: next task
--- a/arch/x86/entry/syscall_64.c
+++ b/arch/x86/entry/syscall_64.c
@@ -7,14 +7,11 @@
 #include <asm/asm-offsets.h>
 #include <asm/syscall.h>

-#define __SYSCALL_64_QUAL_(sym) sym
-#define __SYSCALL_64_QUAL_ptregs(sym) ptregs_##sym
-
-#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long __SYSCALL_64_QUAL_##qual(sym)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
+#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
 #include <asm/syscalls_64.h>
 #undef __SYSCALL_64

-#define __SYSCALL_64(nr, sym, qual) [nr] = __SYSCALL_64_QUAL_##qual(sym),
+#define __SYSCALL_64(nr, sym, qual) [nr] = sym,

 extern long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);


Patches currently in stable-queue which might be from luto@...nel.org are

queue-4.15/objtool_Add_support_for_alternatives_at_the_end_of_a_section.patch
queue-4.15/x86pti_Mark_constant_arrays_as___initconst.patch
queue-4.15/x86speculation_Use_Indirect_Branch_Prediction_Barrier_in_context_switch.patch
queue-4.15/x86spectre_Fix_spelling_mistake_vunerable-_vulnerable.patch
queue-4.15/x86get_user_Use_pointer_masking_to_limit_speculation.patch
queue-4.15/x86paravirt_Remove_noreplace-paravirt_cmdline_option.patch
queue-4.15/KVM_VMX_Make_indirect_call_speculation_safe.patch
queue-4.15/KVMVMX_Allow_direct_access_to_MSR_IA32_SPEC_CTRL.patch
queue-4.15/x86entry64_Remove_the_SYSCALL64_fast_path.patch
queue-4.15/KVMSVM_Allow_direct_access_to_MSR_IA32_SPEC_CTRL.patch
queue-4.15/x86asm_Move_status_from_thread_struct_to_thread_info.patch
queue-4.15/KVMx86_Add_IBPB_support.patch
queue-4.15/KVMVMX_Emulate_MSR_IA32_ARCH_CAPABILITIES.patch
queue-4.15/KVM_x86_Make_indirect_calls_in_emulator_speculation_safe.patch
queue-4.15/x86entry64_Push_extra_regs_right_away.patch
queue-4.15/objtool_Warn_on_stripped_section_symbol.patch
queue-4.15/x86syscall_Sanitize_syscall_table_de-references_under_speculation.patch
queue-4.15/objtool_Improve_retpoline_alternative_handling.patch
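For context on why the slow path is "barely slower": with the fast path gone,
every 64-bit syscall is dispatched from C via do_syscall_64().  The sketch
below paraphrases that function as it stands around v4.15 in
arch/x86/entry/common.c; it is illustrative rather than verbatim, with the
tracing, audit, and later speculation-hardening details trimmed.

	/*
	 * Simplified sketch of the C slow path (arch/x86/entry/common.c,
	 * ~v4.15), paraphrased for illustration.  SAVE_EXTRA_REGS in the
	 * entry asm guarantees pt_regs is fully populated before this runs.
	 */
	__visible void do_syscall_64(struct pt_regs *regs)
	{
		struct thread_info *ti = current_thread_info();
		unsigned long nr = regs->orig_ax;	/* syscall number from RAX */

		enter_from_user_mode();
		local_irq_enable();

		/* seccomp, ptrace and friends may rewrite the syscall number */
		if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
			nr = syscall_trace_enter(regs);

		/*
		 * Bounds-checked table dispatch.  An out-of-range number
		 * leaves the -ENOSYS that the entry asm pre-loaded into
		 * pt_regs->ax.
		 */
		if (likely((nr & __SYSCALL_MASK) < NR_syscalls))
			regs->ax = sys_call_table[nr & __SYSCALL_MASK](
					regs->di, regs->si, regs->dx,
					regs->r10, regs->r8, regs->r9);

		/* exit work (signals, tracing); returns with IRQs disabled */
		syscall_return_slowpath(regs);
	}

Because pt_regs is always complete by the time the table is indexed, syscalls
that need it can read it straight from the stack, which is what lets this
patch delete the stub_ptregs_64/ptregs_* machinery.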