Message-Id: <20200324203231.64324-5-keescook@chromium.org>
Date: Tue, 24 Mar 2020 13:32:30 -0700
From: Kees Cook <keescook@...omium.org>
To: Thomas Gleixner <tglx@...utronix.de>
Cc: Kees Cook <keescook@...omium.org>,
	Elena Reshetova <elena.reshetova@...el.com>,
	x86@...nel.org,
	Andy Lutomirski <luto@...nel.org>,
	Peter Zijlstra <peterz@...radead.org>,
	Catalin Marinas <catalin.marinas@....com>,
	Will Deacon <will@...nel.org>,
	Mark Rutland <mark.rutland@....com>,
	Alexander Potapenko <glider@...gle.com>,
	Ard Biesheuvel <ard.biesheuvel@...aro.org>,
	Jann Horn <jannh@...gle.com>,
	"Perla, Enrico" <enrico.perla@...el.com>,
	kernel-hardening@...ts.openwall.com,
	linux-arm-kernel@...ts.infradead.org,
	linux-mm@...ck.org,
	linux-kernel@...r.kernel.org
Subject: [PATCH v2 4/5] x86/entry: Enable random_kstack_offset support

Allow for a randomized stack offset on a per-syscall basis, with roughly
5 bits of entropy.

Signed-off-by: Kees Cook <keescook@...omium.org>
---
 arch/x86/Kconfig        |  1 +
 arch/x86/entry/common.c | 12 +++++++++++-
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index beea77046f9b..b9d449581eb6 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -150,6 +150,7 @@ config X86
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64
 	select HAVE_ARCH_VMAP_STACK if X86_64
+	select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
 	select HAVE_ARCH_WITHIN_STACK_FRAMES
 	select HAVE_ASM_MODVERSIONS
 	select HAVE_CMPXCHG_DOUBLE
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 9747876980b5..086d7af570af 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -26,6 +26,7 @@
 #include <linux/livepatch.h>
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
+#include <linux/randomize_kstack.h>
 
 #include <asm/desc.h>
 #include <asm/traps.h>
@@ -189,6 +190,13 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
 	lockdep_assert_irqs_disabled();
 	lockdep_sys_exit();
 
+	/*
+	 * x86_64 stack alignment means 3 bits are ignored, so keep
+	 * the top 5 bits. x86_32 needs only 2 bits of alignment, so
+	 * the top 6 bits will be used.
+	 */
+	choose_random_kstack_offset(rdtsc() & 0xFF);
+
 	cached_flags = READ_ONCE(ti->flags);
 
 	if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
@@ -283,6 +291,7 @@ __visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
 {
 	struct thread_info *ti;
 
+	add_random_kstack_offset();
 	enter_from_user_mode();
 	local_irq_enable();
 	ti = current_thread_info();
@@ -355,6 +364,7 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
 /* Handles int $0x80 */
 __visible void do_int80_syscall_32(struct pt_regs *regs)
 {
+	add_random_kstack_offset();
 	enter_from_user_mode();
 	local_irq_enable();
 	do_syscall_32_irqs_on(regs);
@@ -378,8 +388,8 @@ __visible long do_fast_syscall_32(struct pt_regs *regs)
 	 */
 	regs->ip = landing_pad;
 
+	add_random_kstack_offset();
 	enter_from_user_mode();
-
 	local_irq_enable();
 
 	/* Fetch EBP from where the vDSO stashed it. */
-- 
2.20.1
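The add_random_kstack_offset() and choose_random_kstack_offset() helpers
consumed above are defined in an earlier patch of this series
(include/linux/randomize_kstack.h), which is not shown here. As a rough
guide to what the x86 hooks are driving, below is a minimal sketch of how
such helpers can be built, assuming an alloca()-based stack gap and a
per-cpu entropy word; the names, the mask, and the omitted static-key
plumbing are approximations, not the exact code from patch 1/5:

/*
 * Sketch only: a DEFINE_PER_CPU(u32, kstack_offset) would live in a
 * corresponding .c file.
 */
#include <linux/percpu.h>

DECLARE_PER_CPU(u32, kstack_offset);

/*
 * At syscall entry: move the stack pointer down by a small, bounded,
 * previously chosen random amount before dispatch uses the stack.
 * The empty asm() keeps the compiler from optimizing away the
 * otherwise-unused alloca() allocation.
 */
#define add_random_kstack_offset() do {				\
	u32 offset = raw_cpu_read(kstack_offset);		\
	u8 *ptr = __builtin_alloca(offset & 0x3FF);		\
	asm volatile("" : : "r"(ptr) : "memory");		\
} while (0)

/*
 * At syscall exit: mix fresh entropy (here, the low rdtsc() bits
 * passed by the caller) into the per-cpu state, so the offset used
 * by the *next* syscall is not yet determined at this syscall's
 * entry.
 */
#define choose_random_kstack_offset(rand) do {			\
	u32 offset = raw_cpu_read(kstack_offset);		\
	offset ^= (rand);					\
	raw_cpu_write(kstack_offset, offset);			\
} while (0)

The split between the two calls is the point of the design: the offset
consumed at syscall entry was chosen at the exit of the previous
syscall, so an attacker who can observe or influence the current syscall
cannot directly probe the stack placement the next one will use.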