Message-Id: <20170810172615.51965-9-thgarnie@google.com>
Date: Thu, 10 Aug 2017 10:26:00 -0700
From: Thomas Garnier <thgarnie@...gle.com>
To: Herbert Xu <herbert@...dor.apana.org.au>, "David S . Miller" <davem@...emloft.net>, Thomas Gleixner <tglx@...utronix.de>, Ingo Molnar <mingo@...hat.com>, "H . Peter Anvin" <hpa@...or.com>, Peter Zijlstra <peterz@...radead.org>, Josh Poimboeuf <jpoimboe@...hat.com>, Arnd Bergmann <arnd@...db.de>, Thomas Garnier <thgarnie@...gle.com>, Matthias Kaehlcke <mka@...omium.org>, Boris Ostrovsky <boris.ostrovsky@...cle.com>, Juergen Gross <jgross@...e.com>, Paolo Bonzini <pbonzini@...hat.com>, Radim Krčmář <rkrcmar@...hat.com>, Joerg Roedel <joro@...tes.org>, Tom Lendacky <thomas.lendacky@....com>, Andy Lutomirski <luto@...nel.org>, Borislav Petkov <bp@...e.de>, Brian Gerst <brgerst@...il.com>, "Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>, "Rafael J . Wysocki" <rjw@...ysocki.net>, Len Brown <len.brown@...el.com>, Pavel Machek <pavel@....cz>, Tejun Heo <tj@...nel.org>, Christoph Lameter <cl@...ux.com>, Paul Gortmaker <paul.gortmaker@...driver.com>, Chris Metcalf <cmetcalf@...lanox.com>, Andrew Morton <akpm@...ux-foundation.org>, "Paul E . McKenney" <paulmck@...ux.vnet.ibm.com>, Nicolas Pitre <nicolas.pitre@...aro.org>, Christopher Li <sparse@...isli.org>, "Rafael J . Wysocki" <rafael.j.wysocki@...el.com>, Lukas Wunner <lukas@...ner.de>, Mika Westerberg <mika.westerberg@...ux.intel.com>, Dou Liyang <douly.fnst@...fujitsu.com>, Daniel Borkmann <daniel@...earbox.net>, Alexei Starovoitov <ast@...nel.org>, Masahiro Yamada <yamada.masahiro@...ionext.com>, Markus Trippelsdorf <markus@...ppelsdorf.de>, Steven Rostedt <rostedt@...dmis.org>, Kees Cook <keescook@...omium.org>, Rik van Riel <riel@...hat.com>, David Howells <dhowells@...hat.com>, Waiman Long <longman@...hat.com>, Kyle Huey <me@...ehuey.com>, Peter Foley <pefoley2@...oley.com>, Tim Chen <tim.c.chen@...ux.intel.com>, Catalin Marinas <catalin.marinas@....com>, Ard Biesheuvel <ard.biesheuvel@...aro.org>, Michal Hocko <mhocko@...e.com>, Matthew Wilcox <mawilcox@...rosoft.com>, "H . J . Lu" <hjl.tools@...il.com>, Paul Bolle <pebolle@...cali.nl>, Rob Landley <rob@...dley.net>, Baoquan He <bhe@...hat.com>, Daniel Micay <danielmicay@...il.com>
Cc: x86@...nel.org, linux-crypto@...r.kernel.org, linux-kernel@...r.kernel.org, xen-devel@...ts.xenproject.org, kvm@...r.kernel.org, linux-pm@...r.kernel.org, linux-arch@...r.kernel.org, linux-sparse@...r.kernel.org, kernel-hardening@...ts.openwall.com
Subject: [RFC v2 08/23] x86/entry/64: Adapt assembly for PIE support

Change the assembly code to use only relative references to symbols so
that the kernel can be PIE compatible.

Position Independent Executable (PIE) support will allow extending the
KASLR randomization range below the -2G memory limit.

Signed-off-by: Thomas Garnier <thgarnie@...gle.com>
---
 arch/x86/entry/entry_64.S | 22 +++++++++++++++-------
 1 file changed, 15 insertions(+), 7 deletions(-)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index daf8936d0628..a3967a2af6ec 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -199,12 +199,15 @@ entry_SYSCALL_64_fastpath:
 	ja	1f			/* return -ENOSYS (already in pt_regs->ax) */
 	movq	%r10, %rcx
 
+	/* Ensures the call is position independent */
+	leaq	sys_call_table(%rip), %r11
+
 	/*
 	 * This call instruction is handled specially in stub_ptregs_64.
 	 * It might end up jumping to the slow path. If it jumps, RAX
 	 * and all argument registers are clobbered.
 	 */
-	call	*sys_call_table(, %rax, 8)
+	call	*(%r11, %rax, 8)
 .Lentry_SYSCALL_64_after_fastpath_call:
 
 	movq	%rax, RAX(%rsp)
@@ -339,7 +342,8 @@ ENTRY(stub_ptregs_64)
 	 * RAX stores a pointer to the C function implementing the syscall.
 	 * IRQs are on.
 	 */
-	cmpq	$.Lentry_SYSCALL_64_after_fastpath_call, (%rsp)
+	leaq	.Lentry_SYSCALL_64_after_fastpath_call(%rip), %r11
+	cmpq	%r11, (%rsp)
 	jne	1f
 
 	/*
@@ -1210,7 +1214,8 @@ ENTRY(error_entry)
 	movl	%ecx, %eax			/* zero extend */
 	cmpq	%rax, RIP+8(%rsp)
 	je	.Lbstep_iret
-	cmpq	$.Lgs_change, RIP+8(%rsp)
+	leaq	.Lgs_change(%rip), %rcx
+	cmpq	%rcx, RIP+8(%rsp)
 	jne	.Lerror_entry_done
 
 	/*
@@ -1430,10 +1435,10 @@ ENTRY(nmi)
 	 * resume the outer NMI.
 	 */
 
-	movq	$repeat_nmi, %rdx
+	leaq	repeat_nmi(%rip), %rdx
 	cmpq	8(%rsp), %rdx
 	ja	1f
-	movq	$end_repeat_nmi, %rdx
+	leaq	end_repeat_nmi(%rip), %rdx
 	cmpq	8(%rsp), %rdx
 	ja	nested_nmi_out
 1:
@@ -1487,7 +1492,8 @@ nested_nmi:
 	pushq	%rdx
 	pushfq
 	pushq	$__KERNEL_CS
-	pushq	$repeat_nmi
+	leaq	repeat_nmi(%rip), %rdx
+	pushq	%rdx
 
 	/* Put stack back */
 	addq	$(6*8), %rsp
@@ -1526,7 +1532,9 @@ first_nmi:
 	addq	$8, (%rsp)	/* Fix up RSP */
 	pushfq			/* RFLAGS */
 	pushq	$__KERNEL_CS	/* CS */
-	pushq	$1f		/* RIP */
+	pushq	%rax		/* Support Position Independent Code */
+	leaq	1f(%rip), %rax	/* RIP */
+	xchgq	%rax, (%rsp)	/* Restore RAX, put 1f */
 	INTERRUPT_RETURN	/* continues at repeat_nmi below */
 	UNWIND_HINT_IRET_REGS
 1:
-- 
2.14.0.434.g98096fd7a8-goog
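
For readers who want to try the conversion patterns outside the kernel
tree, here is a minimal standalone sketch assuming GNU as / AT&T syntax.
It is not part of the patch; the symbol names (my_func, my_table, demo)
and the file name pie_sketch.S are hypothetical and only illustrate the
same absolute-to-RIP-relative rewrites the hunks above perform.

/*
 * pie_sketch.S - illustration only, assemble with "gcc -c pie_sketch.S".
 */
	.text
my_func:
	ret

	.data
my_table:
	.quad	my_func			/* one 8-byte table entry (index 0) */

	.text
	.globl	demo
demo:
	/*
	 * Pattern 1: an absolute symbol operand such as
	 *	call	*my_table(, %rax, 8)
	 * embeds a 32-bit absolute address and is not position
	 * independent.  Load the table base RIP-relative instead and
	 * index through a register.
	 */
	xorl	%eax, %eax		/* table index 0 */
	leaq	my_table(%rip), %r11	/* position-independent base */
	call	*(%r11, %rax, 8)	/* indexed indirect call */

	/*
	 * Pattern 2: pushing a symbol as an immediate, e.g.
	 *	pushq	$1f
	 * also encodes an absolute address.  When no scratch register
	 * is free, borrow one and restore it with xchg, as the
	 * first_nmi hunk above does.
	 */
	pushq	%rax			/* reserve a slot, keep old %rax */
	leaq	1f(%rip), %rax		/* compute the target address */
	xchgq	%rax, (%rsp)		/* slot = target, %rax restored */
	ret				/* pops the slot, jumps to 1: */
1:
	ret				/* back to demo's caller */

The xchg trick mirrors the first_nmi hunk: it builds the frame without
permanently consuming a scratch register, at the cost of an extra
(implicitly locked) stack access on a path that is not performance
critical.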