Message-ID: <20170114001721.GA124667@beast>
Date: Fri, 13 Jan 2017 16:17:21 -0800
From: Kees Cook <keescook@...omium.org>
To: kernel-hardening@...ts.openwall.com
Cc: x86@...nel.org, PaX Team <pageexec@...email.hu>,
	Emese Revfy <re.emese@...il.com>, Andy Lutomirski <luto@...nel.org>,
	linux-kernel@...r.kernel.org
Subject: [RFC] gcc-plugins: Add initial x86_64 kernexec plugin

This plugin, ported from PaX, instruments C function call sites (and
returns) to set the high bit of the instruction pointer to force any
attempts to execute userspace memory into the faulting non-canonical
address range. This can be thought of as a weak form of SMEP emulation.

For function pointer calls, the compiler attempts an optimization of
"set high bit", which is an "or" when registers are available:

 3bf:	48 b8 00 00 00 00 00 	movabs $0x8000000000000000,%rax
 3c6:	00 00 80
 3c9:	48 09 d8             	or     %rbx,%rax
 3cc:	ff d0                	callq  *%rax

And btsq for returns:

  36:	48 0f ba 2c 24 3f    	btsq   $0x3f,(%rsp)
  3c:	c3                   	retq

Note: assembly instrumentation still needs to be ported from PaX.
Without this, there is significant coverage missing.

Signed-off-by: Kees Cook <keescook@...omium.org>
---
 arch/Kconfig                          |  17 ++
 arch/x86/include/asm/module.h         |   9 +-
 scripts/Makefile.gcc-plugins          |  10 +-
 scripts/gcc-plugins/kernexec_plugin.c | 405 ++++++++++++++++++++++++++++++++++
 scripts/link-vmlinux.sh               |   2 +-
 5 files changed, 439 insertions(+), 4 deletions(-)
 create mode 100644 scripts/gcc-plugins/kernexec_plugin.c

diff --git a/arch/Kconfig b/arch/Kconfig
index 99839c23d453..1181cb8b7154 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -410,6 +410,23 @@ config GCC_PLUGIN_LATENT_ENTROPY
 	  * https://grsecurity.net/
 	  * https://pax.grsecurity.net/
 
+config GCC_PLUGIN_KERNEXEC
+	bool "Provide weak SMEP emulation by instrumenting function calls"
+	depends on GCC_PLUGINS && X86_64
+	help
+	  For x86_64 CPUs that lack Supervisor Mode Execute Prevention (SMEP),
+	  the kernel can be instrumented so that C call sites set the high
+	  bit on the target address. This means that attempts to execute
+	  userspace memory will force the execution to land in the
+	  non-canonical memory region and trigger a fault (instead of letting
+	  an attacker execute userspace memory).
+
+	  For systems with SMEP, select "n"; this option is redundant.
+
+	  This plugin was ported from grsecurity/PaX. More information at:
+	  * https://grsecurity.net/
+	  * https://pax.grsecurity.net/
+
 config HAVE_CC_STACKPROTECTOR
 	bool
 	help
diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
index e3b7819caeef..87eee77daed3 100644
--- a/arch/x86/include/asm/module.h
+++ b/arch/x86/include/asm/module.h
@@ -5,6 +5,7 @@
 
 #ifdef CONFIG_X86_64
 /* X86_64 does not define MODULE_PROC_FAMILY */
+#define MODULE_PROC_FAMILY ""
 #elif defined CONFIG_M486
 #define MODULE_PROC_FAMILY "486 "
 #elif defined CONFIG_M586
@@ -57,8 +58,12 @@
 #error unknown processor family
 #endif
 
-#ifdef CONFIG_X86_32
-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
+#ifdef CONFIG_GCC_PLUGIN_KERNEXEC
+#define MODULE_GCC_PLUGIN_KERNEXEC "KERNEXEC_BTS "
+#else
+#define MODULE_GCC_PLUGIN_KERNEXEC ""
 #endif
 
+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_GCC_PLUGIN_KERNEXEC
+
 #endif /* _ASM_X86_MODULE_H */
diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins
index 060d2cb373db..fd666cfd3ae3 100644
--- a/scripts/Makefile.gcc-plugins
+++ b/scripts/Makefile.gcc-plugins
@@ -25,9 +25,16 @@ ifdef CONFIG_GCC_PLUGINS
     endif
   endif
 
+  gcc-plugin-$(CONFIG_GCC_PLUGIN_KERNEXEC)		+= kernexec_plugin.so
+  gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_KERNEXEC)	+= -DKERNEXEC_PLUGIN
+  gcc-plugin-aflags-$(CONFIG_GCC_PLUGIN_KERNEXEC)	+= -fplugin=$(objtree)/scripts/gcc-plugins/kernexec_plugin.so
+  gcc-plugin-aflags-$(CONFIG_GCC_PLUGIN_KERNEXEC)	+= -DKERNEXEC_PLUGIN
+
   GCC_PLUGINS_CFLAGS := $(strip $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y)) $(gcc-plugin-cflags-y))
+  GCC_PLUGINS_AFLAGS := $(gcc-plugin-aflags-y)
 
-  export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGIN GCC_PLUGIN_SUBDIR
+  export PLUGINCC GCC_PLUGIN GCC_PLUGIN_SUBDIR
+  export GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS
   export SANCOV_PLUGIN DISABLE_LATENT_ENTROPY_PLUGIN
 
   ifneq ($(PLUGINCC),)
@@ -36,6 +43,7 @@ ifdef CONFIG_GCC_PLUGINS
   endif
 
   KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+  KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
   GCC_PLUGIN := $(gcc-plugin-y)
   GCC_PLUGIN_SUBDIR := $(gcc-plugin-subdir-y)
 endif
diff --git a/scripts/gcc-plugins/kernexec_plugin.c b/scripts/gcc-plugins/kernexec_plugin.c
new file mode 100644
index 000000000000..9ee027dc2cb3
--- /dev/null
+++ b/scripts/gcc-plugins/kernexec_plugin.c
@@ -0,0 +1,405 @@
+/*
+ * Copyright 2011-2017 by the PaX Team <pageexec@...email.hu>
+ * Licensed under the GPL v2
+ *
+ * Note: the choice of the license means that the compilation process is
+ *       NOT 'eligible' as defined by gcc's library exception to the GPL v3,
+ *       but for the kernel it doesn't matter since it doesn't link against
+ *       any of the gcc libraries
+ *
+ * gcc plugin to instrument x86_64 kernel call sites to set the high bit
+ * on addresses so that attempts to execute userspace memory will fault
+ * into non-canonical memory.
+ *
+ * TODO:
+ * - port "OR" instrumentation from PaX.
+ *
+ * BUGS:
+ * - none known
+ */
+
+#include "gcc-common.h"
+
+__visible int plugin_is_GPL_compatible;
+
+static struct plugin_info kernexec_plugin_info = {
+	.version	= "201607271510vanilla",
+	.help		= "method=[bts|or]\tinstrumentation method\n"
+};
+
+static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
+static void (*kernexec_instrument_retaddr)(rtx);
+
+/*
+ * add special KERNEXEC instrumentation: reload %r12 after it has been clobbered
+ */
+static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
+{
+	gimple stmt;
+	gasm *asm_movabs_stmt;
+
+	/* build asm volatile("movabs $0x8000000000000000, %%r12\n\t" : : : ); */
+	stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r12\n\t", NULL, NULL, NULL, NULL);
+	asm_movabs_stmt = as_a_gasm(stmt);
+	gimple_asm_set_volatile(asm_movabs_stmt, true);
+	gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
+	update_stmt(asm_movabs_stmt);
+}
+
+/*
+ * find all asm() stmts that clobber r12 and add a reload of r12
+ */
+static unsigned int kernexec_reload_execute(void)
+{
+	basic_block bb;
+
+	/* 1. loop through BBs and GIMPLE statements */
+	FOR_EACH_BB_FN(bb, cfun) {
+		gimple_stmt_iterator gsi;
+
+		for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
+			/* gimple match: __asm__ ("" : : : "r12"); */
+			gimple stmt;
+			gasm *asm_stmt;
+			size_t nclobbers;
+
+			/* is it an asm ... */
+			stmt = gsi_stmt(gsi);
+			if (gimple_code(stmt) != GIMPLE_ASM)
+				continue;
+
+			asm_stmt = as_a_gasm(stmt);
+
+			/* ... clobbering r12 */
+			nclobbers = gimple_asm_nclobbers(asm_stmt);
+			while (nclobbers--) {
+				tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
+				if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r12"))
+					continue;
+				kernexec_reload_fptr_mask(&gsi);
+				break;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will
+ * produce a non-canonical address from a userland ptr and will just trigger
+ * a GPF on dereference
+ */
+static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
+{
+	gimple assign_intptr, assign_new_fptr;
+	gcall *call_stmt;
+	tree intptr, orptr, old_fptr, new_fptr, kernexec_mask;
+
+	call_stmt = as_a_gcall(gsi_stmt(*gsi));
+	old_fptr = gimple_call_fn(call_stmt);
+
+	/* create temporary unsigned long variable used for bitops and cast fptr to it */
+	intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
+	add_referenced_var(intptr);
+	intptr = make_ssa_name(intptr, NULL);
+	assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
+	SSA_NAME_DEF_STMT(intptr) = assign_intptr;
+	gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
+	update_stmt(assign_intptr);
+
+	/* apply logical or to temporary unsigned long and bitmask */
+	kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
+	orptr = fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask);
+	intptr = make_ssa_name(SSA_NAME_VAR(intptr), NULL);
+	assign_intptr = gimple_build_assign(intptr, orptr);
+	SSA_NAME_DEF_STMT(intptr) = assign_intptr;
+	gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
+	update_stmt(assign_intptr);
+
+	/* cast temporary unsigned long back to a temporary fptr variable */
+	new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_fptr");
+	add_referenced_var(new_fptr);
+	new_fptr = make_ssa_name(new_fptr, NULL);
+	assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
+	SSA_NAME_DEF_STMT(new_fptr) = assign_new_fptr;
+	gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
+	update_stmt(assign_new_fptr);
+
+	/* replace call stmt fn with the new fptr */
+	gimple_call_set_fn(call_stmt, new_fptr);
+	update_stmt(call_stmt);
+}
+
+static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
+{
+	gimple stmt;
+	gasm *asm_or_stmt;
+	gcall *call_stmt;
+	tree old_fptr, new_fptr, input, output;
+#if BUILDING_GCC_VERSION <= 4007
+	VEC(tree, gc) *inputs = NULL;
+	VEC(tree, gc) *outputs = NULL;
+#else
+	vec<tree, va_gc> *inputs = NULL;
+	vec<tree, va_gc> *outputs = NULL;
+#endif
+
+	call_stmt = as_a_gcall(gsi_stmt(*gsi));
+	old_fptr = gimple_call_fn(call_stmt);
+
+	/* create temporary fptr variable */
+	new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
+	add_referenced_var(new_fptr);
+	new_fptr = make_ssa_name(new_fptr, NULL);
+
+	/* build asm volatile("orq %%r12, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr)); */
+	input = build_tree_list(NULL_TREE, build_const_char_string(2, "0"));
+	input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
+	output = build_tree_list(NULL_TREE, build_const_char_string(3, "=r"));
+	output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
+#if BUILDING_GCC_VERSION <= 4007
+	VEC_safe_push(tree, gc, inputs, input);
+	VEC_safe_push(tree, gc, outputs, output);
+#else
+	vec_safe_push(inputs, input);
+	vec_safe_push(outputs, output);
+#endif
+	stmt = gimple_build_asm_vec("orq %%r12, %0\n\t", inputs, outputs, NULL, NULL);
+	asm_or_stmt = as_a_gasm(stmt);
+	SSA_NAME_DEF_STMT(new_fptr) = asm_or_stmt;
+	gimple_asm_set_volatile(asm_or_stmt, true);
+	gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
+	update_stmt(asm_or_stmt);
+
+	/* replace call stmt fn with the new fptr */
+	gimple_call_set_fn(call_stmt, new_fptr);
+	update_stmt(call_stmt);
+}
+
+/*
+ * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
+ */
+static unsigned int kernexec_fptr_execute(void)
+{
+	basic_block bb;
+
+	/* 1. loop through BBs and GIMPLE statements */
+	FOR_EACH_BB_FN(bb, cfun) {
+		gimple_stmt_iterator gsi;
+
+		for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
+			/* gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D)); */
+			tree fn;
+			gimple stmt;
+			gcall *call_stmt;
+
+			/* is it a call ... */
+			stmt = gsi_stmt(gsi);
+			if (!is_gimple_call(stmt))
+				continue;
+			call_stmt = as_a_gcall(stmt);
+			fn = gimple_call_fn(call_stmt);
+			if (!fn)
+				continue;
+			if (TREE_CODE(fn) == ADDR_EXPR)
+				continue;
+			if (TREE_CODE(fn) != SSA_NAME)
+				gcc_unreachable();
+
+			/* ... through a function pointer */
+			if (SSA_NAME_VAR(fn) != NULL_TREE) {
+				fn = SSA_NAME_VAR(fn);
+				if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL) {
+					debug_tree(fn);
+					gcc_unreachable();
+				}
+			}
+			fn = TREE_TYPE(fn);
+			if (TREE_CODE(fn) != POINTER_TYPE)
+				continue;
+			fn = TREE_TYPE(fn);
+			if (TREE_CODE(fn) != FUNCTION_TYPE)
+				continue;
+
+			kernexec_instrument_fptr(&gsi);
+
+#if 0
+			debug_tree(gimple_call_fn(call_stmt));
+			print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
+#endif
+		}
+	}
+
+	return 0;
+}
+
+/* add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn */
+static void kernexec_instrument_retaddr_bts(rtx insn)
+{
+	rtx btsq;
+	rtvec argvec, constraintvec, labelvec;
+
+	/* create asm volatile("btsq $63,(%%rsp)":::) */
+	argvec = rtvec_alloc(0);
+	constraintvec = rtvec_alloc(0);
+	labelvec = rtvec_alloc(0);
+	btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, RTL_LOCATION(insn));
+	MEM_VOLATILE_P(btsq) = 1;
+	emit_insn_before(btsq, insn);
+}
+
+/* add special KERNEXEC instrumentation: orq %r12,(%rsp) just before retn */
+static void kernexec_instrument_retaddr_or(rtx insn)
+{
+	rtx orq;
+	rtvec argvec, constraintvec, labelvec;
+
+	/* create asm volatile("orq %%r12,(%%rsp)":::) */
+	argvec = rtvec_alloc(0);
+	constraintvec = rtvec_alloc(0);
+	labelvec = rtvec_alloc(0);
+	orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r12,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, RTL_LOCATION(insn));
+	MEM_VOLATILE_P(orq) = 1;
+	emit_insn_before(orq, insn);
+}
+
+/*
+ * find all asm level function returns and forcibly set the highest bit of the return address
+ */
+static unsigned int kernexec_retaddr_execute(void)
+{
+	rtx_insn *insn;
+
+	/* 1. find function returns */
+	for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
+		/*
+		 * rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
+		 *            (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
+		 *            (jump_insn 97 96 98 6 (simple_return) fptr.c:50 -1 (nil) -> simple_return)
+		 */
+		rtx body;
+
+		/* is it a retn */
+		if (!JUMP_P(insn))
+			continue;
+		body = PATTERN(insn);
+		if (GET_CODE(body) == PARALLEL)
+			body = XVECEXP(body, 0, 0);
+		if (!ANY_RETURN_P(body))
+			continue;
+		kernexec_instrument_retaddr(insn);
+	}
+
+#if 0
+	print_simple_rtl(stderr, get_insns());
+	print_rtl(stderr, get_insns());
+#endif
+
+	return 0;
+}
+
+/*
+ * Do not instrument things that are expecting to jump to userspace, like
+ * vsyscall.
+ */
+static bool kernexec_cmodel_check(void)
+{
+	tree section;
+
+	if (ix86_cmodel != CM_KERNEL)
+		return false;
+
+	section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
+	if (!section || !TREE_VALUE(section))
+		return true;
+
+	section = TREE_VALUE(TREE_VALUE(section));
+	if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
+		return true;
+
+	return false;
+}
+
+static bool kernexec_reload_gate(void)
+{
+	return kernexec_cmodel_check();
+}
+
+#define PASS_NAME kernexec_reload
+#define TODO_FLAGS_FINISH TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
+#include "gcc-generate-gimple-pass.h"
+
+static bool kernexec_fptr_gate(void)
+{
+	return kernexec_cmodel_check();
+}
+
+#define PASS_NAME kernexec_fptr
+#define TODO_FLAGS_FINISH TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
+#include "gcc-generate-gimple-pass.h"
+
+static bool kernexec_retaddr_gate(void)
+{
+	return kernexec_cmodel_check();
+}
+
+#define PASS_NAME kernexec_retaddr
+#define TODO_FLAGS_FINISH TODO_dump_func | TODO_ggc_collect
+#include "gcc-generate-rtl-pass.h"
+
+__visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
+{
+	const char * const plugin_name = plugin_info->base_name;
+	const int argc = plugin_info->argc;
+	const struct plugin_argument * const argv = plugin_info->argv;
+	int i;
+
+	PASS_INFO(kernexec_reload, "early_optimizations", 1, PASS_POS_INSERT_BEFORE);
+	PASS_INFO(kernexec_fptr, "early_optimizations", 1, PASS_POS_INSERT_BEFORE);
+	PASS_INFO(kernexec_retaddr, "pro_and_epilogue", 1, PASS_POS_INSERT_AFTER);
+
+	if (!plugin_default_version_check(version, &gcc_version)) {
+		error(G_("incompatible gcc/plugin versions"));
+		return 1;
+	}
+
+	register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
+
+	if (TARGET_64BIT == 0)
+		return 0;
+
+	/* Upstream kernel only supports "BTS" right now. */
+	kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
+	kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
+
+	for (i = 0; i < argc; ++i) {
+		if (!strcmp(argv[i].key, "method")) {
+			if (!argv[i].value) {
+				error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
+				continue;
+			}
+			if (!strcmp(argv[i].value, "bts") || !strcmp(argv[i].value, "\"bts\"")) {
+				kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
+				kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
+			} else if (!strcmp(argv[i].value, "or") || !strcmp(argv[i].value, "\"or\"")) {
+				error(G_("option '-fplugin-arg-%s-%s' not supported on upstream kernel"), plugin_name, argv[i].key);
+				kernexec_instrument_fptr = kernexec_instrument_fptr_or;
+				kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
+				fix_register("r12", 1, 1);
+			} else
+				error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
+			continue;
+		}
+		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
+	}
+	if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
+		error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
+
+	if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
+		register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
+	register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
+	register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
+
+	return 0;
+}
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index c80291319cb2..716f8ca1b555 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -220,7 +220,7 @@ else
 fi;
 
 # final build of init/
-${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init GCC_PLUGINS_CFLAGS="${GCC_PLUGINS_CFLAGS}"
+${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init GCC_PLUGINS_CFLAGS="${GCC_PLUGINS_CFLAGS}" GCC_PLUGINS_AFLAGS="${GCC_PLUGINS_AFLAGS}"
 
 archive_builtin
 
-- 
2.7.4

-- 
Kees Cook
Nexus Security
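
For readers who want the effect of the instrumentation spelled out in C
rather than asm, here is a rough, illustrative sketch; it is not part of
the patch, and the kernexec_mask() and indirect_call() names are invented
for the example. Setting bit 63 leaves a kernel-space call target
unchanged, but turns a userspace target into a non-canonical address, so
the instrumented call takes a general protection fault instead of
executing attacker-supplied memory:

#include <stdint.h>

typedef void (*fptr_t)(void);

/* Roughly what the emitted "btsq $63, reg" / "orq %r12, reg" does. */
static inline fptr_t kernexec_mask(fptr_t fn)
{
	return (fptr_t)((uintptr_t)fn | 0x8000000000000000UL);
}

static void indirect_call(fptr_t fn)
{
	/* Instrumented equivalent of a plain "fn();" call site. */
	kernexec_mask(fn)();
}

In an instrumented build (CONFIG_GCC_PLUGINS=y plus the new
CONFIG_GCC_PLUGIN_KERNEXEC=y on x86_64), the plugin applies this masking
to every C-level call through a function pointer and to every function
return; the "or" variant additionally reserves %r12 to hold the mask.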