Message-Id: <20200617190757.27081-4-john.s.andersen@intel.com>
Date: Wed, 17 Jun 2020 12:07:56 -0700
From: John Andersen <john.s.andersen@...el.com>
To: corbet@....net,
	pbonzini@...hat.com,
	tglx@...utronix.de,
	mingo@...hat.com,
	bp@...en8.de,
	x86@...nel.org,
	hpa@...or.com,
	shuah@...nel.org,
	sean.j.christopherson@...el.com,
	liran.alon@...cle.com,
	drjones@...hat.com,
	rick.p.edgecombe@...el.com,
	kristen@...ux.intel.com
Cc: vkuznets@...hat.com,
	wanpengli@...cent.com,
	jmattson@...gle.com,
	joro@...tes.org,
	mchehab+huawei@...nel.org,
	gregkh@...uxfoundation.org,
	paulmck@...nel.org,
	pawan.kumar.gupta@...ux.intel.com,
	jgross@...e.com,
	mike.kravetz@...cle.com,
	oneukum@...e.com,
	luto@...nel.org,
	peterz@...radead.org,
	fenghua.yu@...el.com,
	reinette.chatre@...el.com,
	vineela.tummalapalli@...el.com,
	dave.hansen@...ux.intel.com,
	john.s.andersen@...el.com,
	arjan@...ux.intel.com,
	caoj.fnst@...fujitsu.com,
	bhe@...hat.com,
	nivedita@...m.mit.edu,
	keescook@...omium.org,
	dan.j.williams@...el.com,
	eric.auger@...hat.com,
	aaronlewis@...gle.com,
	peterx@...hat.com,
	makarandsonare@...gle.com,
	linux-doc@...r.kernel.org,
	linux-kernel@...r.kernel.org,
	kvm@...r.kernel.org,
	linux-kselftest@...r.kernel.org,
	kernel-hardening@...ts.openwall.com
Subject: [PATCH 3/4] selftests: kvm: add test for CR pinning with SMM

Check that paravirtualized control register pinning blocks modifications
of pinned CR values stored in SMRAM on exit from SMM.
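
The test runs twice. On the first pass the SMI handler leaves SMRAM
untouched and the pinned bits are expected to survive the SMM round
trip. On the second pass, host code rewrites the CR0/CR4 values saved
in the SMRAM state save area before the guest executes RSM; KVM is
expected to refuse to load the unpinned values and exit to userspace
with KVM_EXIT_INTERNAL_ERROR / KVM_INTERNAL_ERROR_EMULATION.

The guest-side pinning sequence the test exercises is, in sketch form
(using the MSR defines this patch adds to processor.h):

	set_cr0(get_cr0() | X86_CR0_WP);
	wrmsr(MSR_KVM_CR0_PINNED_HIGH, X86_CR0_WP);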

Signed-off-by: John Andersen <john.s.andersen@...el.com>
---
 tools/testing/selftests/kvm/.gitignore        |   1 +
 tools/testing/selftests/kvm/Makefile          |   1 +
 .../selftests/kvm/include/x86_64/processor.h  |  13 ++
 .../selftests/kvm/x86_64/smm_cr_pin_test.c    | 226 ++++++++++++++++++
 4 files changed, 241 insertions(+)
 create mode 100644 tools/testing/selftests/kvm/x86_64/smm_cr_pin_test.c

diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index 452787152748..c0666c3efcbb 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -10,6 +10,7 @@
 /x86_64/platform_info_test
 /x86_64/set_sregs_test
 /x86_64/smm_test
+/x86_64/smm_cr_pin_test
 /x86_64/state_test
 /x86_64/vmx_preemption_timer_test
 /x86_64/svm_vmcall_test
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 4a166588d99f..c5c205637f38 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -45,6 +45,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/mmio_warning_test
 TEST_GEN_PROGS_x86_64 += x86_64/platform_info_test
 TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test
 TEST_GEN_PROGS_x86_64 += x86_64/smm_test
+TEST_GEN_PROGS_x86_64 += x86_64/smm_cr_pin_test
 TEST_GEN_PROGS_x86_64 += x86_64/state_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test
 TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 82b7fe16a824..8a2da0449772 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -200,6 +200,11 @@ static inline uint64_t get_cr0(void)
 	return cr0;
 }
 
+static inline void set_cr0(uint64_t val)
+{
+	__asm__ __volatile__("mov %0, %%cr0" : : "r" (val) : "memory");
+}
+
 static inline uint64_t get_cr3(void)
 {
 	uint64_t cr3;
@@ -383,4 +388,12 @@ void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
 /* VMX_EPT_VPID_CAP bits */
 #define VMX_EPT_VPID_CAP_AD_BITS       (1ULL << 21)
 
+/* KVM MSRs */
+#define MSR_KVM_CR0_PIN_ALLOWED	0x4b564d08
+#define MSR_KVM_CR4_PIN_ALLOWED	0x4b564d09
+#define MSR_KVM_CR0_PINNED_LOW	0x4b564d0a
+#define MSR_KVM_CR0_PINNED_HIGH	0x4b564d0b
+#define MSR_KVM_CR4_PINNED_LOW	0x4b564d0c
+#define MSR_KVM_CR4_PINNED_HIGH	0x4b564d0d
+
 #endif /* SELFTEST_KVM_PROCESSOR_H */
diff --git a/tools/testing/selftests/kvm/x86_64/smm_cr_pin_test.c b/tools/testing/selftests/kvm/x86_64/smm_cr_pin_test.c
new file mode 100644
index 000000000000..a32f577ca1e5
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/smm_cr_pin_test.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Tests for control register pinning not being affected by SMRAM writes.
+ */
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <linux/stringify.h>
+
+#include "test_util.h"
+
+#include "kvm_util.h"
+
+#include "processor.h"
+
+#define VCPU_ID	      1
+
+#define PAGE_SIZE  4096
+
+#define SMRAM_SIZE 65536
+#define SMRAM_MEMSLOT ((1 << 16) | 1)
+#define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE)
+#define SMRAM_GPA 0x1000000
+#define SMRAM_STAGE_SUCCESS 0xfe
+#define SMRAM_STAGE_FAILURE 0xfd
+
+#define XSTR(s) __stringify(s)
+
+#define SYNC_PORT 0xe
+#define DONE 0xff
+
+#define CR0_PINNED X86_CR0_WP
+#define CR4_PINNED (X86_CR4_SMAP | X86_CR4_UMIP)
+#define CR4_ALL (CR4_PINNED | X86_CR4_SMEP)
+
+/*
+ * This is compiled as normal 64-bit code, but the SMI handler executes in
+ * real-address mode. To keep things simple we limit ourselves to a
+ * mode-independent subset of asm here. Each SMI handler reports back a
+ * fixed stage value through the sync port.
+ */
+uint8_t smi_handler_success[] = {
+	0xb0, SMRAM_STAGE_SUCCESS,    /* mov $SMRAM_STAGE_SUCCESS, %al */
+	0xe4, SYNC_PORT,              /* in $SYNC_PORT, %al */
+	0x0f, 0xaa,                   /* rsm */
+};
+uint8_t smi_handler_fault[] = {
+	0xb0, SMRAM_STAGE_FAILURE,    /* mov $SMRAM_STAGE_FAILURE, %al */
+	0xe4, SYNC_PORT,              /* in $SYNC_PORT, %al */
+	0x0f, 0xaa,                   /* rsm */
+};
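+
+/*
+ * One of these handlers is copied to SMRAM_GPA + 0x8000 (the SMI entry
+ * point, SMBASE + 0x8000) by host code in main().
+ */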
+
+/*
+ * We opt not to use GUEST_SYNC() here because we also have to make a sync
+ * call from SMM. As such, the address of the ucall struct we'd need to pass
+ * isn't something we can put into the above machine code in a maintainable
+ * way.
+ */
+static inline void sync_with_host(uint64_t phase)
+{
+	asm volatile("in $" XSTR(SYNC_PORT)", %%al\n"
+		     : "+a" (phase));
+}
+
+void self_smi(void)
+{
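+	/* Write the x2APIC ICR to deliver an SMI to this vCPU. */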
+	wrmsr(APIC_BASE_MSR + (APIC_ICR >> 4),
+	      APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
+}
+
+void guest_code(void)
+{
+	uint64_t apicbase = rdmsr(MSR_IA32_APICBASE);
+
+	sync_with_host(1);
+
+	wrmsr(MSR_IA32_APICBASE, apicbase | X2APIC_ENABLE);
+
+	sync_with_host(2);
+
+	set_cr0(get_cr0() | CR0_PINNED);
+
+	wrmsr(MSR_KVM_CR0_PINNED_HIGH, CR0_PINNED);
+
+	sync_with_host(3);
+
+	set_cr4(get_cr4() | CR4_PINNED);
+
+	sync_with_host(4);
+
+	/* Pin the CR4 bits set above (SMAP and UMIP) high. */
+	wrmsr(MSR_KVM_CR4_PINNED_HIGH, CR4_PINNED);
+
+	sync_with_host(5);
+
+	self_smi();
+
+	sync_with_host(DONE);
+}
+
+int main(int argc, char *argv[])
+{
+	struct kvm_regs regs;
+	struct kvm_sregs sregs;
+	struct kvm_vm *vm;
+	struct kvm_run *run;
+	struct kvm_x86_state *state;
+	int failure, stage, stage_reported;
+	u64 *cr;
+
+	for (failure = 0; failure <= 1; failure++) {
+		stage_reported = 0;
+
+		/* Create VM */
+		vm = vm_create_default(VCPU_ID, 0, guest_code);
+
+		vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+
+		run = vcpu_state(vm, VCPU_ID);
+
+		vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA,
+					    SMRAM_MEMSLOT, SMRAM_PAGES, 0);
+		TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, SMRAM_GPA, SMRAM_MEMSLOT)
+			    == SMRAM_GPA, "could not allocate guest physical addresses?");
+
+		memset(addr_gpa2hva(vm, SMRAM_GPA), 0x0, SMRAM_SIZE);
+		if (failure) {
+			memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler_fault,
+			       sizeof(smi_handler_fault));
+		} else {
+			memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler_success,
+			       sizeof(smi_handler_success));
+		}
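+		/* Point SMBASE at SMRAM_GPA so SMIs run the handler copied above. */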
+		vcpu_set_msr(vm, VCPU_ID, MSR_IA32_SMBASE, SMRAM_GPA);
+
+		for (stage = 1;; stage++) {
+			_vcpu_run(vm, VCPU_ID);
+
+			memset(&regs, 0, sizeof(regs));
+			vcpu_regs_get(vm, VCPU_ID, &regs);
+
+			memset(&sregs, 0, sizeof(sregs));
+			vcpu_sregs_get(vm, VCPU_ID, &sregs);
+
+			/* stage_reported currently holds the last stage reported. */
+			if (failure && stage_reported == SMRAM_STAGE_FAILURE) {
+				/* Ensure that we exit on smram modification of CR4 */
+				TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR &&
+					    run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION,
+					    "Stage %d: unexpected exit reason: %u, suberror: %u (%s),\n",
+					    stage, run->exit_reason, run->internal.suberror,
+					    exit_reason_str(run->exit_reason));
+				if (run->exit_reason == KVM_EXIT_INTERNAL_ERROR)
+					goto done;
+			} else {
+				TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+					    "Stage %d: unexpected exit reason: %u (%s),\n",
+					    stage, run->exit_reason,
+					    exit_reason_str(run->exit_reason));
+			}
+
+			stage_reported = regs.rax & 0xff;
+
+			if (stage_reported == DONE) {
+				TEST_ASSERT((sregs.cr0 & CR0_PINNED) == CR0_PINNED,
+					    "Unexpected cr0. Bits missing: %llx",
+					    sregs.cr0 ^ (CR0_PINNED | sregs.cr0));
+				TEST_ASSERT((sregs.cr4 & CR4_ALL) == CR4_PINNED,
+					    "Unexpected cr4. Bits missing: %llx, cr4: %llx",
+					    sregs.cr4 ^ (CR4_ALL | sregs.cr4),
+					    sregs.cr4);
+				goto done;
+			}
+
+			TEST_ASSERT(stage_reported == stage ||
+				    stage_reported == SMRAM_STAGE_SUCCESS ||
+				    stage_reported == SMRAM_STAGE_FAILURE,
+				    "Unexpected stage: #%x, got %x",
+				    stage, stage_reported);
+
+			/*
+			 * Within SMM, modify the saved CR0/CR4 so they no
+			 * longer contain the pinned bits. Offsets 0x7f58 and
+			 * 0x7f48 are the CR0 and CR4 fields in KVM's 64-bit
+			 * SMRAM state save area.
+			 */
+			if (stage_reported == SMRAM_STAGE_FAILURE) {
+				cr = (u64 *)(addr_gpa2hva(vm, SMRAM_GPA + 0x8000 + 0x7f58));
+				*cr &= ~CR0_PINNED;
+
+				cr = (u64 *)(addr_gpa2hva(vm, SMRAM_GPA + 0x8000 + 0x7f48));
+				/* Clear the pinned bits, then set SMEP on top. */
+				*cr &= ~CR4_PINNED;
+				*cr |= X86_CR4_SMEP;
+			}
+
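+			/*
+			 * Regenerate the VM from saved state each stage, as
+			 * smm_test.c does, so that pinning is also exercised
+			 * across a vCPU state save/restore cycle.
+			 */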
+			state = vcpu_save_state(vm, VCPU_ID);
+			kvm_vm_release(vm);
+			kvm_vm_restart(vm, O_RDWR);
+			vm_vcpu_add(vm, VCPU_ID);
+			vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+			vcpu_load_state(vm, VCPU_ID, state);
+			run = vcpu_state(vm, VCPU_ID);
+			free(state);
+		}
+
+done:
+		kvm_vm_free(vm);
+	}
+}
-- 
2.21.0
