Message-ID: <202008150939.A994680@keescook>
Date: Sat, 15 Aug 2020 09:52:29 -0700
From: Kees Cook <keescook@...omium.org>
To: Alexander Popov <alex.popov@...ux.com>
Cc: Jann Horn <jannh@...gle.com>, Will Deacon <will@...nel.org>,
	Andrey Ryabinin <aryabinin@...tuozzo.com>,
	Alexander Potapenko <glider@...gle.com>,
	Dmitry Vyukov <dvyukov@...gle.com>,
	Christoph Lameter <cl@...ux.com>, Pekka Enberg <penberg@...nel.org>,
	David Rientjes <rientjes@...gle.com>,
	Joonsoo Kim <iamjoonsoo.kim@....com>,
	Andrew Morton <akpm@...ux-foundation.org>,
	Masahiro Yamada <masahiroy@...nel.org>,
	Masami Hiramatsu <mhiramat@...nel.org>,
	Steven Rostedt <rostedt@...dmis.org>,
	Peter Zijlstra <peterz@...radead.org>,
	Krzysztof Kozlowski <krzk@...nel.org>,
	Patrick Bellasi <patrick.bellasi@....com>,
	David Howells <dhowells@...hat.com>,
	Eric Biederman <ebiederm@...ssion.com>,
	Johannes Weiner <hannes@...xchg.org>,
	Laura Abbott <labbott@...hat.com>, Arnd Bergmann <arnd@...db.de>,
	Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
	kasan-dev@...glegroups.com, linux-mm@...ck.org,
	kernel-hardening@...ts.openwall.com, linux-kernel@...r.kernel.org,
	notify@...nel.org
Subject: Re: [PATCH RFC 1/2] mm: Extract SLAB_QUARANTINE from KASAN

On Thu, Aug 13, 2020 at 06:19:21PM +0300, Alexander Popov wrote:
> Heap spraying is an exploitation technique that aims to put controlled
> bytes at a predetermined memory location on the heap. Heap spraying for
> exploiting use-after-free in the Linux kernel relies on the fact that on
> kmalloc(), the slab allocator returns the address of the memory that was
> recently freed. Allocating a kernel object with the same size and
> controlled contents allows overwriting the vulnerable freed object.
> 
> Let's extract slab freelist quarantine from KASAN functionality and
> call it CONFIG_SLAB_QUARANTINE. This feature breaks the widespread heap
> spraying technique used for exploiting use-after-free vulnerabilities
> in kernel code.
> 
> If this feature is enabled, freed allocations are stored in the quarantine
> and can't be instantly reallocated and overwritten by an exploit
> performing heap spraying.

It may be worth clarifying that this is specifically only direct UAF and
doesn't help with spray-and-overflow-into-a-neighboring-object attacks
(i.e. both tend to use sprays, but the former doesn't depend on a write
overflow).
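
In other words, the quarantine only interferes with the direct pattern,
which looks roughly like this (illustrative sketch, made-up names):

	kfree(victim);				/* bug: a stale pointer survives */
	for (i = 0; i < SPRAY_COUNT; i++)	/* refill the same-size slab slot */
		spray[i] = kmalloc(sizeof(*victim), GFP_KERNEL);
	/* Filling spray[i] with attacker-controlled bytes now also rewrites
	 * the object still reachable through the stale pointer. */

An attacker who instead overflows out of a live neighboring object never
needs the freed slot handed back, so delaying its reuse doesn't hurt there.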

> Signed-off-by: Alexander Popov <alex.popov@...ux.com>
> ---
>  include/linux/kasan.h      | 107 ++++++++++++++++++++-----------------
>  include/linux/slab_def.h   |   2 +-
>  include/linux/slub_def.h   |   2 +-
>  init/Kconfig               |  11 ++++
>  mm/Makefile                |   3 +-
>  mm/kasan/Makefile          |   2 +
>  mm/kasan/kasan.h           |  75 +++++++++++++-------------
>  mm/kasan/quarantine.c      |   2 +
>  mm/kasan/slab_quarantine.c |  99 ++++++++++++++++++++++++++++++++++
>  mm/slub.c                  |   2 +-
>  10 files changed, 216 insertions(+), 89 deletions(-)
>  create mode 100644 mm/kasan/slab_quarantine.c
> 
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index 087fba34b209..b837216f760c 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -42,32 +42,14 @@ void kasan_unpoison_task_stack(struct task_struct *task);
>  void kasan_alloc_pages(struct page *page, unsigned int order);
>  void kasan_free_pages(struct page *page, unsigned int order);
>  
> -void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
> -			slab_flags_t *flags);
> -
>  void kasan_poison_slab(struct page *page);
>  void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
>  void kasan_poison_object_data(struct kmem_cache *cache, void *object);
>  void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
>  					const void *object);
>  
> -void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
> -						gfp_t flags);
>  void kasan_kfree_large(void *ptr, unsigned long ip);
>  void kasan_poison_kfree(void *ptr, unsigned long ip);
> -void * __must_check kasan_kmalloc(struct kmem_cache *s, const void *object,
> -					size_t size, gfp_t flags);
> -void * __must_check kasan_krealloc(const void *object, size_t new_size,
> -					gfp_t flags);
> -
> -void * __must_check kasan_slab_alloc(struct kmem_cache *s, void *object,
> -					gfp_t flags);
> -bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
> -
> -struct kasan_cache {
> -	int alloc_meta_offset;
> -	int free_meta_offset;
> -};
>  
>  /*
>   * These functions provide a special case to support backing module
> @@ -107,10 +89,6 @@ static inline void kasan_disable_current(void) {}
>  static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
>  static inline void kasan_free_pages(struct page *page, unsigned int order) {}
>  
> -static inline void kasan_cache_create(struct kmem_cache *cache,
> -				      unsigned int *size,
> -				      slab_flags_t *flags) {}
> -
>  static inline void kasan_poison_slab(struct page *page) {}
>  static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
>  					void *object) {}
> @@ -122,17 +100,65 @@ static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
>  	return (void *)object;
>  }
>  
> +static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
> +static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {}
> +static inline void kasan_free_shadow(const struct vm_struct *vm) {}
> +static inline void kasan_remove_zero_shadow(void *start, unsigned long size) {}
> +static inline void kasan_unpoison_slab(const void *ptr) {}
> +
> +static inline int kasan_module_alloc(void *addr, size_t size)
> +{
> +	return 0;
> +}
> +
> +static inline int kasan_add_zero_shadow(void *start, unsigned long size)
> +{
> +	return 0;
> +}
> +
> +static inline size_t kasan_metadata_size(struct kmem_cache *cache)
> +{
> +	return 0;
> +}
> +
> +#endif /* CONFIG_KASAN */
> +
> +struct kasan_cache {
> +	int alloc_meta_offset;
> +	int free_meta_offset;
> +};
> +
> +#if defined(CONFIG_KASAN) || defined(CONFIG_SLAB_QUARANTINE)
> +
> +void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
> +			slab_flags_t *flags);
> +void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
> +						gfp_t flags);
> +void * __must_check kasan_kmalloc(struct kmem_cache *s, const void *object,
> +					size_t size, gfp_t flags);
> +void * __must_check kasan_krealloc(const void *object, size_t new_size,
> +					gfp_t flags);
> +void * __must_check kasan_slab_alloc(struct kmem_cache *s, void *object,
> +					gfp_t flags);
> +bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
> +
> +#else /* CONFIG_KASAN || CONFIG_SLAB_QUARANTINE */
> +
> +static inline void kasan_cache_create(struct kmem_cache *cache,
> +				      unsigned int *size,
> +				      slab_flags_t *flags) {}
> +
>  static inline void *kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags)
>  {
>  	return ptr;
>  }
> -static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
> -static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {}
> +
>  static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
>  				size_t size, gfp_t flags)
>  {
>  	return (void *)object;
>  }
> +
>  static inline void *kasan_krealloc(const void *object, size_t new_size,
>  				 gfp_t flags)
>  {
> @@ -144,43 +170,28 @@ static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
>  {
>  	return object;
>  }
> +
>  static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
>  				   unsigned long ip)
>  {
>  	return false;
>  }
> -
> -static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
> -static inline void kasan_free_shadow(const struct vm_struct *vm) {}
> -
> -static inline int kasan_add_zero_shadow(void *start, unsigned long size)
> -{
> -	return 0;
> -}
> -static inline void kasan_remove_zero_shadow(void *start,
> -					unsigned long size)
> -{}
> -
> -static inline void kasan_unpoison_slab(const void *ptr) { }
> -static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
> -
> -#endif /* CONFIG_KASAN */
> +#endif /* CONFIG_KASAN || CONFIG_SLAB_QUARANTINE */
>  
>  #ifdef CONFIG_KASAN_GENERIC
> -
>  #define KASAN_SHADOW_INIT 0
> -
> -void kasan_cache_shrink(struct kmem_cache *cache);
> -void kasan_cache_shutdown(struct kmem_cache *cache);
>  void kasan_record_aux_stack(void *ptr);
> -
>  #else /* CONFIG_KASAN_GENERIC */
> +static inline void kasan_record_aux_stack(void *ptr) {}
> +#endif /* CONFIG_KASAN_GENERIC */
>  
> +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_SLAB_QUARANTINE)
> +void kasan_cache_shrink(struct kmem_cache *cache);
> +void kasan_cache_shutdown(struct kmem_cache *cache);
> +#else /* CONFIG_KASAN_GENERIC || CONFIG_SLAB_QUARANTINE */
>  static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
>  static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
> -static inline void kasan_record_aux_stack(void *ptr) {}
> -
> -#endif /* CONFIG_KASAN_GENERIC */
> +#endif /* CONFIG_KASAN_GENERIC || CONFIG_SLAB_QUARANTINE */

In doing this extraction, I wonder if function naming should be changed?
If it's going to live a new life outside of KASAN proper, maybe call
these functions quarantine_cache_*()? But perhaps that's too much
churn...
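
e.g. a strawman:

	void quarantine_cache_create(struct kmem_cache *cache,
				     unsigned int *size, slab_flags_t *flags);
	void quarantine_cache_shrink(struct kmem_cache *cache);
	void quarantine_cache_shutdown(struct kmem_cache *cache);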

>  #ifdef CONFIG_KASAN_SW_TAGS
>  
> diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
> index 9eb430c163c2..fc7548f27512 100644
> --- a/include/linux/slab_def.h
> +++ b/include/linux/slab_def.h
> @@ -72,7 +72,7 @@ struct kmem_cache {
>  	int obj_offset;
>  #endif /* CONFIG_DEBUG_SLAB */
>  
> -#ifdef CONFIG_KASAN
> +#if defined(CONFIG_KASAN) || defined(CONFIG_SLAB_QUARANTINE)
>  	struct kasan_cache kasan_info;
>  #endif
>  
> diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
> index 1be0ed5befa1..71020cee9fd2 100644
> --- a/include/linux/slub_def.h
> +++ b/include/linux/slub_def.h
> @@ -124,7 +124,7 @@ struct kmem_cache {
>  	unsigned int *random_seq;
>  #endif
>  
> -#ifdef CONFIG_KASAN
> +#if defined(CONFIG_KASAN) || defined(CONFIG_SLAB_QUARANTINE)
>  	struct kasan_cache kasan_info;
>  #endif
>  
> diff --git a/init/Kconfig b/init/Kconfig
> index d6a0b31b13dc..de5aa061762f 100644
> --- a/init/Kconfig
> +++ b/init/Kconfig
> @@ -1931,6 +1931,17 @@ config SLAB_FREELIST_HARDENED
>  	  sanity-checking than others. This option is most effective with
>  	  CONFIG_SLUB.
>  
> +config SLAB_QUARANTINE
> +	bool "Enable slab freelist quarantine"
> +	depends on !KASAN && (SLAB || SLUB)
> +	help
> +	  Enable slab freelist quarantine to break the heap spraying
> +	  technique used for exploiting use-after-free vulnerabilities
> +	  in kernel code. If this feature is enabled, freed allocations
> +	  are stored in the quarantine and can't be instantly reallocated
> +	  and overwritten by an exploit performing heap spraying.
> +	  This feature is extracted from the KASAN functionality.
> +

To make this available to distros, I think this needs to be more than
just a CONFIG. I'd love to see this CONFIG control the availability, but
have a boot param control a ro-after-init static branch for these
functions (like is done for init_on_alloc, hardened usercopy, etc). Then
the branch can be off by default for regular distro users, and more
cautious folks could enable it with a boot param without having to roll
their own kernels.
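
Something like this untested sketch, modeled on the init_on_alloc and
hardened-usercopy setup (key and parameter names are placeholders):

	static DEFINE_STATIC_KEY_FALSE_RO(slab_quarantine_enabled);

	static int __init parse_slab_quarantine(char *str)
	{
		bool enable;

		if (!str || kstrtobool(str, &enable))
			return -EINVAL;
		if (enable)
			static_branch_enable(&slab_quarantine_enabled);
		return 0;
	}
	early_param("slab_quarantine", parse_slab_quarantine);

with the quarantine hooks bailing out early:

	if (!static_branch_unlikely(&slab_quarantine_enabled))
		return false;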

> [...]
> +struct kasan_track {
> +	u32 pid;

pid_t?

> +	depot_stack_handle_t stack;
> +};
> [...]
> +#if defined(CONFIG_KASAN_GENERIC) && \
> +	(defined(CONFIG_SLAB) || defined(CONFIG_SLUB)) || \
> +	defined(CONFIG_SLAB_QUARANTINE)

This seems a bit messy. Perhaps an invisible CONFIG to do this logic and
then the files can test for that? CONFIG_USE_SLAB_QUARANTINE or
something?
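
i.e. roughly:

	config USE_SLAB_QUARANTINE
		def_bool (KASAN_GENERIC && (SLAB || SLUB)) || SLAB_QUARANTINE

and then the source files only ever test CONFIG_USE_SLAB_QUARANTINE.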

> [...]
> + * Heap spraying is an exploitation technique that aims to put controlled
> + * bytes at a predetermined memory location on the heap. Heap spraying for
> + * exploiting use-after-free in the Linux kernel relies on the fact that on
> + * kmalloc(), the slab allocator returns the address of the memory that was
> + * recently freed. Allocating a kernel object with the same size and
> + * controlled contents allows overwriting the vulnerable freed object.
> + *
> + * If freed allocations are stored in the quarantine, they can't be
> + * instantly reallocated and overwritten by the exploit performing
> + * heap spraying.

I would clarify this with the details of what is actually happening: the
allocation isn't _moved_ to a quarantine, yes? It's only marked as not
available for allocation?
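
(If I'm reading the existing mm/kasan/quarantine.c right, that is the
case: the freed object stays in place on its slab, poisoned, and only
its free meta is linked onto a quarantine list, roughly:

	q = this_cpu_ptr(&cpu_quarantine);
	qlist_put(q, &info->quarantine_link, cache->size);
	if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE))
		qlist_move_all(q, &temp);	/* spill toward the global
						 * quarantine; the real slab
						 * free happens later in
						 * quarantine_reduce() */

so "stored in the quarantine" really means "kept off the freelist".
Worth spelling that out in the comment.)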

> + */
> +
> +#include <linux/kasan.h>
> +#include <linux/bug.h>
> +#include <linux/slab.h>
> +#include <linux/mm.h>
> +#include "../slab.h"
> +#include "kasan.h"
> +
> +void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
> +			slab_flags_t *flags)
> +{
> +	cache->kasan_info.alloc_meta_offset = 0;
> +
> +	if (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
> +	     cache->object_size < sizeof(struct kasan_free_meta)) {
> +		cache->kasan_info.free_meta_offset = *size;
> +		*size += sizeof(struct kasan_free_meta);
> +		BUG_ON(*size > KMALLOC_MAX_SIZE);

Please don't use BUG_ON()[1].
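
A recoverable shape might be (sketch only; skipping the metadata here
may also need a "quarantine disabled for this cache" flag):

	if (WARN_ON(*size + sizeof(struct kasan_free_meta) >
		    KMALLOC_MAX_SIZE))
		return;	/* degrade gracefully instead of killing the box */

	cache->kasan_info.free_meta_offset = *size;
	*size += sizeof(struct kasan_free_meta);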

Interesting!

-Kees

[1] https://www.kernel.org/doc/html/latest/process/deprecated.html#bug-and-bug-on

-- 
Kees Cook
