|
Message-ID: <alpine.DEB.2.20.1801161215500.2945@nuc-kabylake> Date: Tue, 16 Jan 2018 12:17:01 -0600 (CST) From: Christopher Lameter <cl@...ux.com> To: Matthew Wilcox <willy@...radead.org> cc: Kees Cook <keescook@...omium.org>, linux-kernel@...r.kernel.org, David Windsor <dave@...lcore.net>, Pekka Enberg <penberg@...nel.org>, David Rientjes <rientjes@...gle.com>, Joonsoo Kim <iamjoonsoo.kim@....com>, Andrew Morton <akpm@...ux-foundation.org>, linux-mm@...ck.org, linux-xfs@...r.kernel.org, Linus Torvalds <torvalds@...ux-foundation.org>, Alexander Viro <viro@...iv.linux.org.uk>, Andy Lutomirski <luto@...nel.org>, Christoph Hellwig <hch@...radead.org>, "David S. Miller" <davem@...emloft.net>, Laura Abbott <labbott@...hat.com>, Mark Rutland <mark.rutland@....com>, "Martin K. Petersen" <martin.petersen@...cle.com>, Paolo Bonzini <pbonzini@...hat.com>, Christian Borntraeger <borntraeger@...ibm.com>, Christoffer Dall <christoffer.dall@...aro.org>, Dave Kleikamp <dave.kleikamp@...cle.com>, Jan Kara <jack@...e.cz>, Luis de Bethencourt <luisbg@...nel.org>, Marc Zyngier <marc.zyngier@....com>, Rik van Riel <riel@...hat.com>, Matthew Garrett <mjg59@...gle.com>, linux-fsdevel@...r.kernel.org, linux-arch@...r.kernel.org, netdev@...r.kernel.org, kernel-hardening@...ts.openwall.com Subject: Re: kmem_cache_attr (was Re: [PATCH 04/36] usercopy: Prepare for usercopy whitelisting) Draft patch of how the data structs could change. kmem_cache_attr is read only. 
Index: linux/include/linux/slab.h
===================================================================
--- linux.orig/include/linux/slab.h
+++ linux/include/linux/slab.h
@@ -135,9 +135,17 @@ struct mem_cgroup;
 void __init kmem_cache_init(void);
 bool slab_is_available(void);

-struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
-			slab_flags_t,
-			void (*)(void *));
+typedef void (*kmem_cache_ctor)(void *);
+
+struct kmem_cache_attr {
+	char name[16];
+	unsigned int size;
+	unsigned int align;
+	slab_flags_t flags;
+	kmem_cache_ctor ctor;
+};
+
+struct kmem_cache *kmem_cache_create(const struct kmem_cache_attr *);
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
Index: linux/include/linux/slab_def.h
===================================================================
--- linux.orig/include/linux/slab_def.h
+++ linux/include/linux/slab_def.h
@@ -10,6 +10,7 @@

 struct kmem_cache {
 	struct array_cache __percpu *cpu_cache;
+	struct kmem_cache_attr *attr;

 /* 1) Cache tunables. Protected by slab_mutex */
 	unsigned int batchcount;
@@ -35,14 +36,9 @@ struct kmem_cache {
 	struct kmem_cache *freelist_cache;
 	unsigned int freelist_size;

-	/* constructor func */
-	void (*ctor)(void *obj);
-
 /* 4) cache creation/removal */
-	const char *name;
 	struct list_head list;
 	int refcount;
-	int object_size;
 	int align;

 /* 5) statistics */
Index: linux/include/linux/slub_def.h
===================================================================
--- linux.orig/include/linux/slub_def.h
+++ linux/include/linux/slub_def.h
@@ -83,9 +83,9 @@ struct kmem_cache {
 	struct kmem_cache_cpu __percpu *cpu_slab;
 	/* Used for retriving partial slabs etc */
 	slab_flags_t flags;
+	struct kmem_cache_attr *attr;
 	unsigned long min_partial;
 	int size;		/* The size of an object including meta data */
-	int object_size;	/* The size of an object without meta data */
 	int offset;		/* Free pointer offset.
 */
 #ifdef CONFIG_SLUB_CPU_PARTIAL
 	int cpu_partial;	/* Number of per cpu partial objects to keep around */
@@ -97,12 +97,10 @@ struct kmem_cache {
 	struct kmem_cache_order_objects min;
 	gfp_t allocflags;	/* gfp flags to use on each alloc */
 	int refcount;		/* Refcount for slab cache destroy */
-	void (*ctor)(void *);
 	int inuse;		/* Offset to metadata */
 	int align;		/* Alignment */
 	int reserved;		/* Reserved bytes at the end of slabs */
 	int red_left_pad;	/* Left redzone padding size */
-	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slab caches */
 #ifdef CONFIG_SYSFS
 	struct kobject kobj;	/* For sysfs */
Index: linux/mm/slab.h
===================================================================
--- linux.orig/mm/slab.h
+++ linux/mm/slab.h
@@ -18,13 +18,11 @@
  * SLUB is no longer needed.
  */
 struct kmem_cache {
-	unsigned int object_size;/* The original size of the object */
+	struct kmem_cache_attr *attr;
 	unsigned int size;	/* The aligned/padded/added on size */
 	unsigned int align;	/* Alignment as calculated */
 	slab_flags_t flags;	/* Active flags on the slab */
-	const char *name;	/* Slab name for sysfs */
 	int refcount;		/* Use counter */
-	void (*ctor)(void *);	/* Called on object slot creation */
 	struct list_head list;	/* List of all slab caches on the system */
 };
Powered by blists - more mailing lists
Confused about mailing lists and their use? Read about mailing lists on Wikipedia and check out these guidelines on proper formatting of your messages.