Message-Id: <20180423125458.5338-3-igor.stoppa@huawei.com>
Date: Mon, 23 Apr 2018 16:54:51 +0400
From: Igor Stoppa <igor.stoppa@...il.com>
To: willy@...radead.org, keescook@...omium.org, paul@...l-moore.com,
	sds@...ho.nsa.gov, mhocko@...nel.org, corbet@....net
Cc: labbott@...hat.com, david@...morbit.com, rppt@...ux.vnet.ibm.com,
	linux-security-module@...r.kernel.org, linux-mm@...ck.org,
	linux-kernel@...r.kernel.org, kernel-hardening@...ts.openwall.com,
	igor.stoppa@...il.com, Igor Stoppa <igor.stoppa@...wei.com>
Subject: [PATCH 2/9] vmalloc: rename llist field in vmap_area

The vmap_area structure has a struct llist_node field named purge_list,
which is used when performing a lazy purge of the area. The field is
otherwise unused while the structure is in active use.

This patch renames the field to the more generic "area_list", so that it
can also be used outside of the purging phase. Since purging happens only
after the vmap_area has been dismissed, that use is mutually exclusive
with any use made of the field while the area is allocated.

Signed-off-by: Igor Stoppa <igor.stoppa@...wei.com>
---
 include/linux/vmalloc.h | 2 +-
 mm/vmalloc.c            | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 1e5d8c392f15..2d07dfef3cfd 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -47,7 +47,7 @@ struct vmap_area {
 	unsigned long flags;
 	struct rb_node rb_node;         /* address sorted rbtree */
 	struct list_head list;          /* address sorted list */
-	struct llist_node purge_list;    /* "lazy purge" list */
+	struct llist_node area_list;    /* generic list of areas */
 	struct vm_struct *vm;
 	struct rcu_head rcu_head;
 };
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 61a1ca22b0f6..1bb2233bb262 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -682,7 +682,7 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
 	lockdep_assert_held(&vmap_purge_lock);
 
 	valist = llist_del_all(&vmap_purge_list);
-	llist_for_each_entry(va, valist, purge_list) {
+	llist_for_each_entry(va, valist, area_list) {
 		if (va->va_start < start)
 			start = va->va_start;
 		if (va->va_end > end)
@@ -696,7 +696,7 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
 	flush_tlb_kernel_range(start, end);
 
 	spin_lock(&vmap_area_lock);
-	llist_for_each_entry_safe(va, n_va, valist, purge_list) {
+	llist_for_each_entry_safe(va, n_va, valist, area_list) {
 		int nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
 
 		__free_vmap_area(va);
@@ -743,7 +743,7 @@ static void free_vmap_area_noflush(struct vmap_area *va)
 				    &vmap_lazy_nr);
 
 	/* After this point, we may free va at any time */
-	llist_add(&va->purge_list, &vmap_purge_list);
+	llist_add(&va->area_list, &vmap_purge_list);
 
 	if (unlikely(nr_lazy > lazy_max_pages()))
 		try_purge_vmap_area_lazy();
-- 
2.14.1
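
[Editor's note] A minimal userspace sketch of the idea the commit message describes: a single embedded list node can serve different, mutually exclusive purposes across an object's lifetime. This is not kernel code and not the vmalloc API; the struct node, list_push() and area_entry() helpers below are hypothetical stand-ins for the kernel's llist_node, llist_add() and llist_entry(), used only to illustrate the pattern.

	/* build with: cc -std=c99 -Wall sketch.c */
	#include <stddef.h>
	#include <stdio.h>

	struct node {
		struct node *next;
	};

	struct area {
		unsigned long start;
		unsigned long end;
		struct node area_list;	/* one node, reused across phases */
	};

	/* container_of-style helper: recover the area from its embedded node */
	#define area_entry(n) \
		((struct area *)((char *)(n) - offsetof(struct area, area_list)))

	static void list_push(struct node **head, struct node *n)
	{
		n->next = *head;
		*head = n;
	}

	int main(void)
	{
		struct node *live = NULL;	/* areas currently in use */
		struct node *purge = NULL;	/* areas queued for purging */
		struct area a = { .start = 0x1000, .end = 0x2000 };

		/* Phase 1: the area is allocated; the node tracks it on a
		 * caller-managed "live" list. */
		list_push(&live, &a.area_list);

		/* Phase 2: the area is dismissed; the very same node now
		 * queues it for purging.  The two uses never overlap. */
		live = NULL;
		list_push(&purge, &a.area_list);

		for (struct node *n = purge; n; n = n->next) {
			struct area *va = area_entry(n);
			printf("purging area [%#lx, %#lx)\n", va->start, va->end);
		}
		return 0;
	}

Because the purge list only ever holds areas that have already been dismissed, reusing the node for a second purpose while the area is allocated cannot collide with the purge path, which is what makes the more generic name appropriate.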