Message-Id: <20170628165243.16502-1-amonakov@ispras.ru>
Date: Wed, 28 Jun 2017 19:52:43 +0300
From: Alexander Monakov <amonakov@...ras.ru>
To: musl@...ts.openwall.com
Cc: Timo Teräs <timo.teras@....fi>
Subject: [RFC PATCH] reduce severity of ldso reclaim_gaps hack

---
In part inspired by an earlier patch by Timo Teräs.

Without --gc-sections, __malloc_donate is dead weight in static linking. This
can be solved by moving it to a separate translation unit (though, for that
matter, realloc is much heavier and could be factored out too).
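
To illustrate the separate-TU point (file names below are made up, not part
of the patch): a static linker pulls an archive member into the link only
when one of its symbols is referenced, so a donate routine sitting alone in
its own object file costs static binaries nothing even without --gc-sections.

  /* used.c */
  int used(void) { return 1; }

  /* unused.c -- stands in for __malloc_donate: present in the
   * archive, never referenced by a static program */
  int unused(void) { return 2; }

  /* main.c */
  int used(void);
  int main(void) { return used(); }

Building the two library objects into an archive and linking statically,

  cc -c used.c unused.c && ar rcs libdemo.a used.o unused.o
  cc -static -o main main.c -L. -ldemo

running nm on the result shows no trace of 'unused': its object file was
never pulled into the link.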

Extra jumps from 'free' to 'bin_chunk' and their non-contiguous code layout are
admittedly a problem, but I believe the impact is minuscule. As a minor
consolation, entries to bin_chunk from malloc now require fewer branches, and
hopefully the new code is easier to follow.
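
For concreteness, the alignment math in the new __malloc_donate (last hunk
below) can be replayed in isolation; the addresses here are hypothetical, and
OVERHEAD/SIZE_ALIGN mirror musl's 2*sizeof(size_t) and 4*sizeof(size_t):

  #include <stdint.h>
  #include <stdio.h>

  #define OVERHEAD   (2*sizeof(size_t))  /* 16 on a 64-bit target */
  #define SIZE_ALIGN (4*sizeof(size_t))  /* 32 on a 64-bit target */

  int main(void)
  {
          uintptr_t start = 0x1009, end = 0x2018;  /* made-up raw gap */
          start += OVERHEAD + SIZE_ALIGN - 1;      /* 0x1038 */
          start -= start & (SIZE_ALIGN - 1);       /* 0x1020: aligned, and at
                                                      least OVERHEAD past the
                                                      raw start */
          end   -= end & (SIZE_ALIGN - 1);         /* 0x2000 */
          /* the chunk header at start-OVERHEAD and the sentinel at
           * end-OVERHEAD both lie inside the donated gap; the freed
           * chunk spans end-start = 0xfe0 bytes */
          printf("%#jx..%#jx, size %#jx\n",
                 (uintmax_t)start, (uintmax_t)end, (uintmax_t)(end - start));
          return 0;
  }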

Alexander

 ldso/dynlink.c      | 15 +++--------
 src/malloc/malloc.c | 71 ++++++++++++++++++++++++++++++++++++-----------------
 2 files changed, 52 insertions(+), 34 deletions(-)

diff --git a/ldso/dynlink.c b/ldso/dynlink.c
index d20dbd87..985592ce 100644
--- a/ldso/dynlink.c
+++ b/ldso/dynlink.c
@@ -475,23 +475,14 @@ static void redo_lazy_relocs()
 /* A huge hack: to make up for the wastefulness of shared libraries
  * needing at least a page of dirty memory even if they have no global
  * data, we reclaim the gaps at the beginning and end of writable maps
- * and "donate" them to the heap by setting up minimal malloc
- * structures and then freeing them. */
+ * and "donate" them to the heap. */
 
 static void reclaim(struct dso *dso, size_t start, size_t end)
 {
-	size_t *a, *z;
+	void __malloc_donate(char *, char *);
 	if (start >= dso->relro_start && start < dso->relro_end) start = dso->relro_end;
 	if (end   >= dso->relro_start && end   < dso->relro_end) end = dso->relro_start;
-	start = start + 6*sizeof(size_t)-1 & -4*sizeof(size_t);
-	end = (end & -4*sizeof(size_t)) - 2*sizeof(size_t);
-	if (start>end || end-start < 4*sizeof(size_t)) return;
-	a = laddr(dso, start);
-	z = laddr(dso, end);
-	a[-2] = 1;
-	a[-1] = z[0] = end-start + 2*sizeof(size_t) | 1;
-	z[1] = 1;
-	free(a);
+	__malloc_donate(laddr(dso, start), laddr(dso, end));
 }
 
 static void reclaim_gaps(struct dso *dso)
diff --git a/src/malloc/malloc.c b/src/malloc/malloc.c
index ef4c7368..b56fdaa2 100644
--- a/src/malloc/malloc.c
+++ b/src/malloc/malloc.c
@@ -299,6 +299,8 @@ static int pretrim(struct chunk *self, size_t n, int i, int j)
 	return 1;
 }
 
+static void bin_chunk(struct chunk *);
+
 static void trim(struct chunk *self, size_t n)
 {
 	size_t n1 = CHUNK_SIZE(self);
@@ -314,7 +316,7 @@ static void trim(struct chunk *self, size_t n)
 	next->psize = n1-n | C_INUSE;
 	self->csize = n | C_INUSE;
 
-	free(CHUNK_TO_MEM(split));
+	bin_chunk(split);
 }
 
 void *malloc(size_t n)
@@ -410,10 +412,9 @@ void *realloc(void *p, size_t n)
 		size_t newlen = n + extra;
 		/* Crash on realloc of freed chunk */
 		if (extra & 1) a_crash();
-		if (newlen < PAGE_SIZE && (new = malloc(n))) {
-			memcpy(new, p, n-OVERHEAD);
-			free(p);
-			return new;
+		if (newlen < PAGE_SIZE && (new = malloc(n-OVERHEAD))) {
+			n0 = n;
+			goto copy_free_ret;
 		}
 		newlen = (newlen + PAGE_SIZE-1) & -PAGE_SIZE;
 		if (oldlen == newlen) return p;
@@ -456,34 +457,20 @@ copy_realloc:
 	/* As a last resort, allocate a new chunk and copy to it. */
 	new = malloc(n-OVERHEAD);
 	if (!new) return 0;
+copy_free_ret:
 	memcpy(new, p, n0-OVERHEAD);
 	free(CHUNK_TO_MEM(self));
 	return new;
 }
 
-void free(void *p)
+static void bin_chunk(struct chunk *self)
 {
-	struct chunk *self, *next;
+	struct chunk *next = NEXT_CHUNK(self);
 	size_t final_size, new_size, size;
 	int reclaim=0;
 	int i;
 
-	if (!p) return;
-
-	self = MEM_TO_CHUNK(p);
-
-	if (IS_MMAPPED(self)) {
-		size_t extra = self->psize;
-		char *base = (char *)self - extra;
-		size_t len = CHUNK_SIZE(self) + extra;
-		/* Crash on double free */
-		if (extra & 1) a_crash();
-		__munmap(base, len);
-		return;
-	}
-
 	final_size = new_size = CHUNK_SIZE(self);
-	next = NEXT_CHUNK(self);
 
 	/* Crash on corrupted footer (likely from buffer overflow) */
 	if (next->psize != self->csize) a_crash();
@@ -544,3 +531,43 @@ void free(void *p)
 
 	unlock_bin(i);
 }
+
+#if defined(__GNUC__)
+__attribute__((cold))
+#endif
+static void unmap_chunk(struct chunk *self)
+{
+	size_t extra = self->psize;
+	char *base = (char *)self - extra;
+	size_t len = CHUNK_SIZE(self) + extra;
+	/* Crash on double free */
+	if (extra & 1) a_crash();
+	__munmap(base, len);
+}
+
+void free(void *p)
+{
+	if (!p) return;
+
+	struct chunk *self = MEM_TO_CHUNK(p);
+
+	if (IS_MMAPPED(self))
+		unmap_chunk(self);
+	else
+		bin_chunk(self);
+}
+
+void __malloc_donate(char *start, char *end)
+{
+	if (end - start < 2*OVERHEAD + SIZE_ALIGN) return;
+	start += OVERHEAD + SIZE_ALIGN - 1;
+	start -= (uintptr_t)start & (SIZE_ALIGN - 1);
+	end -= (uintptr_t)end & (SIZE_ALIGN - 1);
+	if (end - start < OVERHEAD + SIZE_ALIGN) return;
+	if (end - start >= MMAP_THRESHOLD) return;
+
+	struct chunk *c = MEM_TO_CHUNK(start), *n = MEM_TO_CHUNK(end);
+	c->psize = n->csize = C_INUSE;
+	c->csize = n->psize = (end - start) | C_INUSE;
+	bin_chunk(c);
+}
-- 
2.11.0
