|
|
Message-Id: <0ec8d5597db7fcc1090b7b2120e3d441624db68c.1498228733.git.Jens.Gustedt@inria.fr>
Date: Tue, 20 Jun 2017 21:44:13 +0200
From: Jens Gustedt <Jens.Gustedt@...ia.fr>
To: musl@...ts.openwall.com
Subject: [PATCH 5/8] separate the fast parts of __lock and __unlock into a
 .h file that may be used by other TUs

This provides two interfaces, __lock_fast and __unlock_fast, that are
both "static inline", so the fast path of the lock can be inlined at
the point of use. The slow path of the lock algorithm remains
centralized; there, the overhead of a function call is not a big deal.

This should only be used by a TU that encapsulates all LOCK and UNLOCK
calls on a particular lock object.
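
As a rough sketch of the intended use (hypothetical, not part of this
patch): a TU that owns every acquisition of some lock object could
include the new header and call the inline fast paths directly, e.g.

#include "__lock.h"

/* hypothetical lock object, only ever taken from this TU */
static volatile int mylock[1];

void update_shared_state(void)
{
	__lock_fast(mylock);   /* fast path inlined at the call site */
	/* ... critical section ... */
	__unlock_fast(mylock); /* inlined release, wakes a waiter if needed */
}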
---
 src/internal/__lock.h       | 22 ++++++++++++++++++++++
 src/internal/libc.h         |  1 +
 src/thread/__lock.c         | 22 ++++++----------------
 src/thread/pthread_create.c |  4 ++--
 4 files changed, 31 insertions(+), 18 deletions(-)
 create mode 100644 src/internal/__lock.h

diff --git a/src/internal/__lock.h b/src/internal/__lock.h
new file mode 100644
index 00000000..c1f07fc0
--- /dev/null
+++ b/src/internal/__lock.h
@@ -0,0 +1,22 @@
+#include "pthread_impl.h"
+
+static inline void __lock_fast(volatile int *l)
+{
+ extern void __lock_slow(volatile int*, int);
+ if (!libc.threads_minus_1) return;
+ /* fast path: INT_MIN for holding the lock, +1 to count this
+ thread in the critical section. */
+ int current = a_cas(l, 0, INT_MIN + 1);
+ if (!current) return;
+ __lock_slow(l, current);
+}
+
+static inline void __unlock_fast(volatile int *l)
+{
+ /* We have to check if l[0] had been touched at all. */
+ if (l[0] < 0) {
+ if (a_fetch_add(l, -(INT_MIN + 1)) != (INT_MIN + 1)) {
+ __wake(l, 1, 1);
+ }
+ }
+}
diff --git a/src/internal/libc.h b/src/internal/libc.h
index 5e145183..a594d0c5 100644
--- a/src/internal/libc.h
+++ b/src/internal/libc.h
@@ -47,6 +47,7 @@ extern size_t __sysinfo ATTR_LIBC_VISIBILITY;
 extern char *__progname, *__progname_full;
 
 /* Designed to avoid any overhead in non-threaded processes */
+void __lock_slow(volatile int *, int) ATTR_LIBC_VISIBILITY;
 void __lock(volatile int *) ATTR_LIBC_VISIBILITY;
 void __unlock(volatile int *) ATTR_LIBC_VISIBILITY;
 int __lockfile(FILE *) ATTR_LIBC_VISIBILITY;
diff --git a/src/thread/__lock.c b/src/thread/__lock.c
index 56092240..e612c6f9 100644
--- a/src/thread/__lock.c
+++ b/src/thread/__lock.c
@@ -1,12 +1,12 @@
#include "pthread_impl.h"
-void __lock(volatile int *l)
+#include "__lock.h"
+
+weak_alias(__lock_fast, __lock);
+weak_alias(__unlock_fast, __unlock);
+
+void __lock_slow(volatile int *l, int current)
{
- if (!libc.threads_minus_1) return;
- /* fast path: INT_MIN for holding the lock, +1 to count this
- thread in the critical section. */
- int current = a_cas(l, 0, INT_MIN + 1);
- if (!current) return;
/* A first spin lock acquisition loop, for the case of
medium congestion. */
for (unsigned i = 0; i < 10; ++i) {
@@ -35,13 +35,3 @@ void __lock(volatile int *l)
 		current = val;
 	}
 }
-
-void __unlock(volatile int *l)
-{
-	/* We have to check if l[0] had been touched at all. */
-	if (l[0] < 0) {
-		if (a_fetch_add(l, -(INT_MIN + 1)) != (INT_MIN + 1)) {
-			__wake(l, 1, 1);
-		}
-	}
-}
diff --git a/src/thread/pthread_create.c b/src/thread/pthread_create.c
index 26945022..015d7bee 100644
--- a/src/thread/pthread_create.c
+++ b/src/thread/pthread_create.c
@@ -282,8 +282,8 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
 	if (!a_fetch_add(&libc.threads_minus_1, 1)) {
 		// As long as we only have one thread, test if this supports
 		// private futexes.
-		__lock_t dummy = { 0 };
-		if (__syscall(SYS_futex, dummy.lc, FUTEX_WAKE|FUTEX_PRIVATE, 0) != -ENOSYS)
+		volatile int dummy[1] = { 0 };
+		if (__syscall(SYS_futex, dummy, FUTEX_WAKE|FUTEX_PRIVATE, 0) != -ENOSYS)
 			__futex_private = FUTEX_PRIVATE;
 	}
 	ret = __clone((c11 ? start_c11 : start), stack, flags, new, &new->tid, TP_ADJ(new), &new->tid);
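
A note on the lock word encoding used above, for readers new to this
lock (exposition only, not part of the patch): value 0 means unlocked
with nobody interested; a held lock stores INT_MIN plus the number of
threads counted in or around the critical section, and the slow path
(largely unchanged and elided from the hunk above) accounts for waiters
in that same word, which is what makes the unlock test meaningful.
Subtracting INT_MIN + 1 on unlock returns the word to 0 when the holder
was alone, and otherwise leaves a positive waiter count and triggers
__wake. A rough single-threaded walk-through of the arithmetic:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	int l = 0;             /* unlocked, nobody interested */
	l = INT_MIN + 1;       /* fast-path a_cas: held, count == 1 */
	l += 1;                /* a waiter registers itself (slow path) */
	l -= INT_MIN + 1;      /* __unlock_fast: previous value was not
	                          INT_MIN + 1, so __wake(l, 1, 1) fires */
	printf("%d waiter(s) still registered\n", l);  /* prints 1 */
	return 0;
}

With no waiter registered, the same subtraction brings the word back to
0 and the wake is skipped, which is exactly the check in __unlock_fast.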