Diffstat (limited to '0001-add-base-files-for-libphtread-condition-family.patch')
-rw-r--r-- | 0001-add-base-files-for-libphtread-condition-family.patch | 2321
1 files changed, 2321 insertions, 0 deletions
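The patch below carries the old glibc 2.17-style condition-variable implementation as a preloadable library. As a reading aid, here is a minimal sketch of the kind of program the commit message refers to as ./test; the file name test.c and the build command are illustrative assumptions, not part of the patch:

/* test.c: hypothetical example, not part of the patch.  Exercises
   pthread_cond_wait/pthread_cond_signal; when started as
   LD_PRELOAD=./libpthreadcond.so ./test, those calls resolve to the
   preloaded 2.17-style implementation instead of the one in libpthread.  */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int ready;

static void *
worker (void *arg)
{
  pthread_mutex_lock (&lock);
  ready = 1;                    /* Publish the state change...  */
  pthread_cond_signal (&cond);  /* ...and wake the waiter.  */
  pthread_mutex_unlock (&lock);
  return NULL;
}

int
main (void)
{
  pthread_t thr;
  pthread_create (&thr, NULL, worker, NULL);

  pthread_mutex_lock (&lock);
  while (!ready)                /* Re-check: wakeups may be spurious.  */
    pthread_cond_wait (&cond, &lock);
  pthread_mutex_unlock (&lock);

  pthread_join (thr, NULL);
  puts ("condvar handshake done");
  return 0;
}

Compile with something like gcc -o test test.c -lpthread, then run it once normally and once under LD_PRELOAD=./libpthreadcond.so to compare the stock and the preloaded condvar implementations.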
diff --git a/0001-add-base-files-for-libphtread-condition-family.patch b/0001-add-base-files-for-libphtread-condition-family.patch
new file mode 100644
index 0000000..9ca2225
--- /dev/null
+++ b/0001-add-base-files-for-libphtread-condition-family.patch
@@ -0,0 +1,2321 @@
+From 76a50749f7af5935ba3739e815aa6a16ae4440d1 Mon Sep 17 00:00:00 2001
+From: Ulrich Drepper <drepper@redhat.com>
+Date: Tue Nov 26 22:50:54 2002 +0000
+Subject: [PATCH 1/9] 0001
+
+Since commit
+https://sourceware.org/git/?p=glibc.git;a=commit;h=ed19993b5b0d05d62cc883571519a67dae481a14
+glibc has deleted the old pthread_cond* functions. However, these
+interfaces have better performance, so we add a subpackage that provides
+them. You can use it by adding LD_PRELOAD=./libpthreadcond.so in front of
+your program (e.g. LD_PRELOAD=./libpthreadcond.so ./test); use
+with-compat_2_17 to compile it.
+WARNING: the 2.17 version does not meet the POSIX standard, so pay
+attention when using it.
+Add pthread_cond_clockwait to prevent a process hang when libpthread-2.17
+and libpthread-2.28 are used together. Use pthread_cond_common to
+implement the parts shared by pthread_cond_wait, pthread_cond_clockwait,
+and pthread_cond_timedwait.
+
+Add some base files for the libpthread condition family, including but
+not limited to the following upstream commits:
+6efd481484e
+a88c9263686
+76a50749f7a
+69431c9a21f
+5bd8a24966d
+
+---
+ nptl_2_17/cancellation_2_17.c                |  60 ++
+ nptl_2_17/cleanup_compat_2_17.c              |  50 ++
+ nptl_2_17/pthread_cond_broadcast_2_17.c      | 101 +++
+ nptl_2_17/pthread_cond_destroy_2_17.c        |  86 +++
+ nptl_2_17/pthread_cond_init_2_17.c           |  49 ++
+ nptl_2_17/pthread_cond_signal_2_17.c         |  84 +++
+ nptl_2_17/pthread_cond_wait_2_17.c           | 329 ++++++++++
+ nptl_2_17/pthread_condattr_getclock_2_17.c   |  28 +
+ nptl_2_17/pthread_condattr_getpshared_2_17.c |  28 +
+ nptl_2_17/pthread_condattr_init_2_17.c       |  33 +
+ nptl_2_17/pthread_condattr_setclock_2_17.c   |  45 ++
+ nptl_2_17/pthread_mutex_cond_lock_2_17.c     |  21 +
+ nptl_2_17/pthread_mutex_lock_2_17.c          | 652 +++++++++++++++++
+ nptl_2_17/pthread_mutex_unlock_2_17.c        | 361 ++++++++++
+ nptl_2_17/tpp_2_17.c                         | 195 ++++++
+ nptl_2_17/vars_2_17.c                        |  43 ++
+ 16 files changed, 2165 insertions(+)
+ create mode 100644 nptl_2_17/cancellation_2_17.c
+ create mode 100644 nptl_2_17/cleanup_compat_2_17.c
+ create mode 100644 nptl_2_17/pthread_cond_broadcast_2_17.c
+ create mode 100644 nptl_2_17/pthread_cond_destroy_2_17.c
+ create mode 100644 nptl_2_17/pthread_cond_init_2_17.c
+ create mode 100644 nptl_2_17/pthread_cond_signal_2_17.c
+ create mode 100644 nptl_2_17/pthread_cond_wait_2_17.c
+ create mode 100644 nptl_2_17/pthread_condattr_getclock_2_17.c
+ create mode 100644 nptl_2_17/pthread_condattr_getpshared_2_17.c
+ create mode 100644 nptl_2_17/pthread_condattr_init_2_17.c
+ create mode 100644 nptl_2_17/pthread_condattr_setclock_2_17.c
+ create mode 100644 nptl_2_17/pthread_mutex_cond_lock_2_17.c
+ create mode 100644 nptl_2_17/pthread_mutex_lock_2_17.c
+ create mode 100644 nptl_2_17/pthread_mutex_unlock_2_17.c
+ create mode 100644 nptl_2_17/tpp_2_17.c
+ create mode 100644 nptl_2_17/vars_2_17.c
+
+diff --git a/nptl_2_17/cancellation_2_17.c b/nptl_2_17/cancellation_2_17.c
+new file mode 100644
+index 00000000..5c9ce572
+--- /dev/null
++++ b/nptl_2_17/cancellation_2_17.c
+@@ -0,0 +1,60 @@
++/* Copyright (C) 2002-2018 Free Software Foundation, Inc.
++   This file is part of the GNU C Library.
++   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. ++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, see ++ <http://www.gnu.org/licenses/>. */ ++ ++#include "pthreadP_2_17.h" ++#include <setjmp.h> ++#include <stdlib.h> ++#include <futex-internal.h> ++ ++int ++__pthread_enable_asynccancel (void) ++{ ++ struct pthread *self = THREAD_SELF; ++ ++ int oldval = THREAD_GETMEM (self, canceltype); ++ THREAD_SETMEM (self, canceltype, PTHREAD_CANCEL_ASYNCHRONOUS); ++ ++ int ch = THREAD_GETMEM (self, cancelhandling); ++ ++ if (self->cancelstate == PTHREAD_CANCEL_ENABLE ++ && (ch & CANCELED_BITMASK) ++ && !(ch & EXITING_BITMASK) ++ && !(ch & TERMINATED_BITMASK)) ++ { ++ THREAD_SETMEM (self, result, PTHREAD_CANCELED); ++ __do_cancel (); ++ } ++ ++ return oldval; ++} ++libc_hidden_def (__pthread_enable_asynccancel) ++ ++/* See the comment for __pthread_enable_asynccancel regarding ++ the AS-safety of this function. */ ++void ++__pthread_disable_asynccancel (int oldtype) ++{ ++ /* If asynchronous cancellation was enabled before we do not have ++ anything to do. */ ++ if (oldtype == PTHREAD_CANCEL_ASYNCHRONOUS) ++ return; ++ ++ struct pthread *self = THREAD_SELF; ++ self->canceltype = PTHREAD_CANCEL_DEFERRED; ++} ++libc_hidden_def (__pthread_disable_asynccancel) +diff --git a/nptl_2_17/cleanup_compat_2_17.c b/nptl_2_17/cleanup_compat_2_17.c +new file mode 100644 +index 00000000..53cf903d +--- /dev/null ++++ b/nptl_2_17/cleanup_compat_2_17.c +@@ -0,0 +1,50 @@ ++/* Copyright (C) 2002-2018 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. ++ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. ++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, see ++ <http://www.gnu.org/licenses/>. 
*/ ++ ++#include "pthreadP_2_17.h" ++#include <stdlib.h> ++ ++ ++void ++_pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer, ++ void (*routine) (void *), void *arg) ++{ ++ struct pthread *self = THREAD_SELF; ++ ++ buffer->__routine = routine; ++ buffer->__arg = arg; ++ buffer->__prev = THREAD_GETMEM (self, cleanup); ++ ++ THREAD_SETMEM (self, cleanup, buffer); ++} ++strong_alias (_pthread_cleanup_push, __pthread_cleanup_push) ++ ++ ++void ++_pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer, int execute) ++{ ++ struct pthread *self __attribute ((unused)) = THREAD_SELF; ++ ++ THREAD_SETMEM (self, cleanup, buffer->__prev); ++ ++ /* If necessary call the cleanup routine after we removed the ++ current cleanup block from the list. */ ++ if (execute) ++ buffer->__routine (buffer->__arg); ++} ++strong_alias (_pthread_cleanup_pop, __pthread_cleanup_pop) +diff --git a/nptl_2_17/pthread_cond_broadcast_2_17.c b/nptl_2_17/pthread_cond_broadcast_2_17.c +new file mode 100644 +index 00000000..df39c99b +--- /dev/null ++++ b/nptl_2_17/pthread_cond_broadcast_2_17.c +@@ -0,0 +1,101 @@ ++/* Copyright (C) 2003-2018 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. ++ Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003. ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. ++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, see ++ <http://www.gnu.org/licenses/>. */ ++ ++#include "kernel-features_2_17.h" ++#include "pthread_2_17.h" ++#include "pthreadP_2_17.h" ++ ++#include <endian.h> ++#include <errno.h> ++#include <sysdep.h> ++#include <lowlevellock.h> ++#include <stap-probe.h> ++#include <atomic.h> ++ ++#include <shlib-compat.h> ++ ++#include <old_macros_2_17.h> ++ ++/* We do the following steps from __pthread_cond_signal in one critical ++ section: (1) signal all waiters in G1, (2) close G1 so that it can become ++ the new G2 and make G2 the new G1, and (3) signal all waiters in the new ++ G1. We don't need to do all these steps if there are no waiters in G1 ++ and/or G2. See __pthread_cond_signal for further details. */ ++int ++__pthread_cond_broadcast (pthread_cond_t *cond) ++{ ++ LIBC_PROBE (cond_broadcast, 1, cond); ++ ++ int pshared = (cond->__data.__mutex == (void *) ~0l) ++ ? LLL_SHARED : LLL_PRIVATE; ++ /* Make sure we are alone. */ ++ lll_lock (cond->__data.__lock, pshared); ++ ++ /* Are there any waiters to be woken? */ ++ if (cond->__data.__total_seq > cond->__data.__wakeup_seq) ++ ++ { ++ /* Yes. Mark them all as woken. */ ++ cond->__data.__wakeup_seq = cond->__data.__total_seq; ++ cond->__data.__woken_seq = cond->__data.__total_seq; ++ cond->__data.__futex = (unsigned int) cond->__data.__total_seq * 2; ++ int futex_val = cond->__data.__futex; ++ /* Signal that a broadcast happened. */ ++ ++cond->__data.__broadcast_seq; ++ ++ /* We are done. */ ++ lll_unlock (cond->__data.__lock, pshared); ++ ++ /* Wake everybody. 
*/ ++ pthread_mutex_t *mut = (pthread_mutex_t *) cond->__data.__mutex; ++ ++ /* Do not use requeue for pshared condvars. */ ++ if (mut == (void *) ~0l ++ || PTHREAD_MUTEX_PSHARED (mut) & PTHREAD_MUTEX_PSHARED_BIT) ++ goto wake_all; ++ ++#if (defined lll_futex_cmp_requeue_pi \ ++ && defined __ASSUME_REQUEUE_PI) ++ if (USE_REQUEUE_PI (mut)) ++ { ++ if (lll_futex_cmp_requeue_pi (&cond->__data.__futex, 1, INT_MAX, ++ &mut->__data.__lock, futex_val, ++ LLL_PRIVATE) == 0) ++ return 0; ++ } ++ else ++#endif ++ /* lll_futex_requeue returns 0 for success and non-zero ++ for errors. */ ++ if (!__builtin_expect (lll_futex_requeue (&cond->__data.__futex, 1, ++ INT_MAX, &mut->__data.__lock, ++ futex_val, LLL_PRIVATE), 0)) ++ return 0; ++ ++wake_all: ++ lll_futex_wake (&cond->__data.__futex, INT_MAX, pshared); ++ return 0; ++ } ++ /* We are done. */ ++ lll_unlock (cond->__data.__lock, pshared); ++ ++ return 0; ++} ++ ++versioned_symbol (libpthread, __pthread_cond_broadcast, pthread_cond_broadcast, ++ GLIBC_2_3_2); +diff --git a/nptl_2_17/pthread_cond_destroy_2_17.c b/nptl_2_17/pthread_cond_destroy_2_17.c +new file mode 100644 +index 00000000..6342f471 +--- /dev/null ++++ b/nptl_2_17/pthread_cond_destroy_2_17.c +@@ -0,0 +1,86 @@ ++/* Copyright (C) 2002-2018 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. ++ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. ++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, see ++ <http://www.gnu.org/licenses/>. */ ++ ++#include "pthreadP_2_17.h" ++#include <errno.h> ++#include <shlib-compat.h> ++#include <stap-probe.h> ++#include <old_macros_2_17.h> ++int ++__pthread_cond_destroy (pthread_cond_t *cond) ++{ ++ int pshared = (cond->__data.__mutex == (void *) ~0l) ++ ? LLL_SHARED : LLL_PRIVATE; ++ ++ LIBC_PROBE (cond_destroy, 1, cond); ++ ++ /* Make sure we are alone. */ ++ lll_lock (cond->__data.__lock, pshared); ++ ++ if (cond->__data.__total_seq > cond->__data.__wakeup_seq) ++ { ++ /* If there are still some waiters which have not been ++ woken up, this is an application bug. */ ++ lll_unlock (cond->__data.__lock, pshared); ++ return EBUSY; ++ } ++ ++ /* Tell pthread_cond_*wait that this condvar is being destroyed. */ ++ cond->__data.__total_seq = -1ULL; ++ ++ /* If there are waiters which have been already signalled or ++ broadcasted, but still are using the pthread_cond_t structure, ++ pthread_cond_destroy needs to wait for them. */ ++ unsigned int nwaiters = cond->__data.__nwaiters; ++ ++ if (nwaiters >= (1 << COND_NWAITERS_SHIFT)) ++ ++ { ++ /* Wake everybody on the associated mutex in case there are ++ threads that have been requeued to it. ++ Without this, pthread_cond_destroy could block potentially ++ for a long time or forever, as it would depend on other ++ thread's using the mutex. ++ When all threads waiting on the mutex are woken up, pthread_cond_wait ++ only waits for threads to acquire and release the internal ++ condvar lock. 
*/ ++ if (cond->__data.__mutex != NULL ++ && cond->__data.__mutex != (void *) ~0l) ++ { ++ pthread_mutex_t *mut = (pthread_mutex_t *) cond->__data.__mutex; ++ lll_futex_wake (&mut->__data.__lock, INT_MAX, ++ PTHREAD_MUTEX_PSHARED (mut)); ++ } ++ ++ do ++ { ++ lll_unlock (cond->__data.__lock, pshared); ++ ++ lll_futex_wait (&cond->__data.__nwaiters, nwaiters, pshared); ++ ++ lll_lock (cond->__data.__lock, pshared); ++ ++ nwaiters = cond->__data.__nwaiters; ++ } ++ while (nwaiters >= (1 << COND_NWAITERS_SHIFT)); ++ } ++ ++ return 0; ++} ++versioned_symbol (libpthread, __pthread_cond_destroy, ++ pthread_cond_destroy, GLIBC_2_3_2); +diff --git a/nptl_2_17/pthread_cond_init_2_17.c b/nptl_2_17/pthread_cond_init_2_17.c +new file mode 100644 +index 00000000..d590d1d0 +--- /dev/null ++++ b/nptl_2_17/pthread_cond_init_2_17.c +@@ -0,0 +1,49 @@ ++/* Copyright (C) 2002-2018 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. ++ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. ++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, see ++ <http://www.gnu.org/licenses/>. */ ++ ++#include "pthreadP_2_17.h" ++#include <shlib-compat.h> ++#include <stap-probe.h> ++ ++ ++int ++__pthread_cond_init (pthread_cond_t *cond, const pthread_condattr_t *cond_attr) ++{ ++ ASSERT_TYPE_SIZE (pthread_cond_t, __SIZEOF_PTHREAD_COND_T); ++ ++ struct pthread_condattr *icond_attr = (struct pthread_condattr *) cond_attr; ++ ++ cond->__data.__lock = LLL_LOCK_INITIALIZER; ++ cond->__data.__futex = 0; ++ cond->__data.__nwaiters = (icond_attr != NULL ++ ? ((icond_attr->value >> 1) & ((1 << COND_NWAITERS_SHIFT) - 1)) ++ : CLOCK_REALTIME); ++ cond->__data.__total_seq = 0; ++ cond->__data.__wakeup_seq = 0; ++ cond->__data.__woken_seq = 0; ++ cond->__data.__mutex = (icond_attr == NULL || (icond_attr->value & 1) == 0 ++ ? NULL : (void *) ~0l); ++ cond->__data.__broadcast_seq = 0; ++ ++ ++ LIBC_PROBE (cond_init, 2, cond, cond_attr); ++ ++ return 0; ++} ++versioned_symbol (libpthread, __pthread_cond_init, ++ pthread_cond_init, GLIBC_2_3_2); +diff --git a/nptl_2_17/pthread_cond_signal_2_17.c b/nptl_2_17/pthread_cond_signal_2_17.c +new file mode 100644 +index 00000000..e6f08ac8 +--- /dev/null ++++ b/nptl_2_17/pthread_cond_signal_2_17.c +@@ -0,0 +1,84 @@ ++/* Copyright (C) 2003-2018 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. ++ Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003. ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU ++ Lesser General Public License for more details. ++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, see ++ <http://www.gnu.org/licenses/>. */ ++ ++#include "kernel-features_2_17.h" ++#include "pthread_2_17.h" ++#include "pthreadP_2_17.h" ++ ++#include <endian.h> ++#include <errno.h> ++#include <sysdep.h> ++#include <lowlevellock.h> ++ ++#include <shlib-compat.h> ++#include <stap-probe.h> ++ ++#include <old_macros_2_17.h> ++ ++int ++__pthread_cond_signal (pthread_cond_t *cond) ++{ ++ int pshared = (cond->__data.__mutex == (void *) ~0l) ++ ? LLL_SHARED : LLL_PRIVATE; ++ ++ LIBC_PROBE (cond_signal, 1, cond); ++ ++ /* Make sure we are alone. */ ++ lll_lock (cond->__data.__lock, pshared); ++ ++ /* Are there any waiters to be woken? */ ++ if (cond->__data.__total_seq > cond->__data.__wakeup_seq) ++ { ++ /* Yes. Mark one of them as woken. */ ++ ++cond->__data.__wakeup_seq; ++ ++cond->__data.__futex; ++ ++#if (defined lll_futex_cmp_requeue_pi \ ++ && defined __ASSUME_REQUEUE_PI) ++ pthread_mutex_t *mut = cond->__data.__mutex; ++ ++ if (USE_REQUEUE_PI (mut) ++ /* This can only really fail with a ENOSYS, since nobody can modify ++ futex while we have the cond_lock. */ ++ && lll_futex_cmp_requeue_pi (&cond->__data.__futex, 1, 0, ++ &mut->__data.__lock, ++ cond->__data.__futex, pshared) == 0) ++ { ++ lll_unlock (cond->__data.__lock, pshared); ++ return 0; ++ } ++ else ++#endif ++ /* Wake one. */ ++ if (! __builtin_expect (lll_futex_wake_unlock (&cond->__data.__futex, ++ 1, 1, ++ &cond->__data.__lock, ++ pshared), 0)) ++ return 0; ++ ++ /* Fallback if neither of them work. */ ++ lll_futex_wake (&cond->__data.__futex, 1, pshared); ++ } ++/* We are done. */ ++ lll_unlock (cond->__data.__lock, pshared); ++ ++ return 0; ++} ++ ++versioned_symbol (libpthread, __pthread_cond_signal, pthread_cond_signal, ++ GLIBC_2_3_2); +diff --git a/nptl_2_17/pthread_cond_wait_2_17.c b/nptl_2_17/pthread_cond_wait_2_17.c +new file mode 100644 +index 00000000..ff651a00 +--- /dev/null ++++ b/nptl_2_17/pthread_cond_wait_2_17.c +@@ -0,0 +1,329 @@ ++/* Copyright (C) 2003-2018 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. ++ Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003. ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. ++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, see ++ <http://www.gnu.org/licenses/>. 
*/ ++ ++#include "kernel-features_2_17.h" ++#include "pthread_2_17.h" ++#include "pthreadP_2_17.h" ++ ++#include <endian.h> ++#include <errno.h> ++#include <sysdep.h> ++#include <lowlevellock.h> ++#include <sys/time.h> ++#include <futex-internal.h> ++ ++#include <shlib-compat.h> ++#include <stap-probe.h> ++ ++#include <old_macros_2_17.h> ++ ++struct _condvar_cleanup_buffer ++{ ++ int oldtype; ++ pthread_cond_t *cond; ++ pthread_mutex_t *mutex; ++ unsigned int bc_seq; ++}; ++ ++void ++__attribute__ ((visibility ("hidden"))) ++__condvar_cleanup (void *arg) ++{ ++ struct _condvar_cleanup_buffer *cbuffer = ++ (struct _condvar_cleanup_buffer *) arg; ++ unsigned int destroying; ++ int pshared = (cbuffer->cond->__data.__mutex == (void *) ~0l) ++ ? LLL_SHARED : LLL_PRIVATE; ++ ++ /* We are going to modify shared data. */ ++ lll_lock (cbuffer->cond->__data.__lock, pshared); ++ ++ if (cbuffer->bc_seq == cbuffer->cond->__data.__broadcast_seq) ++ { ++ /* This thread is not waiting anymore. Adjust the sequence counters ++ * appropriately. We do not increment WAKEUP_SEQ if this would ++ * bump it over the value of TOTAL_SEQ. This can happen if a thread ++ * was woken and then canceled. */ ++ if (cbuffer->cond->__data.__wakeup_seq ++ < cbuffer->cond->__data.__total_seq) ++ { ++ ++cbuffer->cond->__data.__wakeup_seq; ++ ++cbuffer->cond->__data.__futex; ++ } ++ ++cbuffer->cond->__data.__woken_seq; ++ } ++ ++ cbuffer->cond->__data.__nwaiters -= 1 << COND_NWAITERS_SHIFT; ++ ++ /* If pthread_cond_destroy was called on this variable already, ++ notify the pthread_cond_destroy caller all waiters have left ++ and it can be successfully destroyed. */ ++ destroying = 0; ++ if (cbuffer->cond->__data.__total_seq == -1ULL ++ && cbuffer->cond->__data.__nwaiters < (1 << COND_NWAITERS_SHIFT)) ++ { ++ lll_futex_wake (&cbuffer->cond->__data.__nwaiters, 1, pshared); ++ destroying = 1; ++ } ++ ++ /* We are done. */ ++ lll_unlock (cbuffer->cond->__data.__lock, pshared); ++ ++ /* Wake everybody to make sure no condvar signal gets lost. */ ++ if (! destroying) ++ lll_futex_wake (&cbuffer->cond->__data.__futex, INT_MAX, pshared); ++ ++ /* Get the mutex before returning unless asynchronous cancellation ++ is in effect. We don't try to get the mutex if we already own it. */ ++ if (!(USE_REQUEUE_PI (cbuffer->mutex)) ++ || ((cbuffer->mutex->__data.__lock & FUTEX_TID_MASK) ++ != THREAD_GETMEM (THREAD_SELF, tid))) ++ { ++ __pthread_mutex_cond_lock (cbuffer->mutex); ++ } ++ else ++ __pthread_mutex_cond_lock_adjust (cbuffer->mutex); ++} ++ ++static __always_inline int ++__pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex, ++ clockid_t clockid, ++ const struct timespec *abstime) ++{ ++ struct _pthread_cleanup_buffer buffer; ++ struct _condvar_cleanup_buffer cbuffer; ++ int result = 0; ++ ++ int pshared = (cond->__data.__mutex == (void *) ~0l) ++ ? LLL_SHARED : LLL_PRIVATE; ++ ++ #if (defined lll_futex_wait_requeue_pi \ ++ && defined __ASSUME_REQUEUE_PI) ++ int pi_flag = 0; ++#endif ++ LIBC_PROBE (cond_wait, 2, cond, mutex); ++ /* clockid will already have been checked by ++ __pthread_cond_clockwait or pthread_condattr_setclock, or we ++ don't use it if abstime is NULL, so we don't need to check it ++ here. */ ++ /* Make sure we are alone. */ ++ lll_lock (cond->__data.__lock, pshared); ++ ++ /* Now we can release the mutex. 
*/ ++ int err = __pthread_mutex_unlock_usercnt (mutex, 0); ++ if (__glibc_unlikely (err)) ++ { ++ lll_unlock (cond->__data.__lock, pshared); ++ return err; ++ } ++ ++ /* We have one new user of the condvar. */ ++ ++cond->__data.__total_seq; ++ ++cond->__data.__futex; ++ cond->__data.__nwaiters += 1 << COND_NWAITERS_SHIFT; ++ ++ /* Work around the fact that the kernel rejects negative timeout values ++ despite them being valid. */ ++ if (abstime != NULL && __glibc_unlikely (abstime->tv_sec < 0)) ++ goto timeout; ++ ++ /* Remember the mutex we are using here. If there is already a ++ different address store this is a bad user bug. Do not store ++ anything for pshared condvars. */ ++ if (cond->__data.__mutex != (void *) ~0l) ++ cond->__data.__mutex = mutex; ++ ++ /* Prepare structure passed to cancellation handler. */ ++ cbuffer.cond = cond; ++ cbuffer.mutex = mutex; ++ ++ /* Before we block we enable cancellation. Therefore we have to ++ install a cancellation handler. */ ++ __pthread_cleanup_push (&buffer, __condvar_cleanup, &cbuffer); ++ ++ /* The current values of the wakeup counter. The "woken" counter ++ must exceed this value. */ ++ unsigned long long int val; ++ unsigned long long int seq; ++ val = seq = cond->__data.__wakeup_seq; ++ /* Remember the broadcast counter. */ ++ cbuffer.bc_seq = cond->__data.__broadcast_seq; ++ ++ while (1) ++ { ++ unsigned int futex_val = cond->__data.__futex; ++ ++ /* Prepare to wait. Release the condvar futex. */ ++ lll_unlock (cond->__data.__lock, pshared); ++ ++ /* Enable asynchronous cancellation. Required by the standard. */ ++ cbuffer.oldtype = __pthread_enable_asynccancel (); ++ ++#if (defined lll_futex_wait_requeue_pi \ ++ && defined __ASSUME_REQUEUE_PI) ++ /* If pi_flag remained 1 then it means that we had the lock and the mutex ++ but a spurious waker raced ahead of us. Give back the mutex before ++ going into wait again. */ ++ if (pi_flag) ++ { ++ __pthread_mutex_cond_lock_adjust (mutex); ++ __pthread_mutex_unlock_usercnt (mutex, 0); ++ } ++ pi_flag = USE_REQUEUE_PI (mutex); ++ ++ if (pi_flag) ++ { ++ if (abstime == NULL) ++ { ++ err = lll_futex_wait_requeue_pi (&cond->__data.__futex, ++ futex_val, &mutex->__data.__lock, ++ pshared); ++ } ++ else ++ { ++ unsigned int clockbit = (clockid == CLOCK_REALTIME) ++ ? FUTEX_CLOCK_REALTIME : 0; ++ ++ err = lll_futex_timed_wait_requeue_pi (&cond->__data.__futex, ++ futex_val, abstime, clockbit, ++ &mutex->__data.__lock, ++ pshared); ++ } ++ pi_flag = (err == 0); ++ } ++ else ++#endif ++ /* Wait until woken by signal or broadcast. */ ++ { ++ if (abstime == NULL) ++ { ++ lll_futex_wait (&cond->__data.__futex, futex_val, pshared); ++ } ++ else ++ { ++ err = lll_futex_clock_wait_bitset (&cond->__data.__futex, futex_val, ++ clockid, abstime, pshared); ++ } ++ } ++ /* Disable asynchronous cancellation. */ ++ __pthread_disable_asynccancel (cbuffer.oldtype); ++ ++ /* We are going to look at shared data again, so get the lock. */ ++ lll_lock (cond->__data.__lock, pshared); ++ ++ /* If a broadcast happened, we are done. */ ++ if (cbuffer.bc_seq != cond->__data.__broadcast_seq) ++ goto bc_out; ++ ++ /* Check whether we are eligible for wakeup. */ ++ val = cond->__data.__wakeup_seq; ++ if (val != seq && cond->__data.__woken_seq != val) ++ break; ++ ++ /* Not woken yet. Maybe the time expired? */ ++ if (abstime != NULL && __glibc_unlikely (err == -ETIMEDOUT)) ++ { ++ timeout: ++ /* Yep. Adjust the counters. */ ++ ++cond->__data.__wakeup_seq; ++ ++cond->__data.__futex; ++ ++ /* The error value. 
*/ ++ result = ETIMEDOUT; ++ break; ++ } ++ } ++ ++ /* Another thread woken up. */ ++ ++cond->__data.__woken_seq; ++ ++bc_out: ++ cond->__data.__nwaiters -= 1 << COND_NWAITERS_SHIFT; ++ ++ /* If pthread_cond_destroy was called on this variable already, ++ notify the pthread_cond_destroy caller all waiters have left ++ and it can be successfully destroyed. */ ++ if (cond->__data.__total_seq == -1ULL ++ && cond->__data.__nwaiters < (1 << COND_NWAITERS_SHIFT)) ++ lll_futex_wake (&cond->__data.__nwaiters, 1, pshared); ++ ++ /* We are done with the condvar. */ ++ lll_unlock (cond->__data.__lock, pshared); ++ ++ /* The cancellation handling is back to normal, remove the handler. */ ++ __pthread_cleanup_pop (&buffer, 0); ++ ++ /* Get the mutex before returning. */ ++#if (defined lll_futex_wait_requeue_pi \ ++ && defined __ASSUME_REQUEUE_PI) ++ if (pi_flag) ++ { ++ __pthread_mutex_cond_lock_adjust (mutex); ++ err = 0; ++ } ++ else ++#endif ++ err = __pthread_mutex_cond_lock (mutex); ++ return err ?: result; ++} ++/* See __pthread_cond_wait_common. */ ++int ++__pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex) ++{ ++ /* clockid is unused when abstime is NULL. */ ++ return __pthread_cond_wait_common (cond, mutex, 0, NULL); ++} ++ ++/* See __pthread_cond_wait_common. */ ++int ++__pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex, ++ const struct timespec *abstime) ++{ ++ /* Check parameter validity. This should also tell the compiler that ++ it can assume that abstime is not NULL. */ ++ if (abstime == NULL || abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) ++ return EINVAL; ++ ++ clockid_t clockid = cond->__data.__nwaiters & 1; ++ ++ return __pthread_cond_wait_common (cond, mutex, clockid, abstime); ++} ++ ++/* See __pthread_cond_wait_common. */ ++int ++__pthread_cond_clockwait (pthread_cond_t *cond, pthread_mutex_t *mutex, ++ clockid_t clockid, ++ const struct timespec *abstime) ++{ ++ /* Check parameter validity. This should also tell the compiler that ++ it can assume that abstime is not NULL. */ ++ if (abstime == NULL || abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) ++ return EINVAL; ++ ++ if (!futex_abstimed_supported_clockid (clockid)) ++ return EINVAL; ++ ++ return __pthread_cond_wait_common (cond, mutex, clockid, abstime); ++} ++ ++versioned_symbol (libpthread, __pthread_cond_wait, pthread_cond_wait, ++ GLIBC_2_3_2); ++versioned_symbol (libpthread, __pthread_cond_timedwait, pthread_cond_timedwait, ++ GLIBC_2_3_2); ++versioned_symbol (libpthread, __pthread_cond_clockwait, pthread_cond_clockwait, ++ GLIBC_2_34); +diff --git a/nptl_2_17/pthread_condattr_getclock_2_17.c b/nptl_2_17/pthread_condattr_getclock_2_17.c +new file mode 100644 +index 00000000..414a6856 +--- /dev/null ++++ b/nptl_2_17/pthread_condattr_getclock_2_17.c +@@ -0,0 +1,28 @@ ++/* Copyright (C) 2003-2018 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. ++ Contributed by Ulrich Drepper <drepper@redhat.com>, 2003. ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. 
++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, see ++ <http://www.gnu.org/licenses/>. */ ++ ++#include "pthreadP_2_17.h" ++ ++ ++int ++pthread_condattr_getclock (const pthread_condattr_t *attr, clockid_t *clock_id) ++{ ++ *clock_id = (((((const struct pthread_condattr *) attr)->value) >> 1) ++ & ((1 << COND_NWAITERS_SHIFT) - 1)); ++ return 0; ++} +diff --git a/nptl_2_17/pthread_condattr_getpshared_2_17.c b/nptl_2_17/pthread_condattr_getpshared_2_17.c +new file mode 100644 +index 00000000..2b85506f +--- /dev/null ++++ b/nptl_2_17/pthread_condattr_getpshared_2_17.c +@@ -0,0 +1,28 @@ ++/* Copyright (C) 2002-2018 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. ++ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. ++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, see ++ <http://www.gnu.org/licenses/>. */ ++ ++#include "pthreadP_2_17.h" ++ ++ ++int ++pthread_condattr_getpshared (const pthread_condattr_t *attr, int *pshared) ++{ ++ *pshared = ((const struct pthread_condattr *) attr)->value & 1; ++ ++ return 0; ++} +diff --git a/nptl_2_17/pthread_condattr_init_2_17.c b/nptl_2_17/pthread_condattr_init_2_17.c +new file mode 100644 +index 00000000..c2765e96 +--- /dev/null ++++ b/nptl_2_17/pthread_condattr_init_2_17.c +@@ -0,0 +1,33 @@ ++/* Copyright (C) 2002-2018 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. ++ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. ++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, see ++ <http://www.gnu.org/licenses/>. */ ++ ++#include "pthreadP_2_17.h" ++#include <string.h> ++ ++ ++int ++__pthread_condattr_init (pthread_condattr_t *attr) ++{ ++ ASSERT_TYPE_SIZE (pthread_condattr_t, __SIZEOF_PTHREAD_CONDATTR_T); ++ ASSERT_PTHREAD_INTERNAL_SIZE (pthread_condattr_t, ++ struct pthread_condattr); ++ ++ memset (attr, '\0', sizeof (*attr)); ++ return 0; ++} ++strong_alias (__pthread_condattr_init, pthread_condattr_init) +diff --git a/nptl_2_17/pthread_condattr_setclock_2_17.c b/nptl_2_17/pthread_condattr_setclock_2_17.c +new file mode 100644 +index 00000000..69c64dcb +--- /dev/null ++++ b/nptl_2_17/pthread_condattr_setclock_2_17.c +@@ -0,0 +1,45 @@ ++/* Copyright (C) 2003-2018 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. 
++ Contributed by Ulrich Drepper <drepper@redhat.com>, 2003. ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. ++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, see ++ <http://www.gnu.org/licenses/>. */ ++ ++#include "pthreadP_2_17.h" ++#include <assert.h> ++#include <errno.h> ++#include <stdbool.h> ++#include <time.h> ++#include <sysdep.h> ++ ++ ++int ++pthread_condattr_setclock (pthread_condattr_t *attr, clockid_t clock_id) ++{ ++ /* Only a few clocks are allowed. */ ++ if (clock_id != CLOCK_MONOTONIC && clock_id != CLOCK_REALTIME) ++ /* If more clocks are allowed some day the storing of the clock ID ++ in the pthread_cond_t structure needs to be adjusted. */ ++ return EINVAL; ++ ++ /* Make sure the value fits in the bits we reserved. */ ++ assert (clock_id < (1 << COND_NWAITERS_SHIFT)); ++ ++ int *valuep = &((struct pthread_condattr *) attr)->value; ++ ++ *valuep = ((*valuep & ~(((1 << COND_NWAITERS_SHIFT) - 1) << 1)) ++ | (clock_id << 1)); ++ ++ return 0; ++} +diff --git a/nptl_2_17/pthread_mutex_cond_lock_2_17.c b/nptl_2_17/pthread_mutex_cond_lock_2_17.c +new file mode 100644 +index 00000000..87734543 +--- /dev/null ++++ b/nptl_2_17/pthread_mutex_cond_lock_2_17.c +@@ -0,0 +1,21 @@ ++#include <pthreadP_2_17.h> ++ ++#define LLL_MUTEX_LOCK(mutex) \ ++ lll_cond_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex)) ++ ++/* Not actually elided so far. Needed? */ ++#define LLL_MUTEX_LOCK_ELISION(mutex) \ ++ ({ lll_cond_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex)); 0; }) ++ ++#define LLL_MUTEX_TRYLOCK(mutex) \ ++ lll_cond_trylock ((mutex)->__data.__lock) ++#define LLL_MUTEX_TRYLOCK_ELISION(mutex) LLL_MUTEX_TRYLOCK(mutex) ++ ++/* We need to assume that there are other threads blocked on the futex. ++ See __pthread_mutex_lock_full for further details. */ ++#define LLL_ROBUST_MUTEX_LOCK_MODIFIER FUTEX_WAITERS ++#define __pthread_mutex_lock __pthread_mutex_cond_lock ++#define __pthread_mutex_lock_full __pthread_mutex_cond_lock_full ++#define NO_INCR ++ ++#include <nptl_2_17/pthread_mutex_lock_2_17.c> +diff --git a/nptl_2_17/pthread_mutex_lock_2_17.c b/nptl_2_17/pthread_mutex_lock_2_17.c +new file mode 100644 +index 00000000..b08a2472 +--- /dev/null ++++ b/nptl_2_17/pthread_mutex_lock_2_17.c +@@ -0,0 +1,652 @@ ++/* Copyright (C) 2002-2018 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. ++ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. 
++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, see ++ <http://www.gnu.org/licenses/>. */ ++ ++#include "pthreadP_2_17.h" ++#include <assert.h> ++#include <errno.h> ++#include <stdlib.h> ++#include <unistd.h> ++#include <sys/param.h> ++#include <not-cancel.h> ++#include <atomic.h> ++#include <lowlevellock.h> ++#include <stap-probe.h> ++ ++#include <old_macros_2_17.h> ++#ifndef lll_lock_elision ++#define lll_lock_elision(lock, try_lock, private) ({ \ ++ lll_lock (lock, private); 0; }) ++#endif ++ ++#ifndef lll_trylock_elision ++#define lll_trylock_elision(a,t) lll_trylock(a) ++#endif ++ ++/* Some of the following definitions differ when pthread_mutex_cond_lock.c ++ includes this file. */ ++#ifndef LLL_MUTEX_LOCK ++/* lll_lock with single-thread optimization. */ ++static inline void ++lll_mutex_lock_optimized (pthread_mutex_t *mutex) ++{ ++ /* The single-threaded optimization is only valid for private ++ mutexes. For process-shared mutexes, the mutex could be in a ++ shared mapping, so synchronization with another process is needed ++ even without any threads. If the lock is already marked as ++ acquired, POSIX requires that pthread_mutex_lock deadlocks for ++ normal mutexes, so skip the optimization in that case as ++ well. */ ++ int private = PTHREAD_MUTEX_PSHARED (mutex); ++ if (private == LLL_PRIVATE && SINGLE_THREAD_P && mutex->__data.__lock == 0) ++ mutex->__data.__lock = 1; ++ else ++ lll_lock (mutex->__data.__lock, private); ++} ++# define LLL_MUTEX_LOCK(mutex) \ ++ lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex)) ++# define LLL_MUTEX_LOCK_OPTIMIZED(mutex) lll_mutex_lock_optimized (mutex) ++# define LLL_MUTEX_TRYLOCK(mutex) \ ++ lll_trylock ((mutex)->__data.__lock) ++# define LLL_ROBUST_MUTEX_LOCK_MODIFIER 0 ++# define LLL_MUTEX_LOCK_ELISION(mutex) \ ++ lll_lock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \ ++ PTHREAD_MUTEX_PSHARED (mutex)) ++# define LLL_MUTEX_TRYLOCK_ELISION(mutex) \ ++ lll_trylock_elision((mutex)->__data.__lock, (mutex)->__data.__elision, \ ++ PTHREAD_MUTEX_PSHARED (mutex)) ++# define PTHREAD_MUTEX_LOCK ___pthread_mutex_lock ++# define PTHREAD_MUTEX_VERSIONS 1 ++#endif ++ ++static int __pthread_mutex_lock_full (pthread_mutex_t *mutex) ++ __attribute_noinline__; ++ ++int ++__pthread_mutex_lock (pthread_mutex_t *mutex) ++{ ++ /* See concurrency notes regarding mutex type which is loaded from __kind ++ in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h. */ ++ unsigned int type = PTHREAD_MUTEX_TYPE_ELISION (mutex); ++ ++ LIBC_PROBE (mutex_entry, 1, mutex); ++ ++ if (__builtin_expect (type & ~(PTHREAD_MUTEX_KIND_MASK_NP ++ | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0)) ++ return __pthread_mutex_lock_full (mutex); ++ ++ if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_NP)) ++ { ++ FORCE_ELISION (mutex, goto elision); ++ simple: ++ /* Normal mutex. */ ++ LLL_MUTEX_LOCK (mutex); ++ assert (mutex->__data.__owner == 0); ++ } ++#ifdef ENABLE_ELISION_SUPPORT ++ else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP)) ++ { ++ elision: __attribute__((unused)) ++ /* This case can never happen on a system without elision, ++ as the mutex type initialization functions will not ++ allow to set the elision flags. */ ++ /* Don't record owner or users for elision case. This is a ++ tail call. 
*/ ++ return LLL_MUTEX_LOCK_ELISION (mutex); ++ } ++#endif ++ else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex) ++ == PTHREAD_MUTEX_RECURSIVE_NP, 1)) ++ { ++ /* Recursive mutex. */ ++ pid_t id = THREAD_GETMEM (THREAD_SELF, tid); ++ ++ /* Check whether we already hold the mutex. */ ++ if (mutex->__data.__owner == id) ++ { ++ /* Just bump the counter. */ ++ if (__glibc_unlikely (mutex->__data.__count + 1 == 0)) ++ /* Overflow of the counter. */ ++ return EAGAIN; ++ ++ ++mutex->__data.__count; ++ ++ return 0; ++ } ++ ++ /* We have to get the mutex. */ ++ LLL_MUTEX_LOCK (mutex); ++ ++ assert (mutex->__data.__owner == 0); ++ mutex->__data.__count = 1; ++ } ++ else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex) ++ == PTHREAD_MUTEX_ADAPTIVE_NP, 1)) ++ { ++ if (! __is_smp) ++ goto simple; ++ ++ if (LLL_MUTEX_TRYLOCK (mutex) != 0) ++ { ++ int cnt = 0; ++ int max_cnt = MIN (MAX_ADAPTIVE_COUNT, ++ mutex->__data.__spins * 2 + 10); ++ do ++ { ++ if (cnt++ >= max_cnt) ++ { ++ LLL_MUTEX_LOCK (mutex); ++ break; ++ } ++ atomic_spin_nop (); ++ } ++ while (LLL_MUTEX_TRYLOCK (mutex) != 0); ++ ++ mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8; ++ } ++ assert (mutex->__data.__owner == 0); ++ } ++ else ++ { ++ pid_t id = THREAD_GETMEM (THREAD_SELF, tid); ++ assert (PTHREAD_MUTEX_TYPE (mutex) == PTHREAD_MUTEX_ERRORCHECK_NP); ++ /* Check whether we already hold the mutex. */ ++ if (__glibc_unlikely (mutex->__data.__owner == id)) ++ return EDEADLK; ++ goto simple; ++ } ++ ++ pid_t id = THREAD_GETMEM (THREAD_SELF, tid); ++ ++ /* Record the ownership. */ ++ mutex->__data.__owner = id; ++#ifndef NO_INCR ++ ++mutex->__data.__nusers; ++#endif ++ ++ LIBC_PROBE (mutex_acquired, 1, mutex); ++ ++ return 0; ++} ++ ++static int ++__pthread_mutex_lock_full (pthread_mutex_t *mutex) ++{ ++ int oldval; ++ pid_t id = THREAD_GETMEM (THREAD_SELF, tid); ++ ++ switch (PTHREAD_MUTEX_TYPE (mutex)) ++ { ++ case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP: ++ case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP: ++ case PTHREAD_MUTEX_ROBUST_NORMAL_NP: ++ case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP: ++ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, ++ &mutex->__data.__list.__next); ++ /* We need to set op_pending before starting the operation. Also ++ see comments at ENQUEUE_MUTEX. */ ++ __asm ("" ::: "memory"); ++ ++ oldval = mutex->__data.__lock; ++ /* This is set to FUTEX_WAITERS iff we might have shared the ++ FUTEX_WAITERS flag with other threads, and therefore need to keep it ++ set to avoid lost wake-ups. We have the same requirement in the ++ simple mutex algorithm. ++ We start with value zero for a normal mutex, and FUTEX_WAITERS if we ++ are building the special case mutexes for use from within condition ++ variables. */ ++ unsigned int assume_other_futex_waiters = LLL_ROBUST_MUTEX_LOCK_MODIFIER; ++ while (1) ++ { ++ /* Try to acquire the lock through a CAS from 0 (not acquired) to ++ our TID | assume_other_futex_waiters. */ ++ if (__glibc_likely (oldval == 0)) ++ { ++ oldval ++ = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, ++ id | assume_other_futex_waiters, 0); ++ if (__glibc_likely (oldval == 0)) ++ break; ++ } ++ ++ if ((oldval & FUTEX_OWNER_DIED) != 0) ++ { ++ /* The previous owner died. Try locking the mutex. */ ++ int newval = id; ++#ifdef NO_INCR ++ /* We are not taking assume_other_futex_waiters into accoount ++ here simply because we'll set FUTEX_WAITERS anyway. 
*/ ++ newval |= FUTEX_WAITERS; ++#else ++ newval |= (oldval & FUTEX_WAITERS) | assume_other_futex_waiters; ++#endif ++ ++ newval ++ = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, ++ newval, oldval); ++ ++ if (newval != oldval) ++ { ++ oldval = newval; ++ continue; ++ } ++ ++ /* We got the mutex. */ ++ mutex->__data.__count = 1; ++ /* But it is inconsistent unless marked otherwise. */ ++ mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT; ++ ++ /* We must not enqueue the mutex before we have acquired it. ++ Also see comments at ENQUEUE_MUTEX. */ ++ __asm ("" ::: "memory"); ++ ENQUEUE_MUTEX (mutex); ++ /* We need to clear op_pending after we enqueue the mutex. */ ++ __asm ("" ::: "memory"); ++ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); ++ ++ /* Note that we deliberately exit here. If we fall ++ through to the end of the function __nusers would be ++ incremented which is not correct because the old ++ owner has to be discounted. If we are not supposed ++ to increment __nusers we actually have to decrement ++ it here. */ ++#ifdef NO_INCR ++ --mutex->__data.__nusers; ++#endif ++ ++ return EOWNERDEAD; ++ } ++ ++ /* Check whether we already hold the mutex. */ ++ if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id)) ++ { ++ int kind = PTHREAD_MUTEX_TYPE (mutex); ++ if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP) ++ { ++ /* We do not need to ensure ordering wrt another memory ++ access. Also see comments at ENQUEUE_MUTEX. */ ++ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, ++ NULL); ++ return EDEADLK; ++ } ++ ++ if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP) ++ { ++ /* We do not need to ensure ordering wrt another memory ++ access. */ ++ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, ++ NULL); ++ ++ /* Just bump the counter. */ ++ if (__glibc_unlikely (mutex->__data.__count + 1 == 0)) ++ /* Overflow of the counter. */ ++ return EAGAIN; ++ ++ ++mutex->__data.__count; ++ ++ return 0; ++ } ++ } ++ ++ /* We cannot acquire the mutex nor has its owner died. Thus, try ++ to block using futexes. Set FUTEX_WAITERS if necessary so that ++ other threads are aware that there are potentially threads ++ blocked on the futex. Restart if oldval changed in the ++ meantime. */ ++ if ((oldval & FUTEX_WAITERS) == 0) ++ { ++ if (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock, ++ oldval | FUTEX_WAITERS, ++ oldval) ++ != 0) ++ { ++ oldval = mutex->__data.__lock; ++ continue; ++ } ++ oldval |= FUTEX_WAITERS; ++ } ++ ++ /* It is now possible that we share the FUTEX_WAITERS flag with ++ another thread; therefore, update assume_other_futex_waiters so ++ that we do not forget about this when handling other cases ++ above and thus do not cause lost wake-ups. */ ++ assume_other_futex_waiters |= FUTEX_WAITERS; ++ ++ /* Block using the futex and reload current lock value. */ ++ lll_futex_wait (&mutex->__data.__lock, oldval, ++ PTHREAD_ROBUST_MUTEX_PSHARED (mutex)); ++ oldval = mutex->__data.__lock; ++ } ++ ++ /* We have acquired the mutex; check if it is still consistent. */ ++ if (__builtin_expect (mutex->__data.__owner ++ == PTHREAD_MUTEX_NOTRECOVERABLE, 0)) ++ { ++ /* This mutex is now not recoverable. */ ++ mutex->__data.__count = 0; ++ int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex); ++ lll_unlock (mutex->__data.__lock, private); ++ /* FIXME This violates the mutex destruction requirements. See ++ __pthread_mutex_unlock_full. 
*/ ++ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); ++ return ENOTRECOVERABLE; ++ } ++ ++ mutex->__data.__count = 1; ++ /* We must not enqueue the mutex before we have acquired it. ++ Also see comments at ENQUEUE_MUTEX. */ ++ __asm ("" ::: "memory"); ++ ENQUEUE_MUTEX (mutex); ++ /* We need to clear op_pending after we enqueue the mutex. */ ++ __asm ("" ::: "memory"); ++ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); ++ break; ++ ++ /* The PI support requires the Linux futex system call. If that's not ++ available, pthread_mutex_init should never have allowed the type to ++ be set. So it will get the default case for an invalid type. */ ++#ifdef __NR_futex ++ case PTHREAD_MUTEX_PI_RECURSIVE_NP: ++ case PTHREAD_MUTEX_PI_ERRORCHECK_NP: ++ case PTHREAD_MUTEX_PI_NORMAL_NP: ++ case PTHREAD_MUTEX_PI_ADAPTIVE_NP: ++ case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP: ++ case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP: ++ case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP: ++ case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP: ++ { ++ int kind, robust; ++ { ++ /* See concurrency notes regarding __kind in struct __pthread_mutex_s ++ in sysdeps/nptl/bits/thread-shared-types.h. */ ++ int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind)); ++ kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP; ++ robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP; ++ } ++ ++ if (robust) ++ { ++ /* Note: robust PI futexes are signaled by setting bit 0. */ ++ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, ++ (void *) (((uintptr_t) &mutex->__data.__list.__next) ++ | 1)); ++ /* We need to set op_pending before starting the operation. Also ++ see comments at ENQUEUE_MUTEX. */ ++ __asm ("" ::: "memory"); ++ } ++ ++ oldval = mutex->__data.__lock; ++ ++ /* Check whether we already hold the mutex. */ ++ if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id)) ++ { ++ if (kind == PTHREAD_MUTEX_ERRORCHECK_NP) ++ { ++ /* We do not need to ensure ordering wrt another memory ++ access. */ ++ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); ++ return EDEADLK; ++ } ++ ++ if (kind == PTHREAD_MUTEX_RECURSIVE_NP) ++ { ++ /* We do not need to ensure ordering wrt another memory ++ access. */ ++ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); ++ ++ /* Just bump the counter. */ ++ if (__glibc_unlikely (mutex->__data.__count + 1 == 0)) ++ /* Overflow of the counter. */ ++ return EAGAIN; ++ ++ ++mutex->__data.__count; ++ ++ return 0; ++ } ++ } ++ ++ int newval = id; ++# ifdef NO_INCR ++ newval |= FUTEX_WAITERS; ++# endif ++ oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, ++ newval, 0); ++ ++ if (oldval != 0) ++ { ++ /* The mutex is locked. The kernel will now take care of ++ everything. */ ++ int private = (robust ++ ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex) ++ : PTHREAD_MUTEX_PSHARED (mutex)); ++ INTERNAL_SYSCALL_DECL (__err); ++ int e = INTERNAL_SYSCALL (futex, 4, &mutex->__data.__lock, ++ __lll_private_flag (FUTEX_LOCK_PI, ++ private), 1, 0); ++ ++ if (INTERNAL_SYSCALL_ERROR_P (e) ++ && (INTERNAL_SYSCALL_ERRNO (e) == ESRCH ++ || INTERNAL_SYSCALL_ERRNO (e) == EDEADLK)) ++ { ++ assert (INTERNAL_SYSCALL_ERRNO (e) != EDEADLK ++ || (kind != PTHREAD_MUTEX_ERRORCHECK_NP ++ && kind != PTHREAD_MUTEX_RECURSIVE_NP)); ++ /* ESRCH can happen only for non-robust PI mutexes where ++ the owner of the lock died. */ ++ assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust); ++ ++ /* Delay the thread indefinitely. 
*/ ++ while (1) ++ __pause_nocancel (); ++ } ++ ++ oldval = mutex->__data.__lock; ++ ++ assert (robust || (oldval & FUTEX_OWNER_DIED) == 0); ++ } ++ ++ if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED)) ++ { ++ atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED); ++ ++ /* We got the mutex. */ ++ mutex->__data.__count = 1; ++ /* But it is inconsistent unless marked otherwise. */ ++ mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT; ++ ++ /* We must not enqueue the mutex before we have acquired it. ++ Also see comments at ENQUEUE_MUTEX. */ ++ __asm ("" ::: "memory"); ++ ENQUEUE_MUTEX_PI (mutex); ++ /* We need to clear op_pending after we enqueue the mutex. */ ++ __asm ("" ::: "memory"); ++ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); ++ ++ /* Note that we deliberately exit here. If we fall ++ through to the end of the function __nusers would be ++ incremented which is not correct because the old owner ++ has to be discounted. If we are not supposed to ++ increment __nusers we actually have to decrement it here. */ ++# ifdef NO_INCR ++ --mutex->__data.__nusers; ++# endif ++ ++ return EOWNERDEAD; ++ } ++ ++ if (robust ++ && __builtin_expect (mutex->__data.__owner ++ == PTHREAD_MUTEX_NOTRECOVERABLE, 0)) ++ { ++ /* This mutex is now not recoverable. */ ++ mutex->__data.__count = 0; ++ ++ INTERNAL_SYSCALL_DECL (__err); ++ INTERNAL_SYSCALL (futex, 4, &mutex->__data.__lock, ++ __lll_private_flag (FUTEX_UNLOCK_PI, ++ PTHREAD_ROBUST_MUTEX_PSHARED (mutex)), ++ 0, 0); ++ ++ /* To the kernel, this will be visible after the kernel has ++ acquired the mutex in the syscall. */ ++ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); ++ return ENOTRECOVERABLE; ++ } ++ ++ mutex->__data.__count = 1; ++ if (robust) ++ { ++ /* We must not enqueue the mutex before we have acquired it. ++ Also see comments at ENQUEUE_MUTEX. */ ++ __asm ("" ::: "memory"); ++ ENQUEUE_MUTEX_PI (mutex); ++ /* We need to clear op_pending after we enqueue the mutex. */ ++ __asm ("" ::: "memory"); ++ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); ++ } ++ } ++ break; ++#endif /* __NR_futex. */ ++ ++ case PTHREAD_MUTEX_PP_RECURSIVE_NP: ++ case PTHREAD_MUTEX_PP_ERRORCHECK_NP: ++ case PTHREAD_MUTEX_PP_NORMAL_NP: ++ case PTHREAD_MUTEX_PP_ADAPTIVE_NP: ++ { ++ /* See concurrency notes regarding __kind in struct __pthread_mutex_s ++ in sysdeps/nptl/bits/thread-shared-types.h. */ ++ int kind = atomic_load_relaxed (&(mutex->__data.__kind)) ++ & PTHREAD_MUTEX_KIND_MASK_NP; ++ ++ oldval = mutex->__data.__lock; ++ ++ /* Check whether we already hold the mutex. */ ++ if (mutex->__data.__owner == id) ++ { ++ if (kind == PTHREAD_MUTEX_ERRORCHECK_NP) ++ return EDEADLK; ++ ++ if (kind == PTHREAD_MUTEX_RECURSIVE_NP) ++ { ++ /* Just bump the counter. */ ++ if (__glibc_unlikely (mutex->__data.__count + 1 == 0)) ++ /* Overflow of the counter. 
*/ ++ return EAGAIN; ++ ++ ++mutex->__data.__count; ++ ++ return 0; ++ } ++ } ++ ++ int oldprio = -1, ceilval; ++ do ++ { ++ int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) ++ >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT; ++ ++ if (__pthread_current_priority () > ceiling) ++ { ++ if (oldprio != -1) ++ __pthread_tpp_change_priority (oldprio, -1); ++ return EINVAL; ++ } ++ ++ int retval = __pthread_tpp_change_priority (oldprio, ceiling); ++ if (retval) ++ return retval; ++ ++ ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT; ++ oldprio = ceiling; ++ ++ oldval ++ = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, ++#ifdef NO_INCR ++ ceilval | 2, ++#else ++ ceilval | 1, ++#endif ++ ceilval); ++ ++ if (oldval == ceilval) ++ break; ++ ++ do ++ { ++ oldval ++ = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, ++ ceilval | 2, ++ ceilval | 1); ++ ++ if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval) ++ break; ++ ++ if (oldval != ceilval) ++ lll_futex_wait (&mutex->__data.__lock, ceilval | 2, ++ PTHREAD_MUTEX_PSHARED (mutex)); ++ } ++ while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, ++ ceilval | 2, ceilval) ++ != ceilval); ++ } ++ while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval); ++ ++ assert (mutex->__data.__owner == 0); ++ mutex->__data.__count = 1; ++ } ++ break; ++ ++ default: ++ /* Correct code cannot set any other type. */ ++ return EINVAL; ++ } ++ ++ /* Record the ownership. */ ++ mutex->__data.__owner = id; ++#ifndef NO_INCR ++ ++mutex->__data.__nusers; ++#endif ++ ++ LIBC_PROBE (mutex_acquired, 1, mutex); ++ ++ return 0; ++} ++#ifndef __pthread_mutex_lock ++weak_alias (__pthread_mutex_lock, pthread_mutex_lock) ++hidden_def (__pthread_mutex_lock) ++#endif ++ ++ ++#ifdef NO_INCR ++void ++__pthread_mutex_cond_lock_adjust (pthread_mutex_t *mutex) ++{ ++ /* See concurrency notes regarding __kind in struct __pthread_mutex_s ++ in sysdeps/nptl/bits/thread-shared-types.h. */ ++ int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind)); ++ assert ((mutex_kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0); ++ assert ((mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0); ++ assert ((mutex_kind & PTHREAD_MUTEX_PSHARED_BIT) == 0); ++ ++ /* Record the ownership. */ ++ pid_t id = THREAD_GETMEM (THREAD_SELF, tid); ++ mutex->__data.__owner = id; ++ ++ if (mutex_kind == PTHREAD_MUTEX_PI_RECURSIVE_NP) ++ ++mutex->__data.__count; ++} ++#endif +diff --git a/nptl_2_17/pthread_mutex_unlock_2_17.c b/nptl_2_17/pthread_mutex_unlock_2_17.c +new file mode 100644 +index 00000000..00729d32 +--- /dev/null ++++ b/nptl_2_17/pthread_mutex_unlock_2_17.c +@@ -0,0 +1,361 @@ ++/* Copyright (C) 2002-2018 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. ++ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. ++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, see ++ <http://www.gnu.org/licenses/>. 
++
++#include "pthreadP_2_17.h"
++#include <assert.h>
++#include <errno.h>
++#include <stdlib.h>
++#include <lowlevellock.h>
++#include <stap-probe.h>
++
++#include <old_macros_2_17.h>
++
++#ifndef lll_unlock_elision
++#define lll_unlock_elision(a,b,c) ({ lll_unlock (a,c); 0; })
++#endif
++
++static int
++__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
++     __attribute_noinline__;
++
++int
++attribute_hidden
++__pthread_mutex_unlock_usercnt (pthread_mutex_t *mutex, int decr)
++{
++  /* See concurrency notes regarding mutex type which is loaded from __kind
++     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
++  int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);
++  if (__builtin_expect (type &
++	~(PTHREAD_MUTEX_KIND_MASK_NP|PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
++    return __pthread_mutex_unlock_full (mutex, decr);
++
++  if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
++      == PTHREAD_MUTEX_TIMED_NP)
++    {
++      /* Always reset the owner field.  */
++    normal:
++      mutex->__data.__owner = 0;
++      if (decr)
++	/* One less user.  */
++	--mutex->__data.__nusers;
++
++      /* Unlock.  */
++      lll_unlock (mutex->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex));
++
++      LIBC_PROBE (mutex_release, 1, mutex);
++
++      return 0;
++    }
++  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
++    {
++      /* Don't reset the owner/users fields for elision.  */
++      return lll_unlock_elision (mutex->__data.__lock, mutex->__data.__elision,
++				 PTHREAD_MUTEX_PSHARED (mutex));
++    }
++  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
++			     == PTHREAD_MUTEX_RECURSIVE_NP, 1))
++    {
++      /* Recursive mutex.  */
++      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
++	return EPERM;
++
++      if (--mutex->__data.__count != 0)
++	/* We still hold the mutex.  */
++	return 0;
++      goto normal;
++    }
++  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
++			     == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
++    goto normal;
++  else
++    {
++      /* Error checking mutex.  */
++      assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
++      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
++	  || ! lll_islocked (mutex->__data.__lock))
++	return EPERM;
++      goto normal;
++    }
++}
++
++
++static int
++__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
++{
++  int newowner = 0;
++  int private;
++
++  switch (PTHREAD_MUTEX_TYPE (mutex))
++    {
++    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
++      /* Recursive mutex.  */
++      if ((mutex->__data.__lock & FUTEX_TID_MASK)
++	  == THREAD_GETMEM (THREAD_SELF, tid)
++	  && __builtin_expect (mutex->__data.__owner
++			       == PTHREAD_MUTEX_INCONSISTENT, 0))
++	{
++	  if (--mutex->__data.__count != 0)
++	    /* We still hold the mutex.  */
++	    return ENOTRECOVERABLE;
++
++	  goto notrecoverable;
++	}
++
++      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
++	return EPERM;
++
++      if (--mutex->__data.__count != 0)
++	/* We still hold the mutex.  */
++	return 0;
++
++      goto robust;
++
++    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
++    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
++    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
++      if ((mutex->__data.__lock & FUTEX_TID_MASK)
++	  != THREAD_GETMEM (THREAD_SELF, tid)
++	  || ! lll_islocked (mutex->__data.__lock))
++	return EPERM;
++
++      /* If the previous owner died and the caller did not succeed in
++	 making the state consistent, mark the mutex as unrecoverable
++	 and wake all waiters.  */
++      if (__builtin_expect (mutex->__data.__owner
++			    == PTHREAD_MUTEX_INCONSISTENT, 0))
++      notrecoverable:
++	newowner = PTHREAD_MUTEX_NOTRECOVERABLE;
++
++    robust:
++      /* Remove mutex from the list.  */
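++      /* Ordering sketch (informal summary): the robust-list protocol
++	 depends on these writes becoming visible in order:
++	   1. list_op_pending = &mutex->__data.__list.__next
++	   2. DEQUEUE_MUTEX (mutex)
++	   3. lock word released, waiters woken if FUTEX_WAITERS was set
++	   4. list_op_pending = NULL
++	 If the thread dies at any point in between, the kernel still
++	 reaches the mutex through robust_head or op_pending and sets
++	 FUTEX_OWNER_DIED for the next locker; the compiler barriers
++	 below keep these steps from being reordered.  */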
++      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
++		     &mutex->__data.__list.__next);
++      /* We must set op_pending before we dequeue the mutex.  Also see
++	 comments at ENQUEUE_MUTEX.  */
++      __asm ("" ::: "memory");
++      DEQUEUE_MUTEX (mutex);
++
++      mutex->__data.__owner = newowner;
++      if (decr)
++	/* One less user.  */
++	--mutex->__data.__nusers;
++
++      /* Unlock by setting the lock to 0 (not acquired); if the lock had
++	 FUTEX_WAITERS set previously, then wake any waiters.
++	 The unlock operation must be the last access to the mutex to not
++	 violate the mutex destruction requirements (see __lll_unlock).  */
++      private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
++      if (__glibc_unlikely ((atomic_exchange_rel (&mutex->__data.__lock, 0)
++			     & FUTEX_WAITERS) != 0))
++	lll_futex_wake (&mutex->__data.__lock, 1, private);
++
++      /* We must clear op_pending after we release the mutex.
++	 FIXME However, this violates the mutex destruction requirements
++	 because another thread could acquire the mutex, destroy it, and
++	 reuse the memory for something else; then, if this thread crashes,
++	 and the memory happens to have a value equal to the TID, the kernel
++	 will believe it is still related to the mutex (which has been
++	 destroyed already) and will modify some other random object.  */
++      __asm ("" ::: "memory");
++      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
++      break;
++
++      /* The PI support requires the Linux futex system call.  If that's not
++	 available, pthread_mutex_init should never have allowed the type to
++	 be set.  So it will get the default case for an invalid type.  */
++#ifdef __NR_futex
++    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
++      /* Recursive mutex.  */
++      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
++	return EPERM;
++
++      if (--mutex->__data.__count != 0)
++	/* We still hold the mutex.  */
++	return 0;
++      goto continue_pi_non_robust;
++
++    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
++      /* Recursive mutex.  */
++      if ((mutex->__data.__lock & FUTEX_TID_MASK)
++	  == THREAD_GETMEM (THREAD_SELF, tid)
++	  && __builtin_expect (mutex->__data.__owner
++			       == PTHREAD_MUTEX_INCONSISTENT, 0))
++	{
++	  if (--mutex->__data.__count != 0)
++	    /* We still hold the mutex.  */
++	    return ENOTRECOVERABLE;
++
++	  goto pi_notrecoverable;
++	}
++
++      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
++	return EPERM;
++
++      if (--mutex->__data.__count != 0)
++	/* We still hold the mutex.  */
++	return 0;
++
++      goto continue_pi_robust;
++
++    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
++    case PTHREAD_MUTEX_PI_NORMAL_NP:
++    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
++    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
++    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
++    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
++      if ((mutex->__data.__lock & FUTEX_TID_MASK)
++	  != THREAD_GETMEM (THREAD_SELF, tid)
++	  || ! lll_islocked (mutex->__data.__lock))
++	return EPERM;
++
++      /* If the previous owner died and the caller did not succeed in
++	 making the state consistent, mark the mutex as unrecoverable
++	 and wake all waiters.  */
++      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
++	 in sysdeps/nptl/bits/thread-shared-types.h.  */
++      if ((atomic_load_relaxed (&(mutex->__data.__kind))
++	   & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0
++	  && __builtin_expect (mutex->__data.__owner
++			       == PTHREAD_MUTEX_INCONSISTENT, 0))
++      pi_notrecoverable:
++	newowner = PTHREAD_MUTEX_NOTRECOVERABLE;
++
++      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
++	 in sysdeps/nptl/bits/thread-shared-types.h.
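++	 In brief: after initialization only the elision bits of __kind
++	 may change concurrently (they are set during the first lock
++	 acquisition); the robust and protocol bits tested in these
++	 unlock paths are fixed by pthread_mutex_init, so a relaxed
++	 atomic load is sufficient here.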
*/ ++ if ((atomic_load_relaxed (&(mutex->__data.__kind)) ++ & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0) ++ { ++ continue_pi_robust: ++ /* Remove mutex from the list. ++ Note: robust PI futexes are signaled by setting bit 0. */ ++ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, ++ (void *) (((uintptr_t) &mutex->__data.__list.__next) ++ | 1)); ++ /* We must set op_pending before we dequeue the mutex. Also see ++ comments at ENQUEUE_MUTEX. */ ++ __asm ("" ::: "memory"); ++ DEQUEUE_MUTEX (mutex); ++ } ++ ++ continue_pi_non_robust: ++ mutex->__data.__owner = newowner; ++ if (decr) ++ /* One less user. */ ++ --mutex->__data.__nusers; ++ ++ /* Unlock. Load all necessary mutex data before releasing the mutex ++ to not violate the mutex destruction requirements (see ++ lll_unlock). */ ++ /* See concurrency notes regarding __kind in struct __pthread_mutex_s ++ in sysdeps/nptl/bits/thread-shared-types.h. */ ++ int robust = atomic_load_relaxed (&(mutex->__data.__kind)) ++ & PTHREAD_MUTEX_ROBUST_NORMAL_NP; ++ private = (robust ++ ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex) ++ : PTHREAD_MUTEX_PSHARED (mutex)); ++ /* Unlock the mutex using a CAS unless there are futex waiters or our ++ TID is not the value of __lock anymore, in which case we let the ++ kernel take care of the situation. Use release MO in the CAS to ++ synchronize with acquire MO in lock acquisitions. */ ++ int l = atomic_load_relaxed (&mutex->__data.__lock); ++ do ++ { ++ if (((l & FUTEX_WAITERS) != 0) ++ || (l != THREAD_GETMEM (THREAD_SELF, tid))) ++ { ++ INTERNAL_SYSCALL_DECL (__err); ++ INTERNAL_SYSCALL (futex, 2, &mutex->__data.__lock, ++ __lll_private_flag (FUTEX_UNLOCK_PI, private)); ++ break; ++ } ++ } ++ while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock, ++ &l, 0)); ++ ++ /* This happens after the kernel releases the mutex but violates the ++ mutex destruction requirements; see comments in the code handling ++ PTHREAD_MUTEX_ROBUST_NORMAL_NP. */ ++ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); ++ break; ++#endif /* __NR_futex. */ ++ ++ case PTHREAD_MUTEX_PP_RECURSIVE_NP: ++ /* Recursive mutex. */ ++ if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)) ++ return EPERM; ++ ++ if (--mutex->__data.__count != 0) ++ /* We still hold the mutex. */ ++ return 0; ++ goto pp; ++ ++ case PTHREAD_MUTEX_PP_ERRORCHECK_NP: ++ /* Error checking mutex. */ ++ if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid) ++ || (mutex->__data.__lock & ~ PTHREAD_MUTEX_PRIO_CEILING_MASK) == 0) ++ return EPERM; ++ /* FALLTHROUGH */ ++ ++ case PTHREAD_MUTEX_PP_NORMAL_NP: ++ case PTHREAD_MUTEX_PP_ADAPTIVE_NP: ++ /* Always reset the owner field. */ ++ pp: ++ mutex->__data.__owner = 0; ++ ++ if (decr) ++ /* One less user. */ ++ --mutex->__data.__nusers; ++ ++ /* Unlock. Use release MO in the CAS to synchronize with acquire MO in ++ lock acquisitions. */ ++ int newval; ++ int oldval = atomic_load_relaxed (&mutex->__data.__lock); ++ do ++ { ++ newval = oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK; ++ } ++ while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock, ++ &oldval, newval)); ++ ++ if ((oldval & ~PTHREAD_MUTEX_PRIO_CEILING_MASK) > 1) ++ lll_futex_wake (&mutex->__data.__lock, 1, ++ PTHREAD_MUTEX_PSHARED (mutex)); ++ ++ int oldprio = newval >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT; ++ ++ LIBC_PROBE (mutex_release, 1, mutex); ++ ++ return __pthread_tpp_change_priority (oldprio, -1); ++ ++ default: ++ /* Correct code cannot set any other type. 
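++	 The switch above is exhaustive for every value that
++	 pthread_mutex_init stores in __kind, so reaching this default
++	 case implies an uninitialized or corrupted mutex object.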
*/ ++ return EINVAL; ++ } ++ ++ LIBC_PROBE (mutex_release, 1, mutex); ++ return 0; ++} ++ ++ ++int ++__pthread_mutex_unlock (pthread_mutex_t *mutex) ++{ ++ return __pthread_mutex_unlock_usercnt (mutex, 1); ++} ++weak_alias (__pthread_mutex_unlock, pthread_mutex_unlock) ++hidden_def (__pthread_mutex_unlock) +diff --git a/nptl_2_17/tpp_2_17.c b/nptl_2_17/tpp_2_17.c +new file mode 100644 +index 00000000..45fff81a +--- /dev/null ++++ b/nptl_2_17/tpp_2_17.c +@@ -0,0 +1,195 @@ ++/* Thread Priority Protect helpers. ++ Copyright (C) 2006-2018 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. ++ Contributed by Jakub Jelinek <jakub@redhat.com>, 2006. ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. ++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, see ++ <http://www.gnu.org/licenses/>. */ ++ ++#include "pthreadP_2_17.h" ++#include <assert.h> ++#include <atomic.h> ++#include <errno.h> ++#include <sched.h> ++#include <stdlib.h> ++#include <atomic.h> ++ ++ ++int __sched_fifo_min_prio = -1; ++int __sched_fifo_max_prio = -1; ++ ++/* We only want to initialize __sched_fifo_min_prio and __sched_fifo_max_prio ++ once. The standard solution would be similar to pthread_once, but then ++ readers would need to use an acquire fence. In this specific case, ++ initialization is comprised of just idempotent writes to two variables ++ that have an initial value of -1. Therefore, we can treat each variable as ++ a separate, at-least-once initialized value. This enables using just ++ relaxed MO loads and stores, but requires that consumers check for ++ initialization of each value that is to be used; see ++ __pthread_tpp_change_priority for an example. ++ */ ++void ++__init_sched_fifo_prio (void) ++{ ++ atomic_store_relaxed (&__sched_fifo_max_prio, ++ __sched_get_priority_max (SCHED_FIFO)); ++ atomic_store_relaxed (&__sched_fifo_min_prio, ++ __sched_get_priority_min (SCHED_FIFO)); ++} ++ ++int ++__pthread_tpp_change_priority (int previous_prio, int new_prio) ++{ ++ struct pthread *self = THREAD_SELF; ++ struct priority_protection_data *tpp = THREAD_GETMEM (self, tpp); ++ int fifo_min_prio = atomic_load_relaxed (&__sched_fifo_min_prio); ++ int fifo_max_prio = atomic_load_relaxed (&__sched_fifo_max_prio); ++ ++ if (tpp == NULL) ++ { ++ /* See __init_sched_fifo_prio. We need both the min and max prio, ++ so need to check both, and run initialization if either one is ++ not initialized. The memory model's write-read coherence rule ++ makes this work. 
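++	 Equivalently, as an informal sketch (min and max stand for the
++	 two relaxed loads above):
++
++	   if (min == -1 || max == -1)	   (either store not yet seen)
++	     __init_sched_fifo_prio ();	   (idempotent relaxed stores)
++	   re-read min and max		   (both are now != -1, because a
++					    thread observes its own
++					    prior stores)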
*/ ++ if (fifo_min_prio == -1 || fifo_max_prio == -1) ++ { ++ __init_sched_fifo_prio (); ++ fifo_min_prio = atomic_load_relaxed (&__sched_fifo_min_prio); ++ fifo_max_prio = atomic_load_relaxed (&__sched_fifo_max_prio); ++ } ++ ++ size_t size = sizeof *tpp; ++ size += (fifo_max_prio - fifo_min_prio + 1) ++ * sizeof (tpp->priomap[0]); ++ tpp = calloc (size, 1); ++ if (tpp == NULL) ++ return ENOMEM; ++ tpp->priomax = fifo_min_prio - 1; ++ THREAD_SETMEM (self, tpp, tpp); ++ } ++ ++ assert (new_prio == -1 ++ || (new_prio >= fifo_min_prio ++ && new_prio <= fifo_max_prio)); ++ assert (previous_prio == -1 ++ || (previous_prio >= fifo_min_prio ++ && previous_prio <= fifo_max_prio)); ++ ++ int priomax = tpp->priomax; ++ int newpriomax = priomax; ++ if (new_prio != -1) ++ { ++ if (tpp->priomap[new_prio - fifo_min_prio] + 1 == 0) ++ return EAGAIN; ++ ++tpp->priomap[new_prio - fifo_min_prio]; ++ if (new_prio > priomax) ++ newpriomax = new_prio; ++ } ++ ++ if (previous_prio != -1) ++ { ++ if (--tpp->priomap[previous_prio - fifo_min_prio] == 0 ++ && priomax == previous_prio ++ && previous_prio > new_prio) ++ { ++ int i; ++ for (i = previous_prio - 1; i >= fifo_min_prio; --i) ++ if (tpp->priomap[i - fifo_min_prio]) ++ break; ++ newpriomax = i; ++ } ++ } ++ ++ if (priomax == newpriomax) ++ return 0; ++ ++ /* See CREATE THREAD NOTES in nptl/pthread_create.c. */ ++ lll_lock (self->lock, LLL_PRIVATE); ++ ++ tpp->priomax = newpriomax; ++ ++ int result = 0; ++ ++ if ((self->flags & ATTR_FLAG_SCHED_SET) == 0) ++ { ++ if (__sched_getparam (self->tid, &self->schedparam) != 0) ++ result = errno; ++ else ++ self->flags |= ATTR_FLAG_SCHED_SET; ++ } ++ ++ if ((self->flags & ATTR_FLAG_POLICY_SET) == 0) ++ { ++ self->schedpolicy = __sched_getscheduler (self->tid); ++ if (self->schedpolicy == -1) ++ result = errno; ++ else ++ self->flags |= ATTR_FLAG_POLICY_SET; ++ } ++ ++ if (result == 0) ++ { ++ struct sched_param sp = self->schedparam; ++ if (sp.sched_priority < newpriomax || sp.sched_priority < priomax) ++ { ++ if (sp.sched_priority < newpriomax) ++ sp.sched_priority = newpriomax; ++ ++ if (__sched_setscheduler (self->tid, self->schedpolicy, &sp) < 0) ++ result = errno; ++ } ++ } ++ ++ lll_unlock (self->lock, LLL_PRIVATE); ++ ++ return result; ++} ++ ++int ++__pthread_current_priority (void) ++{ ++ struct pthread *self = THREAD_SELF; ++ if ((self->flags & (ATTR_FLAG_POLICY_SET | ATTR_FLAG_SCHED_SET)) ++ == (ATTR_FLAG_POLICY_SET | ATTR_FLAG_SCHED_SET)) ++ return self->schedparam.sched_priority; ++ ++ int result = 0; ++ ++ /* See CREATE THREAD NOTES in nptl/pthread_create.c. */ ++ lll_lock (self->lock, LLL_PRIVATE); ++ ++ if ((self->flags & ATTR_FLAG_SCHED_SET) == 0) ++ { ++ if (__sched_getparam (self->tid, &self->schedparam) != 0) ++ result = -1; ++ else ++ self->flags |= ATTR_FLAG_SCHED_SET; ++ } ++ ++ if ((self->flags & ATTR_FLAG_POLICY_SET) == 0) ++ { ++ self->schedpolicy = __sched_getscheduler (self->tid); ++ if (self->schedpolicy == -1) ++ result = -1; ++ else ++ self->flags |= ATTR_FLAG_POLICY_SET; ++ } ++ ++ if (result != -1) ++ result = self->schedparam.sched_priority; ++ ++ lll_unlock (self->lock, LLL_PRIVATE); ++ ++ return result; ++} +diff --git a/nptl_2_17/vars_2_17.c b/nptl_2_17/vars_2_17.c +new file mode 100644 +index 00000000..ae60c0f8 +--- /dev/null ++++ b/nptl_2_17/vars_2_17.c +@@ -0,0 +1,43 @@ ++/* Copyright (C) 2004-2018 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. 
++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. ++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, see ++ <http://www.gnu.org/licenses/>. */ ++ ++#include "pthreadP_2_17.h" ++#include <stdlib.h> ++#include <tls.h> ++#include <unistd.h> ++ ++/* Default thread attributes for the case when the user does not ++ provide any. */ ++struct pthread_attr __default_pthread_attr attribute_hidden; ++ ++/* Mutex protecting __default_pthread_attr. */ ++int __default_pthread_attr_lock = LLL_LOCK_INITIALIZER; ++ ++/* Flag whether the machine is SMP or not. */ ++int __is_smp attribute_hidden; ++ ++#ifndef TLS_MULTIPLE_THREADS_IN_TCB ++/* Variable set to a nonzero value either if more than one thread runs or ran, ++ or if a single-threaded process is trying to cancel itself. See ++ nptl/descr.h for more context on the single-threaded process case. */ ++int __pthread_multiple_threads attribute_hidden; ++#endif ++ ++/* Table of the key information. */ ++struct pthread_key_struct __pthread_keys[PTHREAD_KEYS_MAX] ++ __attribute__ ((nocommon)); ++hidden_data_def (__pthread_keys) +-- +2.30.0 + |