@@ -27,6 +27,3 @@ creat (const char *file, mode_t mode)
{
return __open (file, O_WRONLY|O_CREAT|O_TRUNC, mode);
}
-
-/* __open handles cancellation. */
-LIBC_CANCEL_HANDLED ();
@@ -70,7 +70,5 @@ ppoll (struct pollfd *fds, nfds_t nfds, const struct timespec *timeout,
}
#ifndef ppoll
-/* __poll handles cancellation. */
-LIBC_CANCEL_HANDLED ();
libc_hidden_def (ppoll);
#endif
@@ -2531,13 +2531,13 @@ aiocb64}, since the LFS transparently replaces the old interface.
@c sigemptyset ok
@c sigaddset ok
@c setjmp ok
-@c CANCEL_ASYNC -> pthread_enable_asynccancel ok
+@c __pthread_setcanceltype ok
@c do_cancel ok
@c pthread_unwind ok
@c Unwind_ForcedUnwind or longjmp ok [@ascuheap @acsmem?]
@c lll_lock @asulock @aculock
@c lll_unlock @asulock @aculock
-@c CANCEL_RESET -> pthread_disable_asynccancel ok
+@c __pthread_setcanceltype ok
@c lll_futex_wait ok
@c ->start_routine ok -----
@c call_tls_dtors @asulock @ascuheap @aculock @acsmem
@@ -73,6 +73,4 @@ __pselect (int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
}
#ifndef __pselect
weak_alias (__pselect, pselect)
-/* __select handles cancellation. */
-LIBC_CANCEL_HANDLED ();
#endif
@@ -30,7 +30,7 @@ install-lib-ldscripts := libpthread.so
routines = alloca_cutoff forward libc-lowlevellock libc-cancellation \
libc-cleanup libc_pthread_init libc_multiple_threads \
- register-atfork pthread_self
+ register-atfork pthread_self syscall_cancel
shared-only-routines = forward
# We need to provide certain routines for compatibility with existing
@@ -117,7 +117,6 @@ libpthread-routines = nptl-init vars events version pt-interp \
cleanup cleanup_defer cleanup_compat \
cleanup_defer_compat unwind \
pt-longjmp pt-cleanup\
- cancellation \
lowlevellock \
lll_timedlock_wait lll_timedwait_tid \
pt-fork pt-vfork \
@@ -166,7 +165,6 @@ CFLAGS-pthread_setcanceltype.c += -fexceptions -fasynchronous-unwind-tables
# These are internal functions which similar functionality as setcancelstate
# and setcanceltype.
-CFLAGS-cancellation.c += -fasynchronous-unwind-tables
CFLAGS-libc-cancellation.c += -fasynchronous-unwind-tables
# Calling pthread_exit() must cause the registered cancel handlers to
@@ -225,6 +223,8 @@ CFLAGS-fsync.c += -fexceptions -fasynchronous-unwind-tables
CFLAGS-pt-system.c += -fexceptions
+CFLAGS-syscall_cancel.c += -fexceptions -fasynchronous-unwind-tables
+
LDLIBS-tst-once5 = -lstdc++
CFLAGS-tst-thread_local1.o = -std=gnu++11
LDLIBS-tst-thread_local1 = -lstdc++
@@ -277,7 +277,7 @@ tests = tst-attr1 tst-attr2 tst-attr3 tst-default-attr \
tst-cancel11 tst-cancel12 tst-cancel13 tst-cancel14 tst-cancel15 \
tst-cancel16 tst-cancel17 tst-cancel18 tst-cancel19 tst-cancel20 \
tst-cancel21 tst-cancel22 tst-cancel23 tst-cancel24 tst-cancel25 \
- tst-cancel26 tst-cancel27 \
+ tst-cancel26 tst-cancel27 tst-cancel28 \
tst-cancel-self tst-cancel-self-cancelstate \
tst-cancel-self-canceltype tst-cancel-self-testcancel \
tst-cleanup0 tst-cleanup1 tst-cleanup2 tst-cleanup3 tst-cleanup4 \
@@ -36,6 +36,9 @@ libc {
__libc_pthread_init;
__libc_current_sigrtmin_private; __libc_current_sigrtmax_private;
__libc_allocate_rtsig_private;
+ __syscall_cancel;
+ __syscall_cancel_arch_start;
+ __syscall_cancel_arch_end;
}
}
deleted file mode 100644
@@ -1,101 +0,0 @@
-/* Copyright (C) 2002-2018 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#include <setjmp.h>
-#include <stdlib.h>
-#include "pthreadP.h"
-#include <futex-internal.h>
-
-
-/* The next two functions are similar to pthread_setcanceltype() but
- more specialized for the use in the cancelable functions like write().
- They do not need to check parameters etc. */
-int
-attribute_hidden
-__pthread_enable_asynccancel (void)
-{
- struct pthread *self = THREAD_SELF;
- int oldval = THREAD_GETMEM (self, cancelhandling);
-
- while (1)
- {
- int newval = oldval | CANCELTYPE_BITMASK;
-
- if (newval == oldval)
- break;
-
- int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
- oldval);
- if (__glibc_likely (curval == oldval))
- {
- if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
- {
- THREAD_SETMEM (self, result, PTHREAD_CANCELED);
- __do_cancel ();
- }
-
- break;
- }
-
- /* Prepare the next round. */
- oldval = curval;
- }
-
- return oldval;
-}
-
-
-void
-attribute_hidden
-__pthread_disable_asynccancel (int oldtype)
-{
- /* If asynchronous cancellation was enabled before we do not have
- anything to do. */
- if (oldtype & CANCELTYPE_BITMASK)
- return;
-
- struct pthread *self = THREAD_SELF;
- int newval;
-
- int oldval = THREAD_GETMEM (self, cancelhandling);
-
- while (1)
- {
- newval = oldval & ~CANCELTYPE_BITMASK;
-
- int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
- oldval);
- if (__glibc_likely (curval == oldval))
- break;
-
- /* Prepare the next round. */
- oldval = curval;
- }
-
- /* We cannot return when we are being canceled. Upon return the
- thread might be things which would have to be undone. The
- following loop should loop until the cancellation signal is
- delivered. */
- while (__builtin_expect ((newval & (CANCELING_BITMASK | CANCELED_BITMASK))
- == CANCELING_BITMASK, 0))
- {
- futex_wait_simple ((unsigned int *) &self->cancelhandling, newval,
- FUTEX_PRIVATE);
- newval = THREAD_GETMEM (self, cancelhandling);
- }
-}
@@ -278,23 +278,20 @@ struct pthread
/* Bit set if asynchronous cancellation mode is selected. */
#define CANCELTYPE_BIT 1
#define CANCELTYPE_BITMASK (0x01 << CANCELTYPE_BIT)
- /* Bit set if canceling has been initiated. */
-#define CANCELING_BIT 2
-#define CANCELING_BITMASK (0x01 << CANCELING_BIT)
- /* Bit set if canceled. */
-#define CANCELED_BIT 3
+  /* Bit set if thread is canceled.  */
+#define CANCELED_BIT 2
#define CANCELED_BITMASK (0x01 << CANCELED_BIT)
/* Bit set if thread is exiting. */
-#define EXITING_BIT 4
+#define EXITING_BIT 3
#define EXITING_BITMASK (0x01 << EXITING_BIT)
/* Bit set if thread terminated and TCB is freed. */
-#define TERMINATED_BIT 5
+#define TERMINATED_BIT 4
#define TERMINATED_BITMASK (0x01 << TERMINATED_BIT)
/* Bit set if thread is supposed to change XID. */
-#define SETXID_BIT 6
+#define SETXID_BIT 5
#define SETXID_BITMASK (0x01 << SETXID_BIT)
/* Mask for the rest. Helps the compiler to optimize. */
-#define CANCEL_RESTMASK 0xffffff80
+#define CANCEL_RESTMASK 0xffffffc0
#define CANCEL_ENABLED_AND_CANCELED(value) \
(((value) & (CANCELSTATE_BITMASK | CANCELED_BITMASK | EXITING_BITMASK \
@@ -16,9 +16,50 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
+#include <setjmp.h>
+#include <stdlib.h>
#include "pthreadP.h"
+/* Cancellation function called by all cancellable syscalls. */
+long int
+__syscall_cancel (__syscall_arg_t nr, __syscall_arg_t a1,
+ __syscall_arg_t a2, __syscall_arg_t a3,
+ __syscall_arg_t a4, __syscall_arg_t a5,
+ __syscall_arg_t a6)
+{
+ pthread_t self = (pthread_t) THREAD_SELF;
+ volatile struct pthread *pd = (volatile struct pthread *) self;
+ long int result;
-#define __pthread_enable_asynccancel __libc_enable_asynccancel
-#define __pthread_disable_asynccancel __libc_disable_asynccancel
-#include <nptl/cancellation.c>
+ /* If cancellation is not enabled, call the syscall directly. */
+ if (pd->cancelhandling & CANCELSTATE_BITMASK)
+ {
+ INTERNAL_SYSCALL_DECL (err);
+ result = INTERNAL_SYSCALL_NCS_CALL (nr, err, a1, a2, a3, a4, a5, a6);
+ if (INTERNAL_SYSCALL_ERROR_P (result, err))
+ return -INTERNAL_SYSCALL_ERRNO (result, err);
+ return result;
+ }
+
+  /* Call the arch-specific entry point that contains the global markers
+     to be checked by the SIGCANCEL handler.  */
+ result = __syscall_cancel_arch (&pd->cancelhandling, nr, a1, a2, a3, a4, a5,
+ a6);
+
+ if ((result == -EINTR)
+ && (pd->cancelhandling & CANCELED_BITMASK)
+ && !(pd->cancelhandling & CANCELSTATE_BITMASK))
+ __syscall_do_cancel ();
+
+ return result;
+}
+libc_hidden_def (__syscall_cancel)
+
+/* Since __do_cancel is an always-inline function, this creates a symbol
+   that the arch-specific code can call to cancel the thread.  */
+void
+__cleanup_fct_attribute attribute_hidden __attribute ((noreturn))
+__syscall_do_cancel (void)
+{
+ __do_cancel ();
+}
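
For reference, a cancellable wrapper under this scheme no longer brackets the
kernel call with enable/disable asynccancel; it funnels through
__syscall_cancel via the SYSCALL_CANCEL macro added to sysdep.h later in this
series.  A minimal sketch of what such a wrapper reduces to (illustrative
only, not part of the patch; the function name is hypothetical):

/* Illustrative sketch only: a cancellable read wrapper under the new
   scheme, assuming the SYSCALL_CANCEL macro defined later in this patch.  */
ssize_t
__read_sketch (int fd, void *buf, size_t nbytes)
{
  /* Expands (roughly) to __syscall_cancel (__NR_read, __SSC (fd),
     __SSC (buf), __SSC (nbytes), 0, 0, 0) followed by the errno
     conversion in SYSCALL_CANCEL_RET.  */
  return SYSCALL_CANCEL (read, fd, buf, nbytes);
}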
@@ -52,7 +52,7 @@ __lll_timedlock_wait (int *futex, const struct timespec *abstime, int private)
return ETIMEDOUT;
/* If *futex == 2, wait until woken or timeout. */
- lll_futex_timed_wait (futex, 2, &rt, private);
+ lll_futex_timed_wait_cancel (futex, 2, &rt, private);
}
return 0;
@@ -62,7 +62,8 @@ __lll_timedwait_tid (int *tidp, const struct timespec *abstime)
The kernel up to version 3.16.3 does not use the private futex
operations for futex wake-up when the clone terminates.
*/
- if (lll_futex_timed_wait (tidp, tid, &rt, LLL_SHARED) == -ETIMEDOUT)
+ if (lll_futex_timed_wait_cancel (tidp, tid, &rt, LLL_SHARED)
+ == -ETIMEDOUT)
return ETIMEDOUT;
}
@@ -38,6 +38,7 @@
#include <kernel-features.h>
#include <libc-pointer-arith.h>
#include <pthread-pids.h>
+#include <sigcontextinfo.h>
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
/* Pointer to the corresponding variable in libc. */
@@ -175,49 +176,65 @@ __nptl_set_robust (struct pthread *self)
#ifdef SIGCANCEL
+
+extern const char __syscall_cancel_arch_start[1];
+extern const char __syscall_cancel_arch_end[1];
+
+/* Workaround for architectures which either do not define the mask
+   as a sigset_t (alpha) or do not call sa_sigaction with a ucontext_t
+   as the third argument (sparc).  */
+# ifndef UCONTEXT_SIGMASK
+# define UCONTEXT_SIGMASK(cxt) \
+ &(((ucontext_t*) (cxt))->uc_sigmask)
+# endif
+
/* For asynchronous cancellation we use a signal. This is the handler. */
static void
sigcancel_handler (int sig, siginfo_t *si, void *ctx)
{
+ INTERNAL_SYSCALL_DECL (err);
+ pid_t pid = INTERNAL_SYSCALL_CALL (getpid, err);
+
/* Safety check. It would be possible to call this function for
other signals and send a signal from another process. This is not
correct and might even be a security problem. Try to catch as
many incorrect invocations as possible. */
if (sig != SIGCANCEL
- || si->si_pid != __getpid()
+ || si->si_pid != pid
|| si->si_code != SI_TKILL)
return;
struct pthread *self = THREAD_SELF;
+ volatile struct pthread *pd = (volatile struct pthread *) self;
- int oldval = THREAD_GETMEM (self, cancelhandling);
- while (1)
+ if (((pd->cancelhandling & (CANCELSTATE_BITMASK)) != 0)
+ || ((pd->cancelhandling & CANCELED_BITMASK) == 0))
+ return;
+
+  /* Add SIGCANCEL to the signal mask that will be restored when the
+     handler returns, so that the handler is not called again.  */
+ sigset_t *set = UCONTEXT_SIGMASK (ctx);
+ __sigaddset (set, SIGCANCEL);
+
+  /* Check if asynchronous cancellation mode is set or if the interrupted
+     instruction pointer falls within the cancellable syscall bridge.  For
+     interruptible syscalls that might generate external side effects
+     (partial reads or writes, for instance), the kernel will set the IP to
+     after '__syscall_cancel_arch_end', thus disabling the cancellation and
+     allowing the process to handle such conditions.  */
+ uintptr_t pc = ucontext_get_pc (ctx);
+ if (pd->cancelhandling & CANCELTYPE_BITMASK
+ || (pc >= (uintptr_t) __syscall_cancel_arch_start
+ && pc < (uintptr_t) __syscall_cancel_arch_end))
{
- /* We are canceled now. When canceled by another thread this flag
- is already set but if the signal is directly send (internally or
- from another process) is has to be done here. */
- int newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;
-
- if (oldval == newval || (oldval & EXITING_BITMASK) != 0)
- /* Already canceled or exiting. */
- break;
-
- int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
- oldval);
- if (curval == oldval)
- {
- /* Set the return value. */
- THREAD_SETMEM (self, result, PTHREAD_CANCELED);
-
- /* Make sure asynchronous cancellation is still enabled. */
- if ((newval & CANCELTYPE_BITMASK) != 0)
- /* Run the registered destructors and terminate the thread. */
- __do_cancel ();
-
- break;
- }
-
- oldval = curval;
+ THREAD_ATOMIC_BIT_SET (self, cancelhandling, EXITING_BIT);
+ THREAD_SETMEM (self, result, PTHREAD_CANCELED);
+
+ INTERNAL_SYSCALL_CALL (rt_sigprocmask, err, SIG_SETMASK, set, NULL,
+ _NSIG / 8);
+
+ __do_cancel ();
+ return;
}
}
#endif
@@ -372,7 +389,10 @@ __pthread_initialize_minimal_internal (void)
cannot install the handler we do not abort. Maybe we should, but
it is only asynchronous cancellation which is affected. */
sa.sa_sigaction = sigcancel_handler;
- sa.sa_flags = SA_SIGINFO;
+  /* The handler is installed with SA_RESTART so that SIGCANCEL does not
+     cause a spurious EINTR, either when it is sent to the whole process or
+     when pthread_cancel is called while cancellation is disabled in the
+     target thread.  */
+ sa.sa_flags = SA_SIGINFO | SA_RESTART;
(void) __libc_sigaction (SIGCANCEL, &sa, NULL);
# endif
@@ -291,51 +291,37 @@ __do_cancel (void)
{
struct pthread *self = THREAD_SELF;
- /* Make sure we get no more cancellations. */
- THREAD_ATOMIC_BIT_SET (self, cancelhandling, EXITING_BIT);
+  /* Make sure we get no more cancellations by setting the cancellation
+     state to disabled and the type back to deferred.  */
+ int oldval = THREAD_GETMEM (self, cancelhandling);
+ while (1)
+ {
+ int newval = (oldval | CANCELSTATE_BITMASK);
+ newval &= ~(CANCELTYPE_BITMASK);
+ if (oldval == newval)
+ break;
+
+ int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
+ oldval);
+ if (__glibc_likely (curval == oldval))
+ break;
+ oldval = curval;
+ }
+
+ THREAD_SETMEM (self, result, PTHREAD_CANCELED);
__pthread_unwind ((__pthread_unwind_buf_t *)
THREAD_GETMEM (self, cleanup_jmp_buf));
}
-/* Set cancellation mode to asynchronous. */
-#define CANCEL_ASYNC() \
- __pthread_enable_asynccancel ()
-/* Reset to previous cancellation mode. */
-#define CANCEL_RESET(oldtype) \
- __pthread_disable_asynccancel (oldtype)
-
-#if IS_IN (libc)
-/* Same as CANCEL_ASYNC, but for use in libc.so. */
-# define LIBC_CANCEL_ASYNC() \
- __libc_enable_asynccancel ()
-/* Same as CANCEL_RESET, but for use in libc.so. */
-# define LIBC_CANCEL_RESET(oldtype) \
- __libc_disable_asynccancel (oldtype)
-# define LIBC_CANCEL_HANDLED() \
- __asm (".globl " __SYMBOL_PREFIX "__libc_enable_asynccancel"); \
- __asm (".globl " __SYMBOL_PREFIX "__libc_disable_asynccancel")
-#elif IS_IN (libpthread)
-# define LIBC_CANCEL_ASYNC() CANCEL_ASYNC ()
-# define LIBC_CANCEL_RESET(val) CANCEL_RESET (val)
-# define LIBC_CANCEL_HANDLED() \
- __asm (".globl " __SYMBOL_PREFIX "__pthread_enable_asynccancel"); \
- __asm (".globl " __SYMBOL_PREFIX "__pthread_disable_asynccancel")
-#elif IS_IN (librt)
-# define LIBC_CANCEL_ASYNC() \
- __librt_enable_asynccancel ()
-# define LIBC_CANCEL_RESET(val) \
- __librt_disable_asynccancel (val)
-# define LIBC_CANCEL_HANDLED() \
- __asm (".globl " __SYMBOL_PREFIX "__librt_enable_asynccancel"); \
- __asm (".globl " __SYMBOL_PREFIX "__librt_disable_asynccancel")
-#else
-# define LIBC_CANCEL_ASYNC() 0 /* Just a dummy value. */
-# define LIBC_CANCEL_RESET(val) ((void)(val)) /* Nothing, but evaluate it. */
-# define LIBC_CANCEL_HANDLED() /* Nothing. */
-#endif
+extern long int __syscall_cancel_arch (volatile int *, __syscall_arg_t nr,
+ __syscall_arg_t arg1, __syscall_arg_t arg2, __syscall_arg_t arg3,
+ __syscall_arg_t arg4, __syscall_arg_t arg5, __syscall_arg_t arg6);
+libc_hidden_proto (__syscall_cancel_arch);
+extern void __syscall_do_cancel (void)
+ __cleanup_fct_attribute attribute_hidden __attribute ((__noreturn__));
/* Internal prototypes. */
@@ -503,8 +489,6 @@ extern int __pthread_kill (pthread_t threadid, int signo);
extern void __pthread_exit (void *value) __attribute__ ((__noreturn__));
extern int __pthread_join (pthread_t threadid, void **thread_return);
extern int __pthread_setcanceltype (int type, int *oldtype);
-extern int __pthread_enable_asynccancel (void) attribute_hidden;
-extern void __pthread_disable_asynccancel (int oldtype) attribute_hidden;
extern void __pthread_testcancel (void);
extern int __pthread_timedjoin_ex (pthread_t, void **, const struct timespec *,
bool);
@@ -543,15 +527,6 @@ extern int __pthread_cond_wait_2_0 (pthread_cond_2_0_t *cond,
extern int __pthread_getaffinity_np (pthread_t th, size_t cpusetsize,
cpu_set_t *cpuset);
-/* The two functions are in libc.so and not exported. */
-extern int __libc_enable_asynccancel (void) attribute_hidden;
-extern void __libc_disable_asynccancel (int oldtype) attribute_hidden;
-
-
-/* The two functions are in librt.so and not exported. */
-extern int __librt_enable_asynccancel (void) attribute_hidden;
-extern void __librt_disable_asynccancel (int oldtype) attribute_hidden;
-
#if IS_IN (libpthread)
/* Special versions which use non-exported functions. */
extern void __pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer,
@@ -37,67 +37,23 @@ __pthread_cancel (pthread_t th)
#ifdef SHARED
pthread_cancel_init ();
#endif
- int result = 0;
- int oldval;
- int newval;
- do
- {
- again:
- oldval = pd->cancelhandling;
- newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;
- /* Avoid doing unnecessary work. The atomic operation can
- potentially be expensive if the bug has to be locked and
- remote cache lines have to be invalidated. */
- if (oldval == newval)
- break;
+ THREAD_ATOMIC_BIT_SET (pd, cancelhandling, CANCELED_BIT);
- /* If the cancellation is handled asynchronously just send a
- signal. We avoid this if possible since it's more
- expensive. */
- if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
- {
- /* Mark the cancellation as "in progress". */
- if (atomic_compare_and_exchange_bool_acq (&pd->cancelhandling,
- oldval | CANCELING_BITMASK,
- oldval))
- goto again;
-
-#ifdef SIGCANCEL
- /* The cancellation handler will take care of marking the
- thread as canceled. */
- pid_t pid = __getpid ();
-
- INTERNAL_SYSCALL_DECL (err);
- int val = INTERNAL_SYSCALL_CALL (tgkill, err, pid, pd->tid,
- SIGCANCEL);
- if (INTERNAL_SYSCALL_ERROR_P (val, err))
- result = INTERNAL_SYSCALL_ERRNO (val, err);
-#else
- /* It should be impossible to get here at all, since
- pthread_setcanceltype should never have allowed
- PTHREAD_CANCEL_ASYNCHRONOUS to be set. */
- abort ();
-#endif
-
- break;
- }
-
- /* A single-threaded process should be able to kill itself, since
- there is nothing in the POSIX specification that says that it
- cannot. So we set multiple_threads to true so that cancellation
- points get executed. */
- THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);
+ /* A single-threaded process should be able to kill itself, since there is
+ nothing in the POSIX specification that says that it cannot. So we set
+ multiple_threads to true so that cancellation points get executed. */
+ THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
- __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
+ __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif
- }
- /* Mark the thread as canceled. This has to be done
- atomically since other bits could be modified as well. */
- while (atomic_compare_and_exchange_bool_acq (&pd->cancelhandling, newval,
- oldval));
- return result;
+  /* Avoid signaling when the thread attempts to cancel itself
+     (pthread_kill is expensive).  */
+ if (pd == THREAD_SELF && !(pd->cancelhandling & CANCELTYPE_BITMASK))
+ return 0;
+
+ return __pthread_kill (th, SIGCANCEL);
}
weak_alias (__pthread_cancel, pthread_cancel)
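
From the application's point of view the cancellation contract is unchanged:
pthread_cancel marks the target as canceled and, except when a thread with
deferred cancellation cancels itself, sends SIGCANCEL, and the target acts on
the request at its next cancellation point.  A self-contained user-level
illustration of the deferred case (ordinary POSIX code, not part of the
patch):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void
cleanup (void *arg)
{
  /* Runs during cancellation unwinding; release the descriptor.  */
  close (*(int *) arg);
}

static void *
worker (void *arg)
{
  int fd = *(int *) arg;
  char buf[64];
  pthread_cleanup_push (cleanup, &fd);
  /* read is a cancellation point; the thread blocks here until the
     cancellation request is acted upon.  */
  read (fd, buf, sizeof buf);
  pthread_cleanup_pop (1);
  return NULL;
}

int
main (void)
{
  int fds[2];
  if (pipe (fds) != 0)
    return 1;
  pthread_t t;
  pthread_create (&t, NULL, worker, &fds[0]);
  pthread_cancel (t);
  void *res;
  pthread_join (t, &res);
  puts (res == PTHREAD_CANCELED ? "worker cancelled" : "worker exited");
  return 0;
}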
@@ -412,7 +412,7 @@ START_THREAD_DEFN
/* If the parent was running cancellation handlers while creating
the thread the new thread inherited the signal mask. Reset the
cancellation signal mask. */
- if (__glibc_unlikely (pd->parent_cancelhandling & CANCELING_BITMASK))
+ if (__glibc_unlikely (pd->parent_cancelhandling & CANCELED_BITMASK))
{
INTERNAL_SYSCALL_DECL (err);
sigset_t mask;
@@ -444,7 +444,8 @@ START_THREAD_DEFN
have ownership (see CONCURRENCY NOTES above). */
if (__glibc_unlikely (pd->stopped_start))
{
- int oldtype = CANCEL_ASYNC ();
+ int ct;
+ __pthread_setcanceltype (PTHREAD_CANCEL_ASYNCHRONOUS, &ct);
/* Get the lock the parent locked to force synchronization. */
lll_lock (pd->lock, LLL_PRIVATE);
@@ -454,7 +455,7 @@ START_THREAD_DEFN
/* And give it up right away. */
lll_unlock (pd->lock, LLL_PRIVATE);
- CANCEL_RESET (oldtype);
+ __pthread_setcanceltype (ct, NULL);
}
LIBC_PROBE (pthread_start, 3, (pthread_t) pd, pd->start_routine, pd->arg);
@@ -23,9 +23,14 @@
void
__pthread_exit (void *value)
{
- THREAD_SETMEM (THREAD_SELF, result, value);
+ struct pthread *self = THREAD_SELF;
- __do_cancel ();
+ THREAD_SETMEM (self, result, value);
+
+ THREAD_ATOMIC_BIT_SET (self, cancelhandling, EXITING_BIT);
+
+ __pthread_unwind ((__pthread_unwind_buf_t *)
+ THREAD_GETMEM (self, cleanup_jmp_buf));
}
weak_alias (__pthread_exit, pthread_exit)
@@ -54,7 +54,7 @@ __pthread_timedjoin_ex (pthread_t threadid, void **thread_return,
if ((pd == self
|| (self->joinid == pd
&& (pd->cancelhandling
- & (CANCELING_BITMASK | CANCELED_BITMASK | EXITING_BITMASK
+ & (CANCELED_BITMASK | EXITING_BITMASK
| TERMINATED_BITMASK)) == 0))
&& !CANCEL_ENABLED_AND_CANCELED (self->cancelhandling))
/* This is a deadlock situation. The threads are waiting for each
@@ -81,14 +81,15 @@ __pthread_timedjoin_ex (pthread_t threadid, void **thread_return,
un-wait-ed for again. */
pthread_cleanup_push (cleanup, &pd->joinid);
- int oldtype = CANCEL_ASYNC ();
+ int ct;
+ __pthread_setcanceltype (PTHREAD_CANCEL_ASYNCHRONOUS, &ct);
if (abstime != NULL)
result = lll_timedwait_tid (pd->tid, abstime);
else
lll_wait_tid (pd->tid);
- CANCEL_RESET (oldtype);
+ __pthread_setcanceltype (ct, NULL);
pthread_cleanup_pop (0);
}
@@ -56,14 +56,8 @@ __old_sem_wait (sem_t *sem)
if (atomic_decrement_if_positive (futex) > 0)
return 0;
- /* Enable asynchronous cancellation. Required by the standard. */
- int oldtype = __pthread_enable_asynccancel ();
-
/* Always assume the semaphore is shared. */
- err = lll_futex_wait (futex, 0, LLL_SHARED);
-
- /* Disable asynchronous cancellation. */
- __pthread_disable_asynccancel (oldtype);
+ err = lll_futex_wait_cancel (futex, 0, LLL_SHARED);
}
while (err == 0 || err == -EWOULDBLOCK);
new file mode 100644
@@ -0,0 +1,94 @@
+/* Check that side effects of cancelled syscalls are visible to the
+   application (BZ #12683).
+ Copyright (C) 2017 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* This test checks for resource leakage when a syscall has returned from
+   the kernel but the cancellation signal arrives before userspace has
+   saved the return value.  The 'leaker' thread should be able to close
+   the file descriptor if the resource was already allocated, meaning that
+   if the cancellation signal arrives *after* the open syscall returns
+   from the kernel, the side effect must be visible to the application.  */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdlib.h>
+
+#include <support/xthread.h>
+#include <support/check.h>
+#include <support/temp_file.h>
+
+static void *
+writeopener (void *arg)
+{
+ int fd;
+ for (;;)
+ {
+ fd = open (arg, O_WRONLY);
+ close (fd);
+ }
+ return NULL;
+}
+
+static void *
+leaker (void *arg)
+{
+ int fd = open (arg, O_RDONLY);
+ pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, 0);
+ close (fd);
+ return NULL;
+}
+
+
+#define ITER_COUNT 1000
+#define MAX_FILENO 1024
+
+static int
+do_test (void)
+{
+ struct stat st;
+ int i;
+
+ char *name = NULL;
+ support_create_temp_fifo ("tst-cancel28", &name);
+
+ srand (1);
+
+ xpthread_create (NULL, writeopener, name);
+ for (i = 0; i < ITER_COUNT; i++)
+ {
+ pthread_t td = xpthread_create (NULL, leaker, name);
+ struct timespec ts =
+ { .tv_nsec = rand () % 100000, .tv_sec = 0 };
+ nanosleep (&ts, NULL);
+      /* Ignore the pthread_cancel result because the thread may already
+         have exited by the time it is called.  */
+ pthread_cancel (td);
+ xpthread_join (td);
+ }
+
+ for (i = STDERR_FILENO+1; i < MAX_FILENO; i++)
+ if (fstat (i, &st) == 0)
+ FAIL_EXIT1 ("leaked fd %d", i);
+
+ return 0;
+}
+
+#define TIMEOUT 10
+#include <support/test-driver.c>
@@ -64,7 +64,6 @@ CFLAGS-aio_suspend.c += -fexceptions
CFLAGS-mq_timedreceive.c += -fexceptions -fasynchronous-unwind-tables
CFLAGS-mq_timedsend.c += -fexceptions -fasynchronous-unwind-tables
CFLAGS-clock_nanosleep.c += -fexceptions -fasynchronous-unwind-tables
-CFLAGS-librt-cancellation.c += -fasynchronous-unwind-tables
LDFLAGS-rt.so = -Wl,--enable-new-dtags,-z,nodelete
@@ -86,6 +86,29 @@ create_temp_file (const char *base, char **filename)
return fd;
}
+int
+support_create_temp_fifo (const char *base, char **fifoname)
+{
+ char *fname = xasprintf ("%s/%sXXXXXX", test_dir, base);
+ mktemp (fname);
+
+  int ret = mkfifo (fname, 0600);
+  if (ret == -1)
+    {
+      printf ("cannot create temporary fifo '%s': %m\n", fname);
+ free (fname);
+ return -1;
+ }
+
+ add_temp_file (fname);
+ if (fifoname != NULL)
+ *fifoname = fname;
+ else
+ free (fname);
+
+  return ret;
+}
+
char *
support_create_temp_directory (const char *base)
{
@@ -32,6 +32,12 @@ void add_temp_file (const char *name);
*FILENAME. */
int create_temp_file (const char *base, char **filename);
+/* Create a temporary FIFO and schedule it for deletion.  BASE is used as
+   a prefix for the unique name.  Return 0 on success, or -1 on failure.
+   Write the file name to *FIFONAME if FIFONAME is not NULL; in this case,
+   the caller is expected to free *FIFONAME.  */
+int support_create_temp_fifo (const char *base, char **fifoname);
+
/* Create a temporary directory and schedule it for deletion. BASE is
used as a prefix for the unique directory name, which the function
returns. The caller should free this string. */
@@ -16,6 +16,11 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
+#ifndef _SIGCONTEXTINFO_H
+#define _SIGCONTEXTINFO_H
+
+#include <stdint.h>
+
/* In general we cannot provide any information. */
#define SIGCONTEXT struct sigcontext *
#define SIGCONTEXT_EXTRA_ARGS
@@ -24,3 +29,13 @@
#define GET_STACK(ctx) ((void *) 0)
#define CALL_SIGHANDLER(handler, signo, ctx) \
(handler)((signo), SIGCONTEXT_EXTRA_ARGS (ctx))
+
+/* Obtain the program counter from the third argument of a signal handler
+   installed with SA_SIGINFO.  This generic version cannot provide any
+   information, so it returns 0; architectures are expected to override it.  */
+static inline uintptr_t
+ucontext_get_pc (void *ctx)
+{
+ return 0;
+}
+
+#endif
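
The generic fallback above returns 0, which effectively disables the PC-range
check in sigcancel_handler; architectures are expected to override it.  As an
illustration of the kind of override intended, a hypothetical x86_64 version
might look like the following (assumes the Linux x86_64 ucontext layout;
inside glibc REG_RIP is visible, a standalone build would need _GNU_SOURCE):

#include <stdint.h>
#include <sys/ucontext.h>

/* Hypothetical x86_64 override sketch: fetch the interrupted program
   counter from the ucontext_t passed as the third sa_sigaction argument.  */
static inline uintptr_t
ucontext_get_pc (void *ctx)
{
  return ((ucontext_t *) ctx)->uc_mcontext.gregs[REG_RIP];
}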
@@ -3,6 +3,3 @@
/* No multi-thread handling enabled. */
#define SINGLE_THREAD_P (1)
#define RTLD_SINGLE_THREAD_P (1)
-#define LIBC_CANCEL_ASYNC() 0 /* Just a dummy value. */
-#define LIBC_CANCEL_RESET(val) ((void)(val)) /* Nothing, but evaluate it. */
-#define LIBC_CANCEL_HANDLED() /* Nothing. */
@@ -21,8 +21,7 @@ libpthread-sysdep_routines += errno-loc
endif
ifeq ($(subdir),rt)
-librt-sysdep_routines += timer_routines librt-cancellation
-CFLAGS-librt-cancellation.c += -fexceptions -fasynchronous-unwind-tables
+librt-sysdep_routines += timer_routines
tests += tst-mqueue8x
CFLAGS-tst-mqueue8x.c += -fexceptions
@@ -34,22 +34,18 @@
#define AIO_MISC_WAIT(result, futex, timeout, cancel) \
do { \
- volatile unsigned int *futexaddr = &futex; \
+ unsigned int *futexaddr = (unsigned int *)&futex; \
unsigned int oldval = futex; \
\
if (oldval != 0) \
{ \
pthread_mutex_unlock (&__aio_requests_mutex); \
\
- int oldtype; \
- if (cancel) \
- oldtype = LIBC_CANCEL_ASYNC (); \
- \
int status; \
do \
{ \
- status = futex_reltimed_wait ((unsigned int *) futexaddr, oldval, \
- timeout, FUTEX_PRIVATE); \
+ status = futex_reltimed_wait_cancelable (futexaddr, oldval, \
+ timeout, FUTEX_PRIVATE); \
if (status != EAGAIN) \
break; \
\
@@ -57,9 +53,6 @@
} \
while (oldval != 0); \
\
- if (cancel) \
- LIBC_CANCEL_RESET (oldtype); \
- \
if (status == EINTR) \
result = EINTR; \
else if (status == ETIMEDOUT) \
@@ -35,22 +35,18 @@
#define GAI_MISC_WAIT(result, futex, timeout, cancel) \
do { \
- volatile unsigned int *futexaddr = &futex; \
+ unsigned int *futexaddr = (unsigned int *)&futex; \
unsigned int oldval = futex; \
\
if (oldval != 0) \
{ \
pthread_mutex_unlock (&__gai_requests_mutex); \
\
- int oldtype; \
- if (cancel) \
- oldtype = LIBC_CANCEL_ASYNC (); \
- \
int status; \
do \
{ \
- status = futex_reltimed_wait ((unsigned int *) futexaddr, oldval, \
- timeout, FUTEX_PRIVATE); \
+ status = futex_reltimed_wait_cancelable (futexaddr, oldval, \
+ timeout, FUTEX_PRIVATE); \
if (status != EAGAIN) \
break; \
\
@@ -58,9 +54,6 @@
} \
while (oldval != 0); \
\
- if (cancel) \
- LIBC_CANCEL_RESET (oldtype); \
- \
if (status == EINTR) \
result = EINTR; \
else if (status == ETIMEDOUT) \
deleted file mode 100644
@@ -1,24 +0,0 @@
-/* Copyright (C) 2002-2018 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#include <nptl/pthreadP.h>
-
-
-#define __pthread_enable_asynccancel __librt_enable_asynccancel
-#define __pthread_disable_asynccancel __librt_disable_asynccancel
-#include <nptl/cancellation.c>
@@ -180,12 +180,13 @@ extern int __lll_timedlock_wait (int *futex, const struct timespec *,
wake-up when the clone terminates. The memory location contains the
thread ID while the clone is running and is reset to zero by the kernel
afterwards. The kernel up to version 3.16.3 does not use the private futex
- operations for futex wake-up when the clone terminates. */
+ operations for futex wake-up when the clone terminates.
+   Both lll_wait_tid and lll_timedwait_tid act as cancellation points.  */
#define lll_wait_tid(tid) \
do { \
__typeof (tid) __tid; \
while ((__tid = (tid)) != 0) \
- lll_futex_wait (&(tid), __tid, LLL_SHARED);\
+ lll_futex_wait_cancel (&(tid), __tid, LLL_SHARED);\
} while (0)
extern int __lll_timedwait_tid (int *, const struct timespec *)
@@ -34,16 +34,8 @@ __libc_open64 (const char *file, int oflag, ...)
va_end (arg);
}
- if (SINGLE_THREAD_P)
- return __libc_open (file, oflag | O_LARGEFILE, mode);
-
- int oldtype = LIBC_CANCEL_ASYNC ();
-
- int result = __libc_open (file, oflag | O_LARGEFILE, mode);
-
- LIBC_CANCEL_RESET (oldtype);
-
- return result;
+  /* __libc_open is a cancellation point.  */
+ return __libc_open (file, oflag | O_LARGEFILE, mode);
}
weak_alias (__libc_open64, __open64)
libc_hidden_weak (__open64)
@@ -38,5 +38,3 @@ __libc_pause (void)
return __sigsuspend (&set);
}
weak_alias (__libc_pause, pause)
-
-LIBC_CANCEL_HANDLED (); /* sigsuspend handles our cancellation. */
@@ -70,6 +70,3 @@ __xpg_sigpause (int sig)
return __sigpause (sig, 1);
}
strong_alias (__xpg_sigpause, __libc___xpg_sigpause)
-
-/* __sigsuspend handles cancellation. */
-LIBC_CANCEL_HANDLED ();
@@ -88,13 +88,8 @@ __sigwait (const sigset_t *set, int *sig)
if (SINGLE_THREAD_P)
return do_sigwait (set, sig);
- int oldtype = LIBC_CANCEL_ASYNC ();
-
- int result = do_sigwait (set, sig);
-
- LIBC_CANCEL_RESET (oldtype);
-
- return result;
+  /* do_sigwait is a cancellation point.  */
+ return do_sigwait (set, sig);
}
libc_hidden_def (__sigwait)
weak_alias (__sigwait, sigwait)
@@ -151,16 +151,7 @@ OUR_WAITID (idtype_t idtype, id_t id, siginfo_t *infop, int options)
int
__waitid (idtype_t idtype, id_t id, siginfo_t *infop, int options)
{
- if (SINGLE_THREAD_P)
- return do_waitid (idtype, id, infop, options);
-
- int oldtype = LIBC_CANCEL_ASYNC ();
-
- int result = do_waitid (idtype, id, infop, options);
-
- LIBC_CANCEL_RESET (oldtype);
-
- return result;
+ return do_waitid (idtype, id, infop, options);
}
weak_alias (__waitid, waitid)
strong_alias (__waitid, __libc_waitid)
@@ -122,7 +122,8 @@ __lll_timedwait_tid (int *tidp, const struct timespec *abstime)
/* Wait until thread terminates. The kernel so far does not use
the private futex operations for this. */
- if (lll_futex_timed_wait (tidp, tid, &rt, LLL_SHARED) == -ETIMEDOUT)
+ if (lll_futex_timed_wait_cancel (tidp, tid, &rt, LLL_SHARED)
+ == -ETIMEDOUT)
return ETIMEDOUT;
}
@@ -24,6 +24,9 @@
#define SYSCALL__(name, args) PSEUDO (__##name, name, args)
#define SYSCALL(name, args) PSEUDO (name, name, args)
+#ifndef __ASSEMBLER__
+# include <errno.h>
+
#define __SYSCALL_CONCAT_X(a,b) a##b
#define __SYSCALL_CONCAT(a,b) __SYSCALL_CONCAT_X (a, b)
@@ -57,6 +60,29 @@
#define INTERNAL_SYSCALL_CALL(...) \
__INTERNAL_SYSCALL_DISP (__INTERNAL_SYSCALL, __VA_ARGS__)
+#define __INTERNAL_SYSCALL_NCS0(name, err) \
+ INTERNAL_SYSCALL_NCS (name, err, 0)
+#define __INTERNAL_SYSCALL_NCS1(name, err, a1) \
+ INTERNAL_SYSCALL_NCS (name, err, 1, a1)
+#define __INTERNAL_SYSCALL_NCS2(name, err, a1, a2) \
+ INTERNAL_SYSCALL_NCS (name, err, 2, a1, a2)
+#define __INTERNAL_SYSCALL_NCS3(name, err, a1, a2, a3) \
+ INTERNAL_SYSCALL_NCS (name, err, 3, a1, a2, a3)
+#define __INTERNAL_SYSCALL_NCS4(name, err, a1, a2, a3, a4) \
+ INTERNAL_SYSCALL_NCS (name, err, 4, a1, a2, a3, a4)
+#define __INTERNAL_SYSCALL_NCS5(name, err, a1, a2, a3, a4, a5) \
+ INTERNAL_SYSCALL_NCS (name, err, 5, a1, a2, a3, a4, a5)
+#define __INTERNAL_SYSCALL_NCS6(name, err, a1, a2, a3, a4, a5, a6) \
+ INTERNAL_SYSCALL_NCS (name, err, 6, a1, a2, a3, a4, a5, a6)
+#define __INTERNAL_SYSCALL_NCS7(name, err, a1, a2, a3, a4, a5, a6, a7) \
+ INTERNAL_SYSCALL_NCS (name, err, 7, a1, a2, a3, a4, a5, a6, a7)
+
+/* Issue a syscall defined by its number plus any other required arguments.
+   It is similar to the INTERNAL_SYSCALL_NCS macro, but without the need to
+   pass the expected argument count as the third parameter.  */
+#define INTERNAL_SYSCALL_NCS_CALL(...) \
+ __INTERNAL_SYSCALL_DISP (__INTERNAL_SYSCALL_NCS, __VA_ARGS__)
+
#define __INLINE_SYSCALL0(name) \
INLINE_SYSCALL (name, 0)
#define __INLINE_SYSCALL1(name, a1) \
@@ -88,19 +114,65 @@
#define INLINE_SYSCALL_CALL(...) \
__INLINE_SYSCALL_DISP (__INLINE_SYSCALL, __VA_ARGS__)
-#define SYSCALL_CANCEL(...) \
- ({ \
- long int sc_ret; \
- if (SINGLE_THREAD_P) \
- sc_ret = INLINE_SYSCALL_CALL (__VA_ARGS__); \
- else \
- { \
- int sc_cancel_oldtype = LIBC_CANCEL_ASYNC (); \
- sc_ret = INLINE_SYSCALL_CALL (__VA_ARGS__); \
- LIBC_CANCEL_RESET (sc_cancel_oldtype); \
- } \
- sc_ret; \
+
+/* Cancellation macros. */
+#ifndef __SSC
+typedef long int __syscall_arg_t;
+# define __SSC(__x) ((__syscall_arg_t) (__x))
+#endif
+
+long int __syscall_cancel (__syscall_arg_t nr, __syscall_arg_t arg1,
+ __syscall_arg_t arg2, __syscall_arg_t arg3,
+ __syscall_arg_t arg4, __syscall_arg_t arg5,
+ __syscall_arg_t arg6);
+libc_hidden_proto (__syscall_cancel);
+
+#define __SYSCALL_CANCEL0(name) \
+ (__syscall_cancel)(__NR_##name, 0, 0, 0, 0, 0, 0)
+#define __SYSCALL_CANCEL1(name, a1) \
+ (__syscall_cancel)(__NR_##name, __SSC(a1), 0, 0, 0, 0, 0)
+#define __SYSCALL_CANCEL2(name, a1, a2) \
+ (__syscall_cancel)(__NR_##name, __SSC(a1), __SSC(a2), 0, 0, 0, 0)
+#define __SYSCALL_CANCEL3(name, a1, a2, a3) \
+ (__syscall_cancel)(__NR_##name, __SSC(a1), __SSC(a2), __SSC(a3), 0, 0, 0)
+#define __SYSCALL_CANCEL4(name, a1, a2, a3, a4) \
+ (__syscall_cancel)(__NR_##name, __SSC(a1), __SSC(a2), __SSC(a3), \
+ __SSC(a4), 0, 0)
+#define __SYSCALL_CANCEL5(name, a1, a2, a3, a4, a5) \
+ (__syscall_cancel)(__NR_##name, __SSC(a1), __SSC(a2), __SSC(a3), \
+ __SSC(a4), __SSC(a5), 0)
+#define __SYSCALL_CANCEL6(name, a1, a2, a3, a4, a5, a6) \
+ (__syscall_cancel)(__NR_##name, __SSC(a1), __SSC(a2), __SSC(a3), \
+ __SSC(a4), __SSC(a5), __SSC(a6))
+
+#define __SYSCALL_CANCEL_NARGS_X(a,b,c,d,e,f,g,h,n,...) n
+#define __SYSCALL_CANCEL_NARGS(...) \
+ __SYSCALL_CANCEL_NARGS_X (__VA_ARGS__,7,6,5,4,3,2,1,0,)
+#define __SYSCALL_CANCEL_CONCAT_X(a,b) a##b
+#define __SYSCALL_CANCEL_CONCAT(a,b) __SYSCALL_CANCEL_CONCAT_X (a, b)
+#define __SYSCALL_CANCEL_DISP(b,...) \
+ __SYSCALL_CANCEL_CONCAT (b,__SYSCALL_CANCEL_NARGS(__VA_ARGS__))(__VA_ARGS__)
+
+#define __SYSCALL_CANCEL_CALL(...) \
+ __SYSCALL_CANCEL_DISP (__SYSCALL_CANCEL, __VA_ARGS__)
+
+#define SYSCALL_CANCEL_NCS(name, nr, args...) \
+ __SYSCALL_CANCEL_CALL (name, nr, args)
+
+
+/* The loader does not need to handle thread cancellation, so use the
+   direct syscall instead.  */
+#if IS_IN (rtld)
+# define SYSCALL_CANCEL(...) INLINE_SYSCALL_CALL (__VA_ARGS__)
+#else
+# define SYSCALL_CANCEL(...) \
+ ({ \
+ long int sc_ret = __SYSCALL_CANCEL_CALL (__VA_ARGS__); \
+ SYSCALL_CANCEL_RET ((sc_ret)); \
})
+#endif
+
+#endif /* __ASSEMBLER__ */
/* Machine-dependent sysdep.h files are expected to define the macro
PSEUDO (function_name, syscall_name) to emit assembly code to define the
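
As a concrete illustration, a four-argument call such as
SYSCALL_CANCEL (openat, fd, file, oflag, mode) dispatches on the argument
count and hand-expands to roughly the following (sketch only):

/* Sketch of the expansion for a 4-argument cancellable call.  */
long int sc_ret = __syscall_cancel (__NR_openat, __SSC (fd), __SSC (file),
                                    __SSC (oflag), __SSC (mode), 0, 0);
/* SYSCALL_CANCEL_RET then maps a raw -1 .. -4095 result to errno plus a
   -1 return value and passes any other result through unchanged.  */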
@@ -28,27 +28,13 @@ int
__clock_nanosleep (clockid_t clock_id, int flags, const struct timespec *req,
struct timespec *rem)
{
- INTERNAL_SYSCALL_DECL (err);
- int r;
-
if (clock_id == CLOCK_THREAD_CPUTIME_ID)
return EINVAL;
if (clock_id == CLOCK_PROCESS_CPUTIME_ID)
clock_id = MAKE_PROCESS_CPUCLOCK (0, CPUCLOCK_SCHED);
- if (SINGLE_THREAD_P)
- r = INTERNAL_SYSCALL (clock_nanosleep, err, 4, clock_id, flags, req, rem);
- else
- {
- int oldstate = LIBC_CANCEL_ASYNC ();
-
- r = INTERNAL_SYSCALL (clock_nanosleep, err, 4, clock_id, flags, req,
- rem);
-
- LIBC_CANCEL_RESET (oldstate);
- }
-
- return (INTERNAL_SYSCALL_ERROR_P (r, err)
- ? INTERNAL_SYSCALL_ERRNO (r, err) : 0);
+  /* On failure or interruption by a signal handler, clock_nanosleep must
+     return a positive error number, while SYSCALL_CANCEL_NCS returns a
+     negative errno value; hence the negation.  */
+ return -SYSCALL_CANCEL_NCS (clock_nanosleep, clock_id, flags, req, rem);
}
weak_alias (__clock_nanosleep, clock_nanosleep)
@@ -35,6 +35,4 @@ __creat (const char *file, mode_t mode)
}
weak_alias (__creat, creat)
-LIBC_CANCEL_HANDLED ();
-
#endif
@@ -37,5 +37,3 @@ weak_alias (__creat64, creat64)
strong_alias (__creat64, __creat)
weak_alias (__creat64, creat)
#endif
-
-LIBC_CANCEL_HANDLED ();
@@ -83,10 +83,7 @@ static __always_inline int
futex_wait_cancelable (unsigned int *futex_word, unsigned int expected,
int private)
{
- int oldtype;
- oldtype = __pthread_enable_asynccancel ();
- int err = lll_futex_timed_wait (futex_word, expected, NULL, private);
- __pthread_disable_asynccancel (oldtype);
+ int err = lll_futex_timed_wait_cancel (futex_word, expected, NULL, private);
switch (err)
{
case 0:
@@ -137,10 +134,7 @@ futex_reltimed_wait_cancelable (unsigned int *futex_word,
unsigned int expected,
const struct timespec *reltime, int private)
{
- int oldtype;
- oldtype = __pthread_enable_asynccancel ();
- int err = lll_futex_timed_wait (futex_word, expected, reltime, private);
- __pthread_disable_asynccancel (oldtype);
+ int err = lll_futex_timed_wait_cancel (futex_word, expected, reltime, private);
switch (err)
{
case 0:
@@ -200,11 +194,9 @@ futex_abstimed_wait_cancelable (unsigned int *futex_word,
despite them being valid. */
if (__glibc_unlikely ((abstime != NULL) && (abstime->tv_sec < 0)))
return ETIMEDOUT;
- int oldtype;
- oldtype = __pthread_enable_asynccancel ();
- int err = lll_futex_timed_wait_bitset (futex_word, expected, abstime,
- FUTEX_CLOCK_REALTIME, private);
- __pthread_disable_asynccancel (oldtype);
+ int err = lll_futex_timed_wait_bitset_cancel (futex_word, expected, abstime,
+ FUTEX_CLOCK_REALTIME,
+ private);
switch (err)
{
case 0:
@@ -89,6 +89,12 @@
? -INTERNAL_SYSCALL_ERRNO (__ret, __err) : 0); \
})
+#define lll_futex_syscall_cp(...) \
+ ({ \
+ long int __ret = __SYSCALL_CANCEL_CALL (__VA_ARGS__); \
+ __ret; \
+ })
+
#define lll_futex_wait(futexp, val, private) \
lll_futex_timed_wait (futexp, val, NULL, private)
@@ -140,6 +146,32 @@
private), \
nr_wake, nr_move, mutex, val)
-#endif /* !__ASSEMBLER__ */
+/* Cancellable futex macros. */
+#define lll_futex_wait_cancel(futexp, val, private) \
+ lll_futex_timed_wait_cancel (futexp, val, NULL, private)
+
+#define lll_futex_timed_wait_cancel(futexp, val, timeout, private) \
+ ({ \
+ long int __ret; \
+ int __op = FUTEX_WAIT; \
+ __ret = lll_futex_syscall_cp (futex, futexp, \
+ __lll_private_flag (__op, private), \
+ val, timeout); \
+ __ret; \
+ })
+
+#define lll_futex_timed_wait_bitset_cancel(futexp, val, timeout, \
+ clockbit, private) \
+ ({ \
+ long int __ret; \
+ int __op = FUTEX_WAIT_BITSET | clockbit; \
+ __ret = lll_futex_syscall_cp (futex, futexp, \
+ __lll_private_flag (__op, private), \
+ val, timeout, 0, \
+ FUTEX_BITSET_MATCH_ANY); \
+ __ret; \
+ })
+
+# endif /* !__ASSEMBLER__ */
#endif /* lowlevellock-futex.h */
@@ -42,9 +42,8 @@ __pthread_kill (pthread_t threadid, int signo)
/* Not a valid thread handle. */
return ESRCH;
- /* Disallow sending the signal we use for cancellation, timers,
- for the setxid implementation. */
- if (signo == SIGCANCEL || signo == SIGTIMER || signo == SIGSETXID)
+  /* Disallow sending the signal we use for the setxid implementation.  */
+ if (signo == SIGSETXID)
return EINVAL;
/* We have a special syscall to do the work. */
@@ -37,6 +37,3 @@ __sigwait (const sigset_t *set, int *sig)
libc_hidden_def (__sigwait)
weak_alias (__sigwait, sigwait)
strong_alias (__sigwait, __libc_sigwait)
-
-/* __sigtimedwait handles cancellation. */
-LIBC_CANCEL_HANDLED ();
@@ -28,6 +28,3 @@ __sigwaitinfo (const sigset_t *set, siginfo_t *info)
libc_hidden_def (__sigwaitinfo)
weak_alias (__sigwaitinfo, sigwaitinfo)
strong_alias (__sigwaitinfo, __libc_sigwaitinfo)
-
-/* __sigtimedwait handles cancellation. */
-LIBC_CANCEL_HANDLED ();
@@ -87,16 +87,39 @@
})
-#if IS_IN (libc)
-# define __pthread_enable_asynccancel __libc_enable_asynccancel
-# define __pthread_disable_asynccancel __libc_disable_asynccancel
-#endif
+#define __SOCKETCALL_CANCEL1(__name, __a1) \
+ SYSCALL_CANCEL_NCS (socketcall, __name, \
+ ((long int [1]) { (long int) __a1 }))
+#define __SOCKETCALL_CANCEL2(__name, __a1, __a2) \
+ SYSCALL_CANCEL_NCS (socketcall, __name, \
+ ((long int [2]) { (long int) __a1, (long int) __a2 }))
+#define __SOCKETCALL_CANCEL3(__name, __a1, __a2, __a3) \
+ SYSCALL_CANCEL_NCS (socketcall, __name, \
+ ((long int [3]) { (long int) __a1, (long int) __a2, (long int) __a3 }))
+#define __SOCKETCALL_CANCEL4(__name, __a1, __a2, __a3, __a4) \
+ SYSCALL_CANCEL_NCS (socketcall, __name, \
+ ((long int [4]) { (long int) __a1, (long int) __a2, (long int) __a3, \
+ (long int) __a4 }))
+#define __SOCKETCALL_CANCEL5(__name, __a1, __a2, __a3, __a4, __a5) \
+ SYSCALL_CANCEL_NCS (socketcall, __name, \
+ ((long int [5]) { (long int) __a1, (long int) __a2, (long int) __a3, \
+ (long int) __a4, (long int) __a5 }))
+#define __SOCKETCALL_CANCEL6(__name, __a1, __a2, __a3, __a4, __a5, __a6) \
+ SYSCALL_CANCEL_NCS (socketcall, __name, \
+ ((long int [6]) { (long int) __a1, (long int) __a2, (long int) __a3, \
+ (long int) __a4, (long int) __a5, (long int) __a6 }))
+
+#define __SOCKETCALL_CANCEL(...) __SOCKETCALL_DISP (__SOCKETCALL_CANCEL,\
+ __VA_ARGS__)
#define SOCKETCALL_CANCEL(name, args...) \
({ \
- int oldtype = LIBC_CANCEL_ASYNC (); \
- long int sc_ret = __SOCKETCALL (SOCKOP_##name, args); \
- LIBC_CANCEL_RESET (oldtype); \
+ long int sc_ret = __SOCKETCALL_CANCEL (SOCKOP_##name, args); \
+ if (sc_ret > -4096UL) \
+ { \
+ __set_errno (-sc_ret); \
+ sc_ret = -1L; \
+ } \
sc_ret; \
})
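
For socketcall targets the arguments are packed into an on-stack array whose
address becomes the second socketcall argument; a hand-expanded sketch for a
four-argument call (illustrative, following the macros above):

/* SOCKETCALL_CANCEL (recv, fd, buf, len, flags) expands roughly to:
     SYSCALL_CANCEL_NCS (socketcall, SOCKOP_recv,
                         ((long int [4]) { (long int) fd, (long int) buf,
                                           (long int) len, (long int) flags }))
   which in turn becomes
     __syscall_cancel (__NR_socketcall, __SSC (SOCKOP_recv),
                       __SSC (<address of the argument array>), 0, 0, 0, 0)
   followed by the inline errno conversion in SOCKETCALL_CANCEL itself.  */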
new file mode 100644
@@ -0,0 +1,62 @@
+/* Default cancellation syscall bridge.
+ Copyright (C) 2017 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+#include <pthreadP.h>
+
+#define ADD_LABEL(__label) \
+ asm volatile ( \
+ ".global " __label "\t\n" \
+ ".type " __label ",\%function\t\n" \
+ __label ":\n");
+
+/* This is the generic version of the cancellable syscall code which
+   adds the label guards (__syscall_cancel_arch_{start,end}) used by the
+   SIGCANCEL handler (sigcancel_handler in nptl-init.c) to check whether
+   the cancelled syscall has side effects that need to be reported to the
+   program.
+
+   An important constraint must be observed when using this generic
+   implementation: __syscall_cancel_arch_end must point to the instruction
+   immediately after the syscall one.  This is because the kernel signals
+   an interrupted syscall that has side effects by setting the signal
+   frame program counter right after the syscall instruction.
+
+   If the INTERNAL_SYSCALL_NCS macro uses more instructions to obtain the
+   error condition from the kernel (as on powerpc and sparc), uses an
+   out-of-line helper (as on ARM Thumb), or uses a kernel helper gate (as
+   on i686 or ia64), the architecture should either adjust the macro or
+   provide a custom __syscall_cancel_arch implementation.  */
+long int
+__syscall_cancel_arch (volatile int *ch, __syscall_arg_t nr,
+ __syscall_arg_t a1, __syscall_arg_t a2,
+ __syscall_arg_t a3, __syscall_arg_t a4,
+ __syscall_arg_t a5, __syscall_arg_t a6)
+{
+ ADD_LABEL ("__syscall_cancel_arch_start");
+ if (__glibc_unlikely (*ch & CANCELED_BITMASK))
+    __syscall_do_cancel ();
+
+  INTERNAL_SYSCALL_DECL (err);
+ long int result = INTERNAL_SYSCALL_NCS (nr, err, 6, a1, a2, a3, a4, a5, a6);
+ ADD_LABEL ("__syscall_cancel_arch_end");
+ if (INTERNAL_SYSCALL_ERROR_P (result, err))
+ return -INTERNAL_SYSCALL_ERRNO (result, err);
+ return result;
+}
+libc_hidden_def (__syscall_cancel_arch)
@@ -27,6 +27,23 @@
-1l; \
})
+/* Check the error from a cancellable syscall and set errno accordingly.
+   Linux uses a negative return value to indicate syscall errors, and
+   since version 2.1 the return value of a system call might be negative
+   even if the call succeeded (e.g., the `lseek' system call might return
+   a large offset).  The current contract is that the kernel makes sure
+   that no syscall returns a value in -1 .. -4095 as a valid result, so
+   we can safely test with -4095.  */
+#define SYSCALL_CANCEL_RET(__ret) \
+ ({ \
+ if (__ret > -4096UL) \
+ { \
+ __set_errno (-__ret); \
+ __ret = -1; \
+ } \
+ __ret; \
+ })
+
/* Provide a dummy argument that can be used to force register
alignment for register pairs if required by the syscall ABI. */
#ifdef __ASSUME_ALIGNED_REGISTER_PAIRS
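
A short worked example of the -4095 boundary used by SYSCALL_CANCEL_RET above
(hypothetical helper, for illustration only; 64-bit long assumed):

#include <errno.h>

/* Standalone illustration of the conversion performed by SYSCALL_CANCEL_RET
   (not part of the patch).  */
static long int
convert_sketch (long int ret)
{
  if ((unsigned long int) ret > -4096UL)
    {
      /* A raw kernel return of -EINTR (-4) lands here: errno becomes
         EINTR and the caller sees -1.  */
      errno = -ret;
      return -1;
    }
  /* A large positive result, such as an lseek offset, compares far below
     the window as unsigned and is passed through unchanged.  */
  return ret;
}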