2 /* Linuxthreads - a simple clone()-based implementation of Posix */
3 /* threads for Linux. */
4 /* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr) */
6 /* This program is free software; you can redistribute it and/or */
7 /* modify it under the terms of the GNU Library General Public License */
8 /* as published by the Free Software Foundation; either version 2 */
9 /* of the License, or (at your option) any later version. */
11 /* This program is distributed in the hope that it will be useful, */
12 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
13 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
14 /* GNU Library General Public License for more details. */
16 /* Thread creation, initialization, and basic low-level routines */
26 #include <sys/resource.h>
28 #include <shlib-compat.h>
30 #include "internals.h"
39 #if !defined __SIGRTMIN || (__SIGRTMAX - __SIGRTMIN) < 3
40 # error "This must not happen"
43 #if !(USE_TLS && HAVE___THREAD)
44 /* These variables are used by the setup code. */
/* NOTE(review): this is a lossy excerpt -- the definitions these
   comments refer to (presumably _errno and _h_errno, referenced below
   at the .p_h_errnop initializers) are missing from this view.  */
48 /* We need the global/static resolver state here. */
52 extern struct __res_state _res;
57 /* We need only a few variables. */
/* Static handle on the manager thread descriptor; in the !USE_TLS
   configuration a macro alias (&__pthread_manager_thread) is
   #defined further down instead.  */
58 static pthread_descr manager_thread;
62 /* Descriptor of the initial thread */
/* Statically-allocated descriptor for the program's initial (main)
   thread.  It is its own predecessor/successor on the circular live
   list and owns handle slot 0.  NOTE(review): the .p_errnop
   initializer, the matching #endif and the closing "};" are missing
   from this lossy excerpt.  */
64 struct _pthread_descr_struct __pthread_initial_thread = {
65 .p_header.data.self = &__pthread_initial_thread,
66 .p_nextlive = &__pthread_initial_thread,
67 .p_prevlive = &__pthread_initial_thread,
68 .p_tid = PTHREAD_THREADS_MAX,
69 .p_lock = &__pthread_handles[0].h_lock,
70 .p_start_args = PTHREAD_START_ARGS_INITIALIZER(NULL),
71 #if !(USE_TLS && HAVE___THREAD)
73 .p_h_errnop = &_h_errno,
77 .p_resume_count = __ATOMIC_INITIALIZER,
78 .p_alloca_cutoff = __MAX_ALLOCA_CUTOFF
81 /* Descriptor of the manager thread; none of this is used but the error
82 variables, the p_pid and p_priority fields,
83 and the address for identification. */
/* The manager thread services thread-creation/exit requests over a
   pipe.  Its statically-allocated descriptor owns handle slot 1.
   NOTE(review): the #endif for the !(USE_TLS && HAVE___THREAD)
   conditional and the closing "};" are missing from this excerpt.  */
85 #define manager_thread (&__pthread_manager_thread)
86 struct _pthread_descr_struct __pthread_manager_thread = {
87 .p_header.data.self = &__pthread_manager_thread,
88 .p_header.data.multiple_threads = 1,
89 .p_lock = &__pthread_handles[1].h_lock,
90 .p_start_args = PTHREAD_START_ARGS_INITIALIZER(__pthread_manager),
91 #if !(USE_TLS && HAVE___THREAD)
92 .p_errnop = &__pthread_manager_thread.p_errno,
95 .p_resume_count = __ATOMIC_INITIALIZER,
96 .p_alloca_cutoff = PTHREAD_STACK_MIN / 4
100 /* Pointer to the main thread (the father of the thread manager thread) */
101 /* Originally, this is the initial thread, but this changes after fork() */
/* NOTE(review): two definitions of __pthread_main_thread appear below;
   they are almost certainly the two branches of a USE_TLS conditional
   whose #if/#else/#endif lines are missing from this lossy excerpt.  */
104 pthread_descr __pthread_main_thread;
106 pthread_descr __pthread_main_thread = &__pthread_initial_thread;
109 /* Limit between the stack of the initial thread (above) and the
110 stacks of other threads (below). Aligned on a STACK_SIZE boundary. */
112 char *__pthread_initial_thread_bos;
114 /* File descriptor for sending requests to the thread manager. */
115 /* Initially -1, meaning that the thread manager is not running. */
117 int __pthread_manager_request = -1;
/* Set (never cleared) once more than one thread exists; mirrored into
   each descriptor's p_header.data.multiple_threads.  */
119 int __pthread_multiple_threads attribute_hidden;
121 /* Other end of the pipe for sending requests to the thread manager. */
123 int __pthread_manager_reader;
125 /* Limits of the thread manager stack */
127 char *__pthread_manager_thread_bos;
128 char *__pthread_manager_thread_tos;
130 /* For process-wide exit() */
132 int __pthread_exit_requested;
133 int __pthread_exit_code;
135 /* Maximum stack size. */
136 size_t __pthread_max_stacksize;
138 /* Nonzero if the machine has more than one processor. */
139 int __pthread_smp_kernel;
142 #if !__ASSUME_REALTIME_SIGNALS
143 /* Pointers that select new or old suspend/resume functions
144 based on availability of rt signals. */
/* Default to the legacy (SIGUSR-based) implementations; init_rtsigs
   below switches these to the *_new variants when real-time signals
   turn out to be available at run time.  */
146 void (*__pthread_restart)(pthread_descr) = __pthread_restart_old;
147 void (*__pthread_suspend)(pthread_descr) = __pthread_suspend_old;
148 int (*__pthread_timedsuspend)(pthread_descr, const struct timespec *) = __pthread_timedsuspend_old;
149 #endif /* __ASSUME_REALTIME_SIGNALS */
151 /* Communicate relevant LinuxThreads constants to gdb */
/* These const objects exist solely so that a debugger / libthread_db
   can read layout information from the inferior's memory.
   NOTE(review): the two offsetof() initializers below are visibly
   truncated -- their continuation lines (second offsetof argument and
   closing parenthesis) are missing from this lossy excerpt.  */
153 const int __pthread_threads_max = PTHREAD_THREADS_MAX;
154 const int __pthread_sizeof_handle = sizeof(struct pthread_handle_struct);
155 const int __pthread_offsetof_descr = offsetof(struct pthread_handle_struct,
157 const int __pthread_offsetof_pid = offsetof(struct _pthread_descr_struct,
159 const int __linuxthreads_pthread_sizeof_descr
160 = sizeof(struct _pthread_descr_struct);
162 const int __linuxthreads_initial_report_events;
164 const char __linuxthreads_version[] = VERSION;
166 /* Forward declarations */
168 static void pthread_onexit_process(int retcode, void *arg);
169 #ifndef HAVE_Z_NODELETE
170 static void pthread_atexit_process(void *arg, int retcode);
171 static void pthread_atexit_retcode(void *arg, int retcode);
/* NOTE(review): the #endif closing the HAVE_Z_NODELETE conditional is
   missing from this lossy excerpt.  */
173 static void pthread_handle_sigcancel(int sig);
174 static void pthread_handle_sigrestart(int sig);
175 static void pthread_handle_sigdebug(int sig);
177 /* Signal numbers used for the communication.
178 In these variables we keep track of the used variables. If the
179 platform does not support any real-time signals we will define the
180 values to some unreasonable value which will signal failing of all
181 the functions below. */
/* Defaults assume RT signals exist; init_rtsigs may fall back to
   SIGUSR1/SIGUSR2 and disable the debug signal (0).  */
182 int __pthread_sig_restart = __SIGRTMIN;
183 int __pthread_sig_cancel = __SIGRTMIN + 1;
184 int __pthread_sig_debug = __SIGRTMIN + 2;
186 extern int __libc_current_sigrtmin_private (void);
188 #if !__ASSUME_REALTIME_SIGNALS
189 static int rtsigs_initialized;
/* NOTE(review): the function header for this body (presumably the
   run-time RT-signal probe, e.g. "static void init_rtsigs (void)") is
   missing from this lossy excerpt, as are several braces/#if lines.
   Logic: run once; if no RT signals are available fall back to
   SIGUSR1/SIGUSR2 and disable the debug signal, otherwise install the
   new restart/suspend/timedsuspend implementations.  */
194 if (rtsigs_initialized)
197 if (__libc_current_sigrtmin_private () == -1)
199 __pthread_sig_restart = SIGUSR1;
200 __pthread_sig_cancel = SIGUSR2;
201 __pthread_sig_debug = 0;
205 __pthread_restart = __pthread_restart_new;
206 __pthread_suspend = __pthread_wait_for_restart_signal;
207 __pthread_timedsuspend = __pthread_timedsuspend_new;
210 rtsigs_initialized = 1;
215 /* Initialize the pthread library.
216 Initialization is split in two functions:
217 - a constructor function that blocks the __pthread_sig_restart signal
218 (must do this very early, since the program could capture the signal
219 mask with e.g. sigsetjmp before creating the first thread);
220 - a regular function called from pthread_create when needed. */
222 static void pthread_initialize(void) __attribute__((constructor));
224 #ifndef HAVE_Z_NODELETE
/* Weak so statically linked programs without a DSO handle still link.  */
225 extern void *__dso_handle __attribute__ ((weak));
229 #if defined USE_TLS && !defined SHARED
/* In the static USE_TLS case libc's TLS setup must be driven by us.  */
230 extern void __libc_setup_tls (size_t tcbsize, size_t tcbalign);
233 struct pthread_functions __pthread_functions =
/* Jump table handed to libc (via __libc_pthread_init) so that libc can
   call into libpthread without hard symbol dependencies.
   NOTE(review): the opening "{", several #endif lines and the closing
   "};" are missing from this lossy excerpt.  */
235 #if !(USE_TLS && HAVE___THREAD)
236 .ptr_pthread_internal_tsd_set = __pthread_internal_tsd_set,
237 .ptr_pthread_internal_tsd_get = __pthread_internal_tsd_get,
238 .ptr_pthread_internal_tsd_address = __pthread_internal_tsd_address,
240 .ptr_pthread_fork = __pthread_fork,
241 .ptr_pthread_attr_destroy = __pthread_attr_destroy,
242 #if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
243 .ptr___pthread_attr_init_2_0 = __pthread_attr_init_2_0,
245 .ptr___pthread_attr_init_2_1 = __pthread_attr_init_2_1,
246 .ptr_pthread_attr_getdetachstate = __pthread_attr_getdetachstate,
247 .ptr_pthread_attr_setdetachstate = __pthread_attr_setdetachstate,
248 .ptr_pthread_attr_getinheritsched = __pthread_attr_getinheritsched,
249 .ptr_pthread_attr_setinheritsched = __pthread_attr_setinheritsched,
250 .ptr_pthread_attr_getschedparam = __pthread_attr_getschedparam,
251 .ptr_pthread_attr_setschedparam = __pthread_attr_setschedparam,
252 .ptr_pthread_attr_getschedpolicy = __pthread_attr_getschedpolicy,
253 .ptr_pthread_attr_setschedpolicy = __pthread_attr_setschedpolicy,
254 .ptr_pthread_attr_getscope = __pthread_attr_getscope,
255 .ptr_pthread_attr_setscope = __pthread_attr_setscope,
256 .ptr_pthread_condattr_destroy = __pthread_condattr_destroy,
257 .ptr_pthread_condattr_init = __pthread_condattr_init,
258 .ptr___pthread_cond_broadcast = __pthread_cond_broadcast,
259 .ptr___pthread_cond_destroy = __pthread_cond_destroy,
260 .ptr___pthread_cond_init = __pthread_cond_init,
261 .ptr___pthread_cond_signal = __pthread_cond_signal,
262 .ptr___pthread_cond_wait = __pthread_cond_wait,
263 .ptr_pthread_equal = __pthread_equal,
264 .ptr___pthread_exit = __pthread_exit,
265 .ptr_pthread_getschedparam = __pthread_getschedparam,
266 .ptr_pthread_setschedparam = __pthread_setschedparam,
267 .ptr_pthread_mutex_destroy = __pthread_mutex_destroy,
268 .ptr_pthread_mutex_init = __pthread_mutex_init,
269 .ptr_pthread_mutex_lock = __pthread_mutex_lock,
270 .ptr_pthread_mutex_trylock = __pthread_mutex_trylock,
271 .ptr_pthread_mutex_unlock = __pthread_mutex_unlock,
272 .ptr_pthread_self = __pthread_self,
273 .ptr_pthread_setcancelstate = __pthread_setcancelstate,
274 .ptr_pthread_setcanceltype = __pthread_setcanceltype,
275 .ptr_pthread_do_exit = __pthread_do_exit,
276 .ptr_pthread_thread_self = __pthread_thread_self,
277 .ptr_pthread_cleanup_upto = __pthread_cleanup_upto,
278 .ptr_pthread_sigaction = __pthread_sigaction,
279 .ptr_pthread_sigwait = __pthread_sigwait,
280 .ptr_pthread_raise = __pthread_raise
/* NOTE(review): the "# else"/"# endif" pair selecting between these
   two definitions is missing from this excerpt.  */
283 # define ptr_pthread_functions &__pthread_functions
285 # define ptr_pthread_functions NULL
288 static int *__libc_multiple_threads_ptr;
290 /* Do some minimal initialization which has to be done during the
291 startup of the C library. */
/* Called very early from libc startup.  In the USE_TLS configuration it
   sets up the TLS block and thread register for the initial thread by
   hand (the static linker case), otherwise it just wires up the two
   static descriptors.  NOTE(review): this is a lossy excerpt -- the
   return-type line, braces, #if/#else/#endif directives and several
   declarations (e.g. "pthread_descr self", "tcbp") are missing, so the
   conditional structure below cannot be fully reconstructed here.  */
293 __pthread_initialize_minimal(void)
298 /* First of all init __pthread_handles[0] and [1] if needed. */
299 # if __LT_SPINLOCK_INIT != 0
300 __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
301 __pthread_handles[1].h_lock = __LOCK_INITIALIZER;
304 /* Unlike in the dynamically linked case the dynamic linker has not
305 taken care of initializing the TLS data structures. */
306 __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);
308 if (__builtin_expect (GL(dl_tls_max_dtv_idx) == 0, 0))
312 /* There is no actual TLS being used, so the thread register
313 was not initialized in the dynamic linker. */
315 /* We need to install special hooks so that the malloc and memalign
316 calls in _dl_tls_setup and _dl_allocate_tls won't cause full
317 malloc initialization that will try to set up its thread state. */
319 extern void __libc_malloc_pthread_startup (bool first_time);
320 __libc_malloc_pthread_startup (true);
322 if (__builtin_expect (_dl_tls_setup (), 0)
323 || __builtin_expect ((tcbp = _dl_allocate_tls (NULL)) == NULL, 0))
325 static const char msg[] = "\
326 cannot allocate TLS data structures for initial thread\n";
327 TEMP_FAILURE_RETRY (__libc_write (STDERR_FILENO,
328 msg, sizeof msg - 1));
/* Install the TCB in the thread register; a non-NULL return names the
   reason it cannot be done, which is fatal.  */
331 const char *lossage = TLS_INIT_TP (tcbp, 0);
332 if (__builtin_expect (lossage != NULL, 0))
334 static const char msg[] = "cannot set up thread-local storage: ";
335 const char nl = '\n';
336 TEMP_FAILURE_RETRY (__libc_write (STDERR_FILENO,
337 msg, sizeof msg - 1));
338 TEMP_FAILURE_RETRY (__libc_write (STDERR_FILENO,
339 lossage, strlen (lossage)));
340 TEMP_FAILURE_RETRY (__libc_write (STDERR_FILENO, &nl, 1));
343 /* Though it was allocated with libc's malloc, that was done without
344 the user's __malloc_hook installed. A later realloc that uses
345 the hooks might not work with that block from the plain malloc.
346 So we record this block as unfreeable just as the dynamic linker
347 does when it allocates the DTV before the libc malloc exists. */
348 GL(dl_initial_dtv) = GET_DTV (tcbp);
350 __libc_malloc_pthread_startup (false);
356 /* The memory for the thread descriptor was allocated elsewhere as
357 part of the TLS allocation. We have to initialize the data
358 structure by hand. This initialization must mirror the struct
360 self->p_nextlive = self->p_prevlive = self;
361 self->p_tid = PTHREAD_THREADS_MAX;
362 self->p_lock = &__pthread_handles[0].h_lock;
363 # ifndef HAVE___THREAD
364 self->p_errnop = &_errno;
365 self->p_h_errnop = &_h_errno;
367 /* self->p_start_args need not be initialized, it's all zero. */
368 self->p_userstack = 1;
369 # if __LT_SPINLOCK_INIT != 0
370 self->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER;
372 self->p_alloca_cutoff = __MAX_ALLOCA_CUTOFF;
374 /* Another variable which points to the thread descriptor. */
375 __pthread_main_thread = self;
377 /* And fill in the pointer to the thread __pthread_handles array. */
378 __pthread_handles[0].h_descr = self;
382 /* First of all init __pthread_handles[0] and [1]. */
383 # if __LT_SPINLOCK_INIT != 0
384 __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
385 __pthread_handles[1].h_lock = __LOCK_INITIALIZER;
387 __pthread_handles[0].h_descr = &__pthread_initial_thread;
388 __pthread_handles[1].h_descr = &__pthread_manager_thread;
390 /* If we have special thread_self processing, initialize that for the
392 # ifdef INIT_THREAD_SELF
393 INIT_THREAD_SELF(&__pthread_initial_thread, 0);
/* NOTE(review): the two p_cpuclock_offset assignments below are the
   USE_TLS and non-USE_TLS variants; the selecting directives are
   missing from this excerpt.  */
399 self->p_cpuclock_offset = GL(dl_cpuclock_offset);
401 __pthread_initial_thread.p_cpuclock_offset = GL(dl_cpuclock_offset);
/* Register our function table with libc; it returns the address of
   libc's own multiple-threads flag so we can set it later.  */
405 __libc_multiple_threads_ptr = __libc_pthread_init (ptr_pthread_functions);
/* Compute the per-thread maximum stack size from RLIMIT_STACK (or
   STACK_SIZE when stacks are at fixed addresses) and cache it in
   __pthread_max_stacksize; also cap the alloca cutoff accordingly.
   NOTE(review): lossy excerpt -- the return-type line, braces and the
   local declarations ("struct rlimit limit; size_t max_stack;") are
   missing from this view.  */
410 __pthread_init_max_stacksize(void)
415 getrlimit(RLIMIT_STACK, &limit);
416 #ifdef FLOATING_STACKS
417 if (limit.rlim_cur == RLIM_INFINITY)
418 limit.rlim_cur = ARCH_STACK_MAX_SIZE;
419 # ifdef NEED_SEPARATE_REGISTER_STACK
420 max_stack = limit.rlim_cur / 2;
422 max_stack = limit.rlim_cur;
425 /* Play with the stack size limit to make sure that no stack ever grows
426 beyond STACK_SIZE minus one page (to act as a guard page). */
427 # ifdef NEED_SEPARATE_REGISTER_STACK
428 /* STACK_SIZE bytes hold both the main stack and register backing
429 store. The rlimit value applies to each individually. */
430 max_stack = STACK_SIZE/2 - __getpagesize ();
432 max_stack = STACK_SIZE - __getpagesize();
434 if (limit.rlim_cur > max_stack) {
435 limit.rlim_cur = max_stack;
436 setrlimit(RLIMIT_STACK, &limit);
439 __pthread_max_stacksize = max_stack;
440 if (max_stack / 4 < __MAX_ALLOCA_CUTOFF)
443 pthread_descr self = THREAD_SELF;
444 self->p_alloca_cutoff = max_stack / 4;
/* Non-USE_TLS variant of the cutoff update above (selecting
   directives missing from this excerpt).  */
446 __pthread_initial_thread.p_alloca_cutoff = max_stack / 4;
453 /* When using __thread for this, we do it in libc so as not
454 to give libpthread its own TLS segment just for this. */
455 extern void **__libc_dl_error_tsd (void) __attribute__ ((const));
/* Non-__thread fallback: per-thread slot for the dynamic linker's
   error state, stored in the descriptor's libc-specific TSD array.
   NOTE(review): braces and the surrounding #else are missing from this
   lossy excerpt.  */
457 static void ** __attribute__ ((const))
458 __libc_dl_error_tsd (void)
460 return &thread_self ()->p_libc_specific[_LIBC_TSD_KEY_DL_ERROR];
/* Library constructor: one-time process-level setup -- stack layout
   bookkeeping, signal handlers shared by all threads, the atexit hook
   that kills remaining threads, SMP detection and the dl-error TSD
   hand-over.  Idempotent via the _bos != NULL check.
   NOTE(review): lossy excerpt -- braces, local declarations
   ("struct sigaction sa; sigset_t mask;") and several #else/#endif
   lines are missing from this view.  */
465 static void pthread_initialize(void)
470 /* If already done (e.g. by a constructor called earlier!), bail out */
471 if (__pthread_initial_thread_bos != NULL) return;
472 #ifdef TEST_FOR_COMPARE_AND_SWAP
473 /* Test if compare-and-swap is available */
474 __pthread_has_cas = compare_and_swap_is_available();
476 #ifdef FLOATING_STACKS
477 /* We don't need to know the bottom of the stack. Give the pointer some
478 value to signal that initialization happened. */
479 __pthread_initial_thread_bos = (void *) -1l;
481 /* Determine stack size limits . */
482 __pthread_init_max_stacksize ();
483 # ifdef _STACK_GROWS_UP
484 /* The initial thread already has all the stack it needs */
485 __pthread_initial_thread_bos = (char *)
486 ((long)CURRENT_STACK_FRAME &~ (STACK_SIZE - 1));
488 /* For the initial stack, reserve at least STACK_SIZE bytes of stack
489 below the current stack address, and align that on a
490 STACK_SIZE boundary. */
491 __pthread_initial_thread_bos =
492 (char *)(((long)CURRENT_STACK_FRAME - 2 * STACK_SIZE) & ~(STACK_SIZE - 1));
496 /* Update the descriptor for the initial thread. */
497 THREAD_SETMEM (((pthread_descr) NULL), p_pid, __getpid());
498 # ifndef HAVE___THREAD
499 /* Likewise for the resolver state _res. */
500 THREAD_SETMEM (((pthread_descr) NULL), p_resp, &_res);
/* Non-USE_TLS variant of the descriptor updates above (selecting
   directives missing from this excerpt).  */
503 /* Update the descriptor for the initial thread. */
504 __pthread_initial_thread.p_pid = __getpid();
505 /* Likewise for the resolver state _res. */
506 __pthread_initial_thread.p_resp = &_res;
508 #if !__ASSUME_REALTIME_SIGNALS
509 /* Initialize real-time signals. */
512 /* Setup signal handlers for the initial thread.
513 Since signal handlers are shared between threads, these settings
514 will be inherited by all other threads. */
515 sa.sa_handler = pthread_handle_sigrestart;
516 sigemptyset(&sa.sa_mask);
518 __libc_sigaction(__pthread_sig_restart, &sa, NULL);
519 sa.sa_handler = pthread_handle_sigcancel;
521 __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
522 if (__pthread_sig_debug > 0) {
523 sa.sa_handler = pthread_handle_sigdebug;
524 sigemptyset(&sa.sa_mask);
526 __libc_sigaction(__pthread_sig_debug, &sa, NULL);
528 /* Initially, block __pthread_sig_restart. Will be unblocked on demand. */
530 sigaddset(&mask, __pthread_sig_restart);
531 sigprocmask(SIG_BLOCK, &mask, NULL);
532 /* Register an exit function to kill all other threads. */
533 /* Do it early so that user-registered atexit functions are called
534 before pthread_*exit_process. */
535 #ifndef HAVE_Z_NODELETE
536 if (__builtin_expect (&__dso_handle != NULL, 1))
537 __cxa_atexit ((void (*) (void *)) pthread_atexit_process, NULL,
541 __on_exit (pthread_onexit_process, NULL);
542 /* How many processors. */
543 __pthread_smp_kernel = is_smp_system ();
546 /* Transfer the old value from the dynamic linker's internal location. */
547 *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) ();
548 GL(dl_error_catch_tsd) = &__libc_dl_error_tsd;
/* Public entry point for explicit initialization; simply runs the
   constructor body (which is idempotent).  Braces are missing from
   this lossy excerpt.  */
552 void __pthread_initialize(void)
554 pthread_initialize();
/* Start the thread manager: allocate its stack and (when USE_TLS) its
   TLS/descriptor, create the request pipe, clone() the manager, and
   synchronize with the debugger / event reporting machinery.  Returns
   0 on success, -1 on failure (with all partial resources released).
   NOTE(review): lossy excerpt -- braces, several local declarations
   ("int manager_pipe[2]; int pid; union dtv *tcbp; pthread_descr mgr;"
   etc.) and many #if/#else/#endif lines are missing from this view.  */
557 int __pthread_initialize_manager(void)
561 struct pthread_request request;
/* From here on more than one thread exists; publish that fact to both
   libpthread and libc before any other thread can run.  */
568 __pthread_multiple_threads = 1;
569 __pthread_main_thread->p_header.data.multiple_threads = 1;
570 * __libc_multiple_threads_ptr = 1;
572 #ifndef HAVE_Z_NODELETE
573 if (__builtin_expect (&__dso_handle != NULL, 1))
574 __cxa_atexit ((void (*) (void *)) pthread_atexit_retcode, NULL,
578 if (__pthread_max_stacksize == 0)
579 __pthread_init_max_stacksize ();
580 /* If basic initialization not done yet (e.g. we're called from a
581 constructor run before our constructor), do it now */
582 if (__pthread_initial_thread_bos == NULL) pthread_initialize();
583 /* Setup stack for thread manager */
584 __pthread_manager_thread_bos = malloc(THREAD_MANAGER_STACK_SIZE);
585 if (__pthread_manager_thread_bos == NULL) return -1;
586 __pthread_manager_thread_tos =
587 __pthread_manager_thread_bos + THREAD_MANAGER_STACK_SIZE;
588 /* Setup pipe to communicate with thread manager */
589 if (pipe(manager_pipe) == -1) {
590 free(__pthread_manager_thread_bos);
595 /* Allocate memory for the thread descriptor and the dtv. */
596 tcbp = _dl_allocate_tls (NULL);
/* Allocation-failure path: unwind the stack and both pipe ends.  */
598 free(__pthread_manager_thread_bos);
599 __libc_close(manager_pipe[0]);
600 __libc_close(manager_pipe[1]);
/* TLS_TCB_AT_TP vs TLS_DTV_AT_TP placement of the descriptor relative
   to the TCB (selecting directives missing from this excerpt).  */
605 mgr = (pthread_descr) tcbp;
607 /* pthread_descr is located right below tcbhead_t which _dl_allocate_tls
609 mgr = (pthread_descr) tcbp - 1;
611 __pthread_handles[1].h_descr = manager_thread = mgr;
613 /* Initialize the descriptor. */
614 mgr->p_header.data.tcb = tcbp;
615 mgr->p_header.data.self = mgr;
616 mgr->p_header.data.multiple_threads = 1;
617 mgr->p_lock = &__pthread_handles[1].h_lock;
618 # ifndef HAVE___THREAD
619 mgr->p_errnop = &mgr->p_errno;
621 mgr->p_start_args = (struct pthread_start_args) PTHREAD_START_ARGS_INITIALIZER(__pthread_manager);
623 # if __LT_SPINLOCK_INIT != 0
/* NOTE(review): "self->" here looks like a copy/paste slip -- every
   surrounding assignment in this section initializes "mgr->"; confirm
   against the upstream source before relying on it.  */
624 self->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER;
626 mgr->p_alloca_cutoff = PTHREAD_STACK_MIN / 4;
/* Non-USE_TLS branch: fall back to the static manager descriptor.  */
628 mgr = &__pthread_manager_thread;
631 __pthread_manager_request = manager_pipe[1]; /* writing end */
632 __pthread_manager_reader = manager_pipe[0]; /* reading end */
634 /* Start the thread manager */
/* Debugger event-reporting setup: propagate the initial event mask
   into the descriptor before the manager runs.  */
637 if (__linuxthreads_initial_report_events != 0)
638 THREAD_SETMEM (((pthread_descr) NULL), p_report_events,
639 __linuxthreads_initial_report_events);
640 report_events = THREAD_GETMEM (((pthread_descr) NULL), p_report_events);
642 if (__linuxthreads_initial_report_events != 0)
643 __pthread_initial_thread.p_report_events
644 = __linuxthreads_initial_report_events;
645 report_events = __pthread_initial_thread.p_report_events;
647 if (__builtin_expect (report_events, 0))
649 /* It's a bit more complicated. We have to report the creation of
650 the manager thread. */
651 int idx = __td_eventword (TD_CREATE);
652 uint32_t mask = __td_eventmask (TD_CREATE);
656 event_bits = THREAD_GETMEM_NC (((pthread_descr) NULL),
657 p_eventbuf.eventmask.event_bits[idx]);
659 event_bits = __pthread_initial_thread.p_eventbuf.eventmask.event_bits[idx];
662 if ((mask & (__pthread_threads_events.event_bits[idx] | event_bits))
/* Keep the manager locked until the TD_CREATE event is delivered.  */
665 __pthread_lock(mgr->p_lock, NULL);
667 #ifdef NEED_SEPARATE_REGISTER_STACK
668 pid = __clone2(__pthread_manager_event,
669 (void **) __pthread_manager_thread_bos,
670 THREAD_MANAGER_STACK_SIZE,
671 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
673 #elif _STACK_GROWS_UP
674 pid = __clone(__pthread_manager_event,
675 (void **) __pthread_manager_thread_bos,
676 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
679 pid = __clone(__pthread_manager_event,
680 (void **) __pthread_manager_thread_tos,
681 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
687 /* Now fill in the information about the new thread in
688 the newly created thread's data structure. We cannot let
689 the new thread do this since we don't know whether it was
690 already scheduled when we send the event. */
691 mgr->p_eventbuf.eventdata = mgr;
692 mgr->p_eventbuf.eventnum = TD_CREATE;
693 __pthread_last_event = mgr;
694 mgr->p_tid = 2* PTHREAD_THREADS_MAX + 1;
697 /* Now call the function which signals the event. */
698 __linuxthreads_create_event ();
701 /* Now restart the thread. */
702 __pthread_unlock(mgr->p_lock);
/* Normal (non-event-reporting) clone path.  */
706 if (__builtin_expect (pid, 0) == 0)
708 #ifdef NEED_SEPARATE_REGISTER_STACK
709 pid = __clone2(__pthread_manager, (void **) __pthread_manager_thread_bos,
710 THREAD_MANAGER_STACK_SIZE,
711 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, mgr);
712 #elif _STACK_GROWS_UP
713 pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_bos,
714 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, mgr);
716 pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_tos,
717 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, mgr);
720 if (__builtin_expect (pid, 0) == -1) {
721 free(__pthread_manager_thread_bos);
722 __libc_close(manager_pipe[0]);
723 __libc_close(manager_pipe[1]);
/* Manager gets a tid outside the normal range so it is never confused
   with a user thread.  */
726 mgr->p_tid = 2* PTHREAD_THREADS_MAX + 1;
728 /* Make gdb aware of new thread manager */
729 if (__builtin_expect (__pthread_threads_debug, 0) && __pthread_sig_debug > 0)
731 raise(__pthread_sig_debug);
732 /* We suspend ourself and gdb will wake us up when it is
733 ready to handle us. */
734 __pthread_wait_for_restart_signal(thread_self());
736 /* Synchronize debugging of the thread manager */
737 request.req_kind = REQ_DEBUG;
738 TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
739 (char *) &request, sizeof(request)));
743 /* Thread creation */
/* pthread_create (GLIBC_2.1 ABI): lazily start the manager, then ship
   a REQ_CREATE request (including the caller's signal mask) down the
   manager pipe and block until the manager posts the result into our
   descriptor.  Returns 0 on success or an errno-style code (EAGAIN on
   manager failure).  NOTE(review): lossy excerpt -- braces, the
   "int retval;" declaration and the suspend/return lines between the
   write and the p_retcode read are missing from this view.  */
745 int __pthread_create_2_1(pthread_t *thread, const pthread_attr_t *attr,
746 void * (*start_routine)(void *), void *arg)
748 pthread_descr self = thread_self();
749 struct pthread_request request;
751 if (__builtin_expect (__pthread_manager_request, 0) < 0) {
752 if (__pthread_initialize_manager() < 0) return EAGAIN;
754 request.req_thread = self;
755 request.req_kind = REQ_CREATE;
756 request.req_args.create.attr = attr;
757 request.req_args.create.fn = start_routine;
758 request.req_args.create.arg = arg;
759 sigprocmask(SIG_SETMASK, (const sigset_t *) NULL,
760 &request.req_args.create.mask);
761 TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
762 (char *) &request, sizeof(request)));
764 retval = THREAD_GETMEM(self, p_retcode);
765 if (__builtin_expect (retval, 0) == 0)
766 *thread = (pthread_t) THREAD_GETMEM(self, p_retval);
770 versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);
772 #if SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1)
/* pthread_create (GLIBC_2.0 compat ABI): widen the old, shorter
   pthread_attr_t into the current layout before delegating to the 2.1
   implementation.  NOTE(review): braces and the NULL-attr check
   selecting between the two return paths are missing from this lossy
   excerpt.  */
774 int __pthread_create_2_0(pthread_t *thread, const pthread_attr_t *attr,
775 void * (*start_routine)(void *), void *arg)
777 /* The ATTR attribute is not really of type `pthread_attr_t *'. It has
778 the old size and access to the new members might crash the program.
779 We convert the struct now. */
780 pthread_attr_t new_attr;
784 size_t ps = __getpagesize ();
/* Copy only the old-ABI prefix (everything before __guardsize), then
   fill the new members with defaults.  */
786 memcpy (&new_attr, attr,
787 (size_t) &(((pthread_attr_t*)NULL)->__guardsize));
788 new_attr.__guardsize = ps;
789 new_attr.__stackaddr_set = 0;
790 new_attr.__stackaddr = NULL;
791 new_attr.__stacksize = STACK_SIZE - ps;
794 return __pthread_create_2_1 (thread, attr, start_routine, arg);
796 compat_symbol (libpthread, __pthread_create_2_0, pthread_create, GLIBC_2_0);
799 /* Simple operations on thread identifiers */
/* Internal: the calling thread's descriptor (braces missing from this
   lossy excerpt).  */
801 pthread_descr __pthread_thread_self(void)
803 return thread_self();
/* pthread_self(): the calling thread's tid as stored in its
   descriptor.  */
806 pthread_t __pthread_self(void)
808 pthread_descr self = thread_self();
809 return THREAD_GETMEM(self, p_tid);
811 strong_alias (__pthread_self, pthread_self);
/* pthread_equal(): tids are plain integers here, so identity is ==.  */
813 int __pthread_equal(pthread_t thread1, pthread_t thread2)
815 return thread1 == thread2;
817 strong_alias (__pthread_equal, pthread_equal);
819 /* Helper function for thread_self in the case of user-provided stacks */
/* Scan the handle table for the descriptor whose stack contains the
   current stack pointer.  Slots 0/1 (initial and manager thread) are
   resolved by thread_self() itself, so scanning starts at 2.
   NOTE(review): lossy excerpt -- braces, the "pthread_handle h;"
   declarations, return statements and the #ifdef selecting between
   these two variants are missing from this view.  */
823 pthread_descr __pthread_find_self(void)
825 char * sp = CURRENT_STACK_FRAME;
828 /* __pthread_handles[0] is the initial thread, __pthread_handles[1] is
829 the manager threads handled specially in thread_self(), so start at 2 */
830 h = __pthread_handles + 2;
831 while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom)) h++;
/* Variant used when descriptors live in TLS rather than on the stack:
   also recognizes the manager's malloc'd stack explicitly.  */
837 static pthread_descr thread_self_stack(void)
839 char *sp = CURRENT_STACK_FRAME;
842 if (sp >= __pthread_manager_thread_bos && sp < __pthread_manager_thread_tos)
843 return manager_thread;
844 h = __pthread_handles + 2;
846 while (h->h_descr == NULL
847 || ! (sp <= (char *) h->h_descr->p_stackaddr && sp >= h->h_bottom))
850 while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom))
858 /* Thread scheduling */
/* pthread_setschedparam(): validate the handle under its lock, apply
   the policy via sched_setscheduler on the kernel thread, cache the
   priority, and let the manager re-rank itself if it is running.
   NOTE(review): braces, "pthread_descr th;" and the error-return lines
   (ESRCH / errno) are missing from this lossy excerpt.  */
860 int __pthread_setschedparam(pthread_t thread, int policy,
861 const struct sched_param *param)
863 pthread_handle handle = thread_handle(thread);
866 __pthread_lock(&handle->h_lock, NULL);
867 if (__builtin_expect (invalid_handle(handle, thread), 0)) {
868 __pthread_unlock(&handle->h_lock);
871 th = handle->h_descr;
872 if (__builtin_expect (__sched_setscheduler(th->p_pid, policy, param) == -1,
874 __pthread_unlock(&handle->h_lock);
877 th->p_priority = policy == SCHED_OTHER ? 0 : param->sched_priority;
878 __pthread_unlock(&handle->h_lock);
879 if (__pthread_manager_request >= 0)
880 __pthread_manager_adjust_prio(th->p_priority);
883 strong_alias (__pthread_setschedparam, pthread_setschedparam);
/* pthread_getschedparam(): read pid under the handle lock, then query
   the kernel outside the lock.  */
885 int __pthread_getschedparam(pthread_t thread, int *policy,
886 struct sched_param *param)
888 pthread_handle handle = thread_handle(thread);
891 __pthread_lock(&handle->h_lock, NULL);
892 if (__builtin_expect (invalid_handle(handle, thread), 0)) {
893 __pthread_unlock(&handle->h_lock);
896 pid = handle->h_descr->p_pid;
897 __pthread_unlock(&handle->h_lock);
898 pol = __sched_getscheduler(pid);
899 if (__builtin_expect (pol, 0) == -1) return errno;
900 if (__sched_getparam(pid, param) == -1) return errno;
904 strong_alias (__pthread_getschedparam, pthread_getschedparam);
/* pthread_yield (non-standard): delegate to sched_yield.  Braces are
   missing from this lossy excerpt.  */
906 int __pthread_yield (void)
908 /* For now this is equivalent with the POSIX call. */
909 return sched_yield ();
911 weak_alias (__pthread_yield, pthread_yield)
913 /* Process-wide exit() request */
/* atexit hook: ask the manager to terminate every other thread with
   the process exit code, then (in the main thread) reap the manager so
   its CPU time is accounted to us.  NOTE(review): braces and some
   #if USE_TLS selection lines around the two waitpid variants are
   missing from this lossy excerpt.  */
915 static void pthread_onexit_process(int retcode, void *arg)
917 if (__builtin_expect (__pthread_manager_request, 0) >= 0) {
918 struct pthread_request request;
919 pthread_descr self = thread_self();
921 request.req_thread = self;
922 request.req_kind = REQ_PROCESS_EXIT;
923 request.req_args.exit.code = retcode;
924 TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
925 (char *) &request, sizeof(request)));
927 /* Main thread should accumulate times for thread manager and its
928 children, so that timings for main thread account for all threads. */
929 if (self == __pthread_main_thread)
932 waitpid(manager_thread->p_pid, NULL, __WCLONE);
934 waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
936 /* Since all threads have been asynchronously terminated
937 (possibly holding locks), free cannot be used any more. */
938 /*free (__pthread_manager_thread_bos);*/
939 __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
944 #ifndef HAVE_Z_NODELETE
/* Without -z nodelete, __cxa_atexit handlers lose the exit code, so
   pthread_atexit_retcode stashes it and pthread_atexit_process (the
   outer, earlier-registered hook) forwards it.  Braces and the closing
   #endif are missing from this lossy excerpt.  */
945 static int __pthread_atexit_retcode;
947 static void pthread_atexit_process(void *arg, int retcode)
949 pthread_onexit_process (retcode ?: __pthread_atexit_retcode, arg);
952 static void pthread_atexit_retcode(void *arg, int retcode)
954 __pthread_atexit_retcode = retcode;
958 /* The handler for the RESTART signal just records the signal received
959 in the thread descriptor, and optionally performs a siglongjmp
960 (for pthread_cond_timedwait). */
/* Braces are missing from this lossy excerpt.  */
962 static void pthread_handle_sigrestart(int sig)
964 pthread_descr self = thread_self();
965 THREAD_SETMEM(self, p_signal, sig);
966 if (THREAD_GETMEM(self, p_signal_jmp) != NULL)
967 siglongjmp(*THREAD_GETMEM(self, p_signal_jmp), 1);
970 /* The handler for the CANCEL signal checks for cancellation
971 (in asynchronous mode), for process-wide exit and exec requests.
972 For the thread manager thread, redirect the signal to
973 __pthread_manager_sighandler. */
/* NOTE(review): lossy excerpt -- braces, the "sigjmp_buf * jmpbuf;"
   declaration, several return statements and #if USE_TLS selection
   lines are missing from this view.  */
975 static void pthread_handle_sigcancel(int sig)
977 pthread_descr self = thread_self();
980 if (self == manager_thread)
983 /* A new thread might get a cancel signal before it is fully
984 initialized, so that the thread register might still point to the
985 manager thread. Double check that this is really the manager
987 pthread_descr real_self = thread_self_stack();
988 if (real_self == manager_thread)
990 __pthread_manager_sighandler(sig);
993 /* Oops, thread_self() isn't working yet.. */
995 # ifdef INIT_THREAD_SELF
996 INIT_THREAD_SELF(self, self->p_nr);
999 __pthread_manager_sighandler(sig);
/* Process-wide exit: the main thread first reaps the manager so all
   child times are accounted to it, then everyone _exit()s.  */
1003 if (__builtin_expect (__pthread_exit_requested, 0)) {
1004 /* Main thread should accumulate times for thread manager and its
1005 children, so that timings for main thread account for all threads. */
1006 if (self == __pthread_main_thread) {
1008 waitpid(manager_thread->p_pid, NULL, __WCLONE);
1010 waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
1013 _exit(__pthread_exit_code);
/* Asynchronous cancellation: exit immediately, or longjmp out of an
   interruptible wait if one registered a cancel jump buffer.  */
1015 if (__builtin_expect (THREAD_GETMEM(self, p_canceled), 0)
1016 && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
1017 if (THREAD_GETMEM(self, p_canceltype) == PTHREAD_CANCEL_ASYNCHRONOUS)
1018 __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
1019 jmpbuf = THREAD_GETMEM(self, p_cancel_jmp);
1020 if (jmpbuf != NULL) {
1021 THREAD_SETMEM(self, p_cancel_jmp, NULL);
1022 siglongjmp(*jmpbuf, 1);
1027 /* Handler for the DEBUG signal.
1028 The debugging strategy is as follows:
1029 On reception of a REQ_DEBUG request (sent by new threads created to
1030 the thread manager under debugging mode), the thread manager throws
1031 __pthread_sig_debug to itself. The debugger (if active) intercepts
1032 this signal, takes into account new threads and continue execution
1033 of the thread manager by propagating the signal because it doesn't
1034 know what it is specifically done for. In the current implementation,
1035 the thread manager simply discards it. */
/* Intentionally empty body (braces missing from this lossy excerpt).  */
1037 static void pthread_handle_sigdebug(int sig)
1042 /* Reset the state of the thread machinery after a fork().
1043 Close the pipe used for requests and set the main thread to the forked
1045 Notice that we can't free the stack segments, as the forked thread
1046 may hold pointers into them. */
/* NOTE(review): braces and a few structural lines are missing from
   this lossy excerpt.  */
1048 void __pthread_reset_main_thread(void)
1050 pthread_descr self = thread_self();
1052 if (__pthread_manager_request != -1) {
1053 /* Free the thread manager stack */
1054 free(__pthread_manager_thread_bos);
1055 __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
1056 /* Close the two ends of the pipe */
1057 __libc_close(__pthread_manager_request);
1058 __libc_close(__pthread_manager_reader);
1059 __pthread_manager_request = __pthread_manager_reader = -1;
1062 /* Update the pid of the main thread */
1063 THREAD_SETMEM(self, p_pid, __getpid());
1064 /* Make the forked thread the main thread */
1065 __pthread_main_thread = self;
1066 THREAD_SETMEM(self, p_nextlive, self);
1067 THREAD_SETMEM(self, p_prevlive, self);
1068 #if !(USE_TLS && HAVE___THREAD)
1069 /* Now this thread modifies the global variables. */
1070 THREAD_SETMEM(self, p_errnop, &_errno);
1071 THREAD_SETMEM(self, p_h_errnop, &_h_errno);
1072 THREAD_SETMEM(self, p_resp, &_res);
1075 #ifndef FLOATING_STACKS
1076 /* This is to undo the setrlimit call in __pthread_init_max_stacksize.
1077 XXX This can be wrong if the user set the limit during the run. */
1079 struct rlimit limit;
1080 if (getrlimit (RLIMIT_STACK, &limit) == 0
1081 && limit.rlim_cur != limit.rlim_max)
1083 limit.rlim_cur = limit.rlim_max;
1084 setrlimit(RLIMIT_STACK, &limit);
1090 /* Process-wide exec() request */
1092 void __pthread_kill_other_threads_np(void)
1094 struct sigaction sa;
1095 /* Terminate all other threads and thread manager */
1096 pthread_onexit_process(0, NULL);
1097 /* Make current thread the main thread in case the calling thread
1098 changes its mind, does not exec(), and creates new threads instead. */
1099 __pthread_reset_main_thread();
1101 /* Reset the signal handlers behaviour for the signals the
1102 implementation uses since this would be passed to the new
1104 sigemptyset(&sa.sa_mask);
1106 sa.sa_handler = SIG_DFL;
1107 __libc_sigaction(__pthread_sig_restart, &sa, NULL);
1108 __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
1109 if (__pthread_sig_debug > 0)
1110 __libc_sigaction(__pthread_sig_debug, &sa, NULL);
1112 weak_alias (__pthread_kill_other_threads_np, pthread_kill_other_threads_np)
/* Concurrency symbol level.  The implementation attaches no meaning to
   it; we only remember what the application last requested. */
static int current_level;

/* Record LEVEL as the desired concurrency level.  Always succeeds:
   we don't do anything with the value unless we have found a useful
   interpretation for it. */
int __pthread_setconcurrency(int level)
{
  current_level = level;
  return 0;
}
1123 weak_alias (__pthread_setconcurrency, pthread_setconcurrency)
1125 int __pthread_getconcurrency(void)
1127 return current_level;
1129 weak_alias (__pthread_getconcurrency, pthread_getconcurrency)
1131 /* Primitives for controlling thread execution */
1133 void __pthread_wait_for_restart_signal(pthread_descr self)
1137 sigprocmask(SIG_SETMASK, NULL, &mask); /* Get current signal mask */
1138 sigdelset(&mask, __pthread_sig_restart); /* Unblock the restart signal */
1139 THREAD_SETMEM(self, p_signal, 0);
1141 sigsuspend(&mask); /* Wait for signal */
1142 } while (THREAD_GETMEM(self, p_signal) !=__pthread_sig_restart);
1144 READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
1147 #if !__ASSUME_REALTIME_SIGNALS
1148 /* The _old variants are for 2.0 and early 2.1 kernels which don't have RT
1150 On these kernels, we use SIGUSR1 and SIGUSR2 for restart and cancellation.
1151 Since the restart signal does not queue, we use an atomic counter to create
1152 queuing semantics. This is needed to resolve a rare race condition in
1153 pthread_cond_timedwait_relative. */
1155 void __pthread_restart_old(pthread_descr th)
1157 if (atomic_increment(&th->p_resume_count) == -1)
1158 kill(th->p_pid, __pthread_sig_restart);
1161 void __pthread_suspend_old(pthread_descr self)
1163 if (atomic_decrement(&self->p_resume_count) <= 0)
1164 __pthread_wait_for_restart_signal(self);
/* Timed suspend for kernels without RT signals.  Suspend SELF until the
   restart signal arrives or ABSTIME passes.  Returns nonzero iff the
   wakeup was caused by a restart signal; on a 0 return a restart may
   still be in flight and the caller must resolve the race (see the long
   comment before the final test below).
   NOTE(review): the `int' return type and the declarations of `jmpbuf'
   and `now' live on lines outside this hunk. */
__pthread_timedsuspend_old(pthread_descr self, const struct timespec *abstime)
  sigset_t unblock, initial_mask;
  int was_signalled = 0;

  /* Consume one queued restart if pending; sleep only when none is. */
  if (atomic_decrement(&self->p_resume_count) == 0) {
    /* Set up a longjmp handler for the restart signal, unblock
       the signal and sleep. */
    if (sigsetjmp(jmpbuf, 1) == 0) {
      /* Publish the jump buffer so the restart handler can longjmp us
         out of nanosleep, then clear the last-signal record. */
      THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
      THREAD_SETMEM(self, p_signal, 0);
      /* Unblock the restart signal */
      sigemptyset(&unblock);
      sigaddset(&unblock, __pthread_sig_restart);
      sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);

      struct timespec reltime;

      /* Compute a time offset relative to now. */
      __gettimeofday (&now, NULL);
      reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
      reltime.tv_sec = abstime->tv_sec - now.tv_sec;
      if (reltime.tv_nsec < 0) {
        /* Normalize: borrow one second into the nanosecond field. */
        reltime.tv_nsec += 1000000000;
        reltime.tv_sec -= 1;

      /* Sleep for the required duration. If woken by a signal,
         resume waiting as required by Single Unix Specification. */
      if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)

    /* Block the restart signal again */
    sigprocmask(SIG_SETMASK, &initial_mask, NULL);

  THREAD_SETMEM(self, p_signal_jmp, NULL);

  /* Now was_signalled is true if we exited the above code
     due to the delivery of a restart signal. In that case,
     we know we have been dequeued and resumed and that the
     resume count is balanced. Otherwise, there are some
     cases to consider. First, try to bump up the resume count
     back to zero. If it goes to 1, it means restart() was
     invoked on this thread. The signal must be consumed
     and the count bumped down and everything is cool. We
     can return a 1 to the caller.
     Otherwise, no restart was delivered yet, so a potential
     race exists; we return a 0 to the caller which must deal
     with this race in an appropriate way; for example by
     atomically removing the thread from consideration for a
     wakeup---if such a thing fails, it means a restart is
     being delivered. */
  if (!was_signalled) {
    if (atomic_increment(&self->p_resume_count) != -1) {
      __pthread_wait_for_restart_signal(self);
      atomic_decrement(&self->p_resume_count); /* should be zero now! */
      /* woke spontaneously and consumed restart signal */
    /* woke spontaneously but did not consume restart---caller must resolve */
  /* woken due to restart signal */
1243 #endif /* __ASSUME_REALTIME_SIGNALS */
1245 void __pthread_restart_new(pthread_descr th)
1247 /* The barrier is proabably not needed, in which case it still documents
1248 our assumptions. The intent is to commit previous writes to shared
1249 memory so the woken thread will have a consistent view. Complementary
1250 read barriers are present to the suspend functions. */
1251 WRITE_MEMORY_BARRIER();
1252 kill(th->p_pid, __pthread_sig_restart);
1255 /* There is no __pthread_suspend_new because it would just
1256 be a wasteful wrapper for __pthread_wait_for_restart_signal */
/* Timed suspend for kernels with RT signals.  Suspend SELF until the
   restart signal arrives or ABSTIME passes.  Returns nonzero iff woken
   by the restart signal; a 0 return is ambiguous (timeout or unrelated
   signal) and the caller must resolve the remaining wakeup race.
   NOTE(review): the `int' return type and the declarations of `jmpbuf'
   and `now' live on lines outside this hunk. */
__pthread_timedsuspend_new(pthread_descr self, const struct timespec *abstime)
  sigset_t unblock, initial_mask;
  int was_signalled = 0;

  /* Arrange for the restart handler to longjmp us out of the sleep. */
  if (sigsetjmp(jmpbuf, 1) == 0) {
    THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
    THREAD_SETMEM(self, p_signal, 0);
    /* Unblock the restart signal */
    sigemptyset(&unblock);
    sigaddset(&unblock, __pthread_sig_restart);
    sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);

    struct timespec reltime;

    /* Compute a time offset relative to now. */
    __gettimeofday (&now, NULL);
    reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
    if (reltime.tv_nsec < 0) {
      /* Normalize: borrow one second into the nanosecond field. */
      reltime.tv_nsec += 1000000000;
      reltime.tv_sec -= 1;

    /* Sleep for the required duration. If woken by a signal,
       resume waiting as required by Single Unix Specification. */
    if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)

  /* Block the restart signal again */
  sigprocmask(SIG_SETMASK, &initial_mask, NULL);

  THREAD_SETMEM(self, p_signal_jmp, NULL);

  /* Now was_signalled is true if we exited the above code
     due to the delivery of a restart signal. In that case,
     everything is cool. We have been removed from whatever
     we were waiting on by the other thread, and consumed its signal.

     Otherwise this thread woke up spontaneously, or due to a signal other
     than restart. This is an ambiguous case that must be resolved by
     the caller; the thread is still eligible for a restart wakeup
     so there is a race. */
  READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
  return was_signalled;
/* Emit a debugging message on stderr (fd 2), prefixed with the pid of
   the calling process.  Best-effort: output errors are ignored except
   for retrying on EINTR. */
void __pthread_message(const char * fmt, ...)
{
  char buffer[1024];
  va_list args;
  int len;

  /* Format the pid prefix and remember its actual length instead of
     assuming the historical fixed 8 bytes: a pid wider than 5 digits
     (possible with a raised kernel pid_max) would otherwise make the
     vsnprintf below start past/over the prefix boundary. */
  len = snprintf(buffer, sizeof(buffer), "%05d : ", __getpid());
  if (len < 0 || (size_t) len >= sizeof(buffer))
    return;
  va_start(args, fmt);
  vsnprintf(buffer + len, sizeof(buffer) - len, fmt, args);
  va_end(args);
  TEMP_FAILURE_RETRY(__libc_write(2, buffer, strlen(buffer)));
}