1 // Thread handling stuff for the Citadel server
3 // Copyright (c) 1987-2023 by the citadel.org team
5 // This program is open source software. Use, duplication, or disclosure
6 // is subject to the terms of the GNU General Public License, version 3.
11 #include <libcitadel.h>
12 #include "modules_init.h"
13 #include "serv_extensions.h"
14 #include "ctdl_module.h"
19 int num_workers = 0; // Current number of worker threads
20 int active_workers = 0; // Number of ACTIVE worker threads
// Key for per-thread (not per-session) data; set in CTC_backend(), read in MyThread()
21 pthread_key_t ThreadKey;
22 pthread_mutex_t Critters[MAX_SEMAPHORES]; // Things needing locking
// Fallback thread-specific data block; zeroed by InitializeMasterTSD()
23 struct thread_tsd masterTSD;
24 int server_shutting_down = 0; // set to nonzero during shutdown
// Initialized in go_threading(); presumably guards num_workers/active_workers — confirm at use sites
25 pthread_mutex_t ThreadCountMutex;
// Initialize the mutexes used to protect the server's critical sections.
// Must be called once at startup, before any begin/end/try_critical_section call.
// NOTE(review): the declaration of loop variable 'i' and the closing braces
// fall outside this excerpt.
27 void InitializeSemaphores(void) {
30 // Set up a bunch of semaphores to be used for critical sections
31 for (i=0; i<MAX_SEMAPHORES; ++i) {
32 pthread_mutex_init(&Critters[i], NULL);
37 // Obtain a semaphore lock to begin a critical section, but only if no one else has one
// Non-blocking variant of begin_critical_section().
// Returns the pthread_mutex_trylock() result: 0 if the lock was acquired,
// nonzero (e.g. EBUSY) if another thread already holds it.
// NOTE(review): the if-condition is truncated in this excerpt; the remaining
// exclusions and the guarded statement(s) are in lines not shown here.
38 int try_critical_section(int which_one) {
39 // For all types of critical sections except those listed here,
40 // ensure nobody ever tries to do a critical section within a
41 // transaction; this could lead to deadlock.
42 if ( (which_one != S_FLOORCACHE)
43 && (which_one != S_NETCONFIGS)
47 return (pthread_mutex_trylock(&Critters[which_one]));
51 // Obtain a semaphore lock to begin a critical section.
// Blocks until the mutex for the requested section is available.
// Pair every call with end_critical_section(which_one).
// NOTE(review): the if-condition is truncated in this excerpt; the remaining
// exclusions and the guarded statement(s) are in lines not shown here.
52 void begin_critical_section(int which_one) {
53 // For all types of critical sections except those listed here,
54 // ensure nobody ever tries to do a critical section within a
55 // transaction; this could lead to deadlock.
56 if ( (which_one != S_FLOORCACHE)
57 && (which_one != S_NETCONFIGS)
61 pthread_mutex_lock(&Critters[which_one]);
65 // Release a semaphore lock to end a critical section.
// Must be called by the same thread that acquired the lock via
// begin_critical_section() or a successful try_critical_section().
66 void end_critical_section(int which_one) {
67 pthread_mutex_unlock(&Critters[which_one]);
71 // Return a pointer to our thread-specific (not session-specific) data.
// Fetches the thread_tsd stored under ThreadKey by CTC_backend().
// NOTE(review): handling of a NULL result (presumably falling back to
// &masterTSD for threads without their own TSD) is outside this excerpt — confirm.
72 struct thread_tsd *MyThread(void) {
73 struct thread_tsd *c = (struct thread_tsd *) pthread_getspecific(ThreadKey) ;
81 // Called by CtdlThreadCreate()
82 // We have to pass through here before starting our thread in order to create a set of data
83 // that is thread-specific rather than session-specific.
// Trampoline: allocates and zeroes this thread's thread_tsd, installs it under
// ThreadKey, then (in lines not shown here) presumably invokes start_routine.
84 void *CTC_backend(void *supplied_start_routine) {
85 struct thread_tsd *mytsd;
86 void *(*start_routine)(void*) = supplied_start_routine;
// NOTE(review): malloc result is not checked before the memset — a failed
// allocation here would dereference NULL.
88 mytsd = (struct thread_tsd *) malloc(sizeof(struct thread_tsd));
89 memset(mytsd, 0, sizeof(struct thread_tsd));
90 pthread_setspecific(ThreadKey, (const void *) mytsd);
98 // Function to create a thread.
// Spawns a detached-from-caller worker running start_routine, wrapped by
// CTC_backend() so the new thread gets its own thread_tsd first.
// The thread handle is not retained by the caller.
// NOTE(review): 'ret' is overwritten by each call, so failures from
// pthread_attr_init()/pthread_attr_setstacksize() are silently ignored;
// only pthread_create() failure is logged. Declarations of 'ret', 'thread',
// and 'attr' fall outside this excerpt.
99 void CtdlThreadCreate(void *(*start_routine)(void*)) {
104 ret = pthread_attr_init(&attr);
105 ret = pthread_attr_setstacksize(&attr, THREADSTACKSIZE);
106 ret = pthread_create(&thread, &attr, CTC_backend, (void *)start_routine);
107 if (ret != 0) syslog(LOG_ERR, "pthread_create() : %m");
// Zero out the master (fallback) thread-specific data block.
// Called once at startup before threads that rely on masterTSD run.
111 void InitializeMasterTSD(void) {
112 memset(&masterTSD, 0, sizeof(struct thread_tsd));
116 // Initialize the thread system
// Main supervisor entry point: brings up the worker-thread pool, then loops
// until server_shutting_down is set, growing the pool whenever every worker
// is busy (up to the "c_max_workers" config limit). On exit, tears down
// sessions, listeners, and shutdown hooks.
// NOTE(review): any sleep/throttle inside the supervisor loop is in lines
// not shown in this excerpt — confirm the loop does not spin at full speed.
117 void go_threading(void) {
118 pthread_mutex_init(&ThreadCountMutex, NULL);
120 // Second call to module init functions now that threading is up
121 initialize_modules(1);
123 // Begin with one worker thread. We will expand the pool if necessary
124 CtdlThreadCreate(worker_thread);
126 // The supervisor thread monitors worker threads and spawns more of them if it finds that they are all in use.
127 while (!server_shutting_down) {
// Pool is saturated and below the configured cap: add one more worker.
128 if ((active_workers == num_workers) && (num_workers < CtdlGetConfigInt("c_max_workers"))) {
129 CtdlThreadCreate(worker_thread);
134 // When we get to this point we are getting ready to shut down our Citadel server
135 terminate_all_sessions(); // close all client sockets
136 CtdlShutdownServiceHooks(); // close all listener sockets to prevent new connections
137 PerformSessionHooks(EVT_SHUTDOWN); // run any registered shutdown hooks
139 // We used to wait for all threads to exit. Fuck that. The only thing important is that the databases are
140 // cleanly unmounted. After that, exit the whole program.