thpool.c

/* ********************************
 * Author:       Johan Hanssen Seferidis
 * License:      MIT
 * Description:  Library providing a threading pool where you can add
 *               work. For usage, check the thpool.h file or README.md
 *
 *//** @file thpool.h *//*
 *
 ********************************/

#if defined(__APPLE__)
#include <AvailabilityMacros.h>
#else
#ifndef _POSIX_C_SOURCE
#define _POSIX_C_SOURCE 200809L
#endif
#ifndef _XOPEN_SOURCE
#define _XOPEN_SOURCE 500
#endif
#endif
#include <unistd.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <errno.h>
#include <time.h>
#if defined(__linux__)
#include <sys/prctl.h>
#endif
#if defined(__FreeBSD__) || defined(__OpenBSD__)
#include <pthread_np.h>
#endif

#include "thpool.h"

#ifdef THPOOL_DEBUG
#define THPOOL_DEBUG 1
#else
#define THPOOL_DEBUG 0
#endif

#if !defined(DISABLE_PRINT) || defined(THPOOL_DEBUG)
#define err(str) fprintf(stderr, str)
#else
#define err(str)
#endif

#ifndef THPOOL_THREAD_NAME
#define THPOOL_THREAD_NAME thpool
#endif

#define STRINGIFY(x) #x
#define TOSTRING(x) STRINGIFY(x)

static volatile int threads_keepalive;
static volatile int threads_on_hold;


/* ========================== STRUCTURES ============================ */


/* Binary semaphore */
typedef struct bsem {
    pthread_mutex_t mutex;
    pthread_cond_t  cond;
    int v;
} bsem;


/* Job */
typedef struct job{
    struct job* prev;                 /* pointer to previous job   */
    void (*function)(void* arg);      /* function pointer          */
    void* arg;                        /* function's argument       */
} job;


/* Job queue */
typedef struct jobqueue{
    pthread_mutex_t rwmutex;          /* used for queue r/w access */
    job  *front;                      /* pointer to front of queue */
    job  *rear;                       /* pointer to rear of queue  */
    bsem *has_jobs;                   /* flag as binary semaphore  */
    int   len;                        /* number of jobs in queue   */
} jobqueue;


/* Thread */
typedef struct thread{
    int       id;                     /* friendly id               */
    pthread_t pthread;                /* pointer to actual thread  */
    struct thpool_* thpool_p;         /* access to thpool          */
} thread;


/* Threadpool */
typedef struct thpool_{
    thread**   threads;                  /* pointer to threads        */
    volatile int num_threads_alive;      /* threads currently alive   */
    volatile int num_threads_working;    /* threads currently working */
    pthread_mutex_t thcount_lock;        /* used for thread count etc */
    pthread_cond_t  threads_all_idle;    /* signal to thpool_wait     */
    jobqueue jobqueue;                   /* job queue                 */
} thpool_;


/* ========================== PROTOTYPES ============================ */


static int   thread_init(thpool_* thpool_p, struct thread** thread_p, int id);
static void* thread_do(struct thread* thread_p);
static void  thread_hold(int sig_id);
static void  thread_destroy(struct thread* thread_p);

static int   jobqueue_init(jobqueue* jobqueue_p);
static void  jobqueue_clear(jobqueue* jobqueue_p);
static void  jobqueue_push(jobqueue* jobqueue_p, struct job* newjob_p);
static struct job* jobqueue_pull(jobqueue* jobqueue_p);
static void  jobqueue_destroy(jobqueue* jobqueue_p);

static void  bsem_init(struct bsem *bsem_p, int value);
static void  bsem_reset(struct bsem *bsem_p);
static void  bsem_post(struct bsem *bsem_p);
static void  bsem_post_all(struct bsem *bsem_p);
static void  bsem_wait(struct bsem *bsem_p);


/* ========================== THREADPOOL ============================ */


/* Initialise thread pool */
struct thpool_* thpool_init(int num_threads){

    threads_on_hold   = 0;
    threads_keepalive = 1;

    if (num_threads < 0){
        num_threads = 0;
    }

    /* Make new thread pool */
    thpool_* thpool_p;
    thpool_p = (struct thpool_*)malloc(sizeof(struct thpool_));
    if (thpool_p == NULL){
        err("thpool_init(): Could not allocate memory for thread pool\n");
        return NULL;
    }
    thpool_p->num_threads_alive   = 0;
    thpool_p->num_threads_working = 0;

    /* Initialise the job queue */
    if (jobqueue_init(&thpool_p->jobqueue) == -1){
        err("thpool_init(): Could not allocate memory for job queue\n");
        free(thpool_p);
        return NULL;
    }

    /* Make threads in pool */
    thpool_p->threads = (struct thread**)malloc(num_threads * sizeof(struct thread *));
    if (thpool_p->threads == NULL){
        err("thpool_init(): Could not allocate memory for threads\n");
        jobqueue_destroy(&thpool_p->jobqueue);
        free(thpool_p);
        return NULL;
    }

    pthread_mutex_init(&(thpool_p->thcount_lock), NULL);
    pthread_cond_init(&thpool_p->threads_all_idle, NULL);

    /* Thread init */
    int n;
    for (n=0; n<num_threads; n++){
        thread_init(thpool_p, &thpool_p->threads[n], n);
#if THPOOL_DEBUG
        printf("THPOOL_DEBUG: Created thread %d in pool \n", n);
#endif
    }

    /* Wait for threads to initialize */
    while (thpool_p->num_threads_alive != num_threads) {}

    return thpool_p;
}
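
/* Usage sketch (illustrative only, not compiled as part of this file): a
 * caller that includes thpool.h (assuming it exposes the usual threadpool
 * handle typedef for struct thpool_*) would typically drive the pool like
 * this:
 *
 *     void task(void* arg){
 *         printf("working on %d\n", *(int*)arg);
 *     }
 *
 *     threadpool pool = thpool_init(4);   // spawn 4 worker threads
 *     int x = 42;
 *     thpool_add_work(pool, task, &x);    // enqueue one job
 *     thpool_wait(pool);                  // block until the queue drains
 *     thpool_destroy(pool);               // stop workers and free the pool
 */
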
/* Add work to the thread pool */
int thpool_add_work(thpool_* thpool_p, void (*function_p)(void*), void* arg_p){
    job* newjob;

    newjob=(struct job*)malloc(sizeof(struct job));
    if (newjob==NULL){
        err("thpool_add_work(): Could not allocate memory for new job\n");
        return -1;
    }

    /* add function and argument */
    newjob->function=function_p;
    newjob->arg=arg_p;

    /* add job to queue */
    jobqueue_push(&thpool_p->jobqueue, newjob);

    return 0;
}
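
/* Argument-lifetime note (sketch): thpool_add_work() only stores the arg
 * pointer, so whatever it points to must stay valid until the job has run.
 * A common pattern is to heap-allocate the argument and free it inside the
 * task itself (the work() function below is hypothetical):
 *
 *     void work(void* arg){
 *         int* n = (int*)arg;
 *         printf("job %d\n", *n);
 *         free(n);                        // the job owns its argument
 *     }
 *
 *     int* n = malloc(sizeof *n);
 *     *n = 7;
 *     thpool_add_work(pool, work, n);
 */
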
/* Wait until all jobs have finished */
void thpool_wait(thpool_* thpool_p){
    pthread_mutex_lock(&thpool_p->thcount_lock);
    while (thpool_p->jobqueue.len || thpool_p->num_threads_working) {
        pthread_cond_wait(&thpool_p->threads_all_idle, &thpool_p->thcount_lock);
    }
    pthread_mutex_unlock(&thpool_p->thcount_lock);
}


/* Destroy the threadpool */
void thpool_destroy(thpool_* thpool_p){
    /* No need to destroy if it's NULL */
    if (thpool_p == NULL) return;

    volatile int threads_total = thpool_p->num_threads_alive;

    /* End each thread's infinite loop */
    threads_keepalive = 0;

    /* Give one second to kill idle threads */
    double TIMEOUT = 1.0;
    time_t start, end;
    double tpassed = 0.0;
    time(&start);
    while (tpassed < TIMEOUT && thpool_p->num_threads_alive){
        bsem_post_all(thpool_p->jobqueue.has_jobs);
        time(&end);
        tpassed = difftime(end, start);
    }

    /* Poll remaining threads */
    while (thpool_p->num_threads_alive){
        bsem_post_all(thpool_p->jobqueue.has_jobs);
        sleep(1);
    }

    /* Job queue cleanup */
    jobqueue_destroy(&thpool_p->jobqueue);

    /* Deallocs */
    int n;
    for (n=0; n < threads_total; n++){
        thread_destroy(thpool_p->threads[n]);
    }
    free(thpool_p->threads);
    free(thpool_p);
}
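
/* Shutdown sketch (how the pieces above interact): thpool_destroy() clears
 * the global keep-alive flag and then keeps posting has_jobs so that workers
 * blocked in bsem_wait() wake up, observe threads_keepalive == 0, and leave
 * their loop. Each exiting worker decrements num_threads_alive, which
 * thpool_destroy() polls before freeing anything:
 *
 *     threads_keepalive = 0;
 *     bsem_post_all(thpool_p->jobqueue.has_jobs);   // wake idle workers
 *     // ... wait until thpool_p->num_threads_alive reaches 0 ...
 */
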
/* Pause all threads in threadpool */
void thpool_pause(thpool_* thpool_p) {
    int n;
    for (n=0; n < thpool_p->num_threads_alive; n++){
        pthread_kill(thpool_p->threads[n]->pthread, SIGUSR1);
    }
}


/* Resume all threads in threadpool */
void thpool_resume(thpool_* thpool_p) {
    // resuming a single threadpool hasn't been
    // implemented yet, meanwhile this suppresses
    // the warnings
    (void)thpool_p;

    threads_on_hold = 0;
}
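
/* Pause/resume sketch: thpool_pause() sends SIGUSR1 to every live worker,
 * whose handler (thread_hold) raises the global threads_on_hold flag and
 * sleeps in one-second steps until thpool_resume() clears it. Because the
 * flag is process-global, resuming currently affects all pools:
 *
 *     thpool_pause(pool);      // workers stop picking up new jobs
 *     // ... inspect or enqueue work while paused ...
 *     thpool_resume(pool);     // all held workers continue
 */
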
int thpool_num_threads_working(thpool_* thpool_p){
    return thpool_p->num_threads_working;
}


/* ============================ THREAD ============================== */


/* Initialize a thread in the thread pool
 *
 * @param thpool_p  pool the thread belongs to
 * @param thread_p  address of the pointer to the thread to be created
 * @param id        id to be given to the thread
 * @return 0 on success, -1 otherwise.
 */
static int thread_init (thpool_* thpool_p, struct thread** thread_p, int id){

    *thread_p = (struct thread*)malloc(sizeof(struct thread));
    if (*thread_p == NULL){
        err("thread_init(): Could not allocate memory for thread\n");
        return -1;
    }

    (*thread_p)->thpool_p = thpool_p;
    (*thread_p)->id       = id;

    pthread_create(&(*thread_p)->pthread, NULL, (void * (*)(void *)) thread_do, (*thread_p));
    pthread_detach((*thread_p)->pthread);
    return 0;
}


/* Sets the calling thread on hold */
static void thread_hold(int sig_id) {
    (void)sig_id;
    threads_on_hold = 1;
    while (threads_on_hold){
        sleep(1);
    }
}
/* What each thread is doing
 *
 * In principle this is an endless loop. The only time this loop gets interrupted is once
 * thpool_destroy() is invoked or the program exits.
 *
 * @param thread_p  thread that will run this function
 * @return nothing
 */
static void* thread_do(struct thread* thread_p){

    /* Set thread name for profiling and debugging */
    char thread_name[16] = {0};
    snprintf(thread_name, 16, TOSTRING(THPOOL_THREAD_NAME) "-%d", thread_p->id);

#if defined(__linux__)
    /* Use prctl instead of pthread_setname_np to avoid requiring the
     * _GNU_SOURCE flag and the implicit declaration that comes with it */
    prctl(PR_SET_NAME, thread_name);
#elif defined(__APPLE__) && defined(__MACH__)
    pthread_setname_np(thread_name);
#elif defined(__FreeBSD__) || defined(__OpenBSD__)
    pthread_set_name_np(thread_p->pthread, thread_name);
#else
    err("thread_do(): pthread_setname_np is not supported on this system");
#endif

    /* Ensure all threads have been created before starting to serve */
    thpool_* thpool_p = thread_p->thpool_p;

    /* Register signal handler */
    struct sigaction act;
    sigemptyset(&act.sa_mask);
    act.sa_flags = SA_ONSTACK;
    act.sa_handler = thread_hold;
    if (sigaction(SIGUSR1, &act, NULL) == -1) {
        err("thread_do(): cannot handle SIGUSR1");
    }

    /* Mark thread as alive (initialized) */
    pthread_mutex_lock(&thpool_p->thcount_lock);
    thpool_p->num_threads_alive += 1;
    pthread_mutex_unlock(&thpool_p->thcount_lock);

    while(threads_keepalive){

        bsem_wait(thpool_p->jobqueue.has_jobs);

        if (threads_keepalive){

            pthread_mutex_lock(&thpool_p->thcount_lock);
            thpool_p->num_threads_working++;
            pthread_mutex_unlock(&thpool_p->thcount_lock);

            /* Read job from queue and execute it */
            void (*func_buff)(void*);
            void*  arg_buff;
            job* job_p = jobqueue_pull(&thpool_p->jobqueue);
            if (job_p) {
                func_buff = job_p->function;
                arg_buff  = job_p->arg;
                func_buff(arg_buff);
                free(job_p);
            }

            pthread_mutex_lock(&thpool_p->thcount_lock);
            thpool_p->num_threads_working--;
            if (!thpool_p->num_threads_working) {
                pthread_cond_signal(&thpool_p->threads_all_idle);
            }
            pthread_mutex_unlock(&thpool_p->thcount_lock);

        }
    }
    pthread_mutex_lock(&thpool_p->thcount_lock);
    thpool_p->num_threads_alive--;
    pthread_mutex_unlock(&thpool_p->thcount_lock);

    return NULL;
}
/* Frees a thread */
static void thread_destroy (thread* thread_p){
    free(thread_p);
}


/* ============================ JOB QUEUE =========================== */


/* Initialize queue */
static int jobqueue_init(jobqueue* jobqueue_p){
    jobqueue_p->len = 0;
    jobqueue_p->front = NULL;
    jobqueue_p->rear  = NULL;

    jobqueue_p->has_jobs = (struct bsem*)malloc(sizeof(struct bsem));
    if (jobqueue_p->has_jobs == NULL){
        return -1;
    }

    pthread_mutex_init(&(jobqueue_p->rwmutex), NULL);
    bsem_init(jobqueue_p->has_jobs, 0);

    return 0;
}


/* Clear the queue */
static void jobqueue_clear(jobqueue* jobqueue_p){

    while(jobqueue_p->len){
        free(jobqueue_pull(jobqueue_p));
    }

    jobqueue_p->front = NULL;
    jobqueue_p->rear  = NULL;
    bsem_reset(jobqueue_p->has_jobs);
    jobqueue_p->len = 0;
}
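
/* Queue layout sketch: jobs are pulled from front and appended at rear; each
 * node's prev pointer leads from the front toward the rear, so a three-job
 * queue links up as:
 *
 *     front -> job1 --prev--> job2 --prev--> job3 <- rear
 */
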
/* Add (allocated) job to queue */
static void jobqueue_push(jobqueue* jobqueue_p, struct job* newjob){

    pthread_mutex_lock(&jobqueue_p->rwmutex);
    newjob->prev = NULL;

    switch(jobqueue_p->len){

        case 0:  /* if no jobs in queue */
            jobqueue_p->front = newjob;
            jobqueue_p->rear  = newjob;
            break;

        default: /* if jobs in queue */
            jobqueue_p->rear->prev = newjob;
            jobqueue_p->rear = newjob;
    }
    jobqueue_p->len++;

    bsem_post(jobqueue_p->has_jobs);
    pthread_mutex_unlock(&jobqueue_p->rwmutex);
}
/* Get first job from queue (removes it from queue)
 *
 * Notice: the queue's rwmutex is taken inside this function,
 * so the caller must not already hold it.
 */
static struct job* jobqueue_pull(jobqueue* jobqueue_p){

    pthread_mutex_lock(&jobqueue_p->rwmutex);
    job* job_p = jobqueue_p->front;

    switch(jobqueue_p->len){

        case 0:  /* if no jobs in queue */
            break;

        case 1:  /* if one job in queue */
            jobqueue_p->front = NULL;
            jobqueue_p->rear  = NULL;
            jobqueue_p->len = 0;
            break;

        default: /* if >1 jobs in queue */
            jobqueue_p->front = job_p->prev;
            jobqueue_p->len--;
            /* more than one job in queue -> post it */
            bsem_post(jobqueue_p->has_jobs);
    }

    pthread_mutex_unlock(&jobqueue_p->rwmutex);
    return job_p;
}
/* Free all queue resources back to the system */
static void jobqueue_destroy(jobqueue* jobqueue_p){
    jobqueue_clear(jobqueue_p);
    free(jobqueue_p->has_jobs);
}


/* ======================== SYNCHRONISATION ========================= */


/* Init semaphore to 1 or 0 */
static void bsem_init(bsem *bsem_p, int value) {
    if (value < 0 || value > 1) {
        err("bsem_init(): Binary semaphore can take only values 1 or 0");
        exit(1);
    }
    pthread_mutex_init(&(bsem_p->mutex), NULL);
    pthread_cond_init(&(bsem_p->cond), NULL);
    bsem_p->v = value;
}


/* Reset semaphore to 0 */
static void bsem_reset(bsem *bsem_p) {
    pthread_mutex_destroy(&(bsem_p->mutex));
    pthread_cond_destroy(&(bsem_p->cond));
    bsem_init(bsem_p, 0);
}


/* Post to at least one thread */
static void bsem_post(bsem *bsem_p) {
    pthread_mutex_lock(&bsem_p->mutex);
    bsem_p->v = 1;
    pthread_cond_signal(&bsem_p->cond);
    pthread_mutex_unlock(&bsem_p->mutex);
}


/* Post to all threads */
static void bsem_post_all(bsem *bsem_p) {
    pthread_mutex_lock(&bsem_p->mutex);
    bsem_p->v = 1;
    pthread_cond_broadcast(&bsem_p->cond);
    pthread_mutex_unlock(&bsem_p->mutex);
}


/* Wait on semaphore until it is posted (value 1), then reset it to 0 */
static void bsem_wait(bsem* bsem_p) {
    pthread_mutex_lock(&bsem_p->mutex);
    while (bsem_p->v != 1) {
        pthread_cond_wait(&bsem_p->cond, &bsem_p->mutex);
    }
    bsem_p->v = 0;
    pthread_mutex_unlock(&bsem_p->mutex);
}
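
/* Handoff sketch: the bsem helpers above implement a "there is work" flag on
 * top of a mutex and a condition variable. A producer raises the flag and
 * wakes a sleeper; the consumer lowers it again before pulling a job:
 *
 *     // producer side (jobqueue_push)      // consumer side (thread_do)
 *     bsem_post(jobqueue_p->has_jobs);      bsem_wait(thpool_p->jobqueue.has_jobs);
 *                                           // v was 1, now reset to 0; pull a job
 */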