
/*
 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "event2/event-config.h"
#include "evconfig-private.h"

#ifdef _WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif
#include <sys/queue.h>
#ifdef EVENT__HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#ifdef EVENT__HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <ctype.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <limits.h>
#ifdef EVENT__HAVE_FCNTL_H
#include <fcntl.h>
#endif

#include "event2/event.h"
#include "event2/event_struct.h"
#include "event2/event_compat.h"
#include "event-internal.h"
#include "defer-internal.h"
#include "evthread-internal.h"
#include "event2/thread.h"
#include "event2/util.h"
#include "log-internal.h"
#include "evmap-internal.h"
#include "iocp-internal.h"
#include "changelist-internal.h"
#define HT_NO_CACHE_HASH_VALUES
#include "ht-internal.h"
#include "util-internal.h"

#ifdef EVENT__HAVE_WORKING_KQUEUE
#include "kqueue-internal.h"
#endif

#ifdef EVENT__HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef EVENT__HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef EVENT__HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef EVENT__HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef EVENT__HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef EVENT__HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
#ifdef _WIN32
extern const struct eventop win32ops;
#endif

/* Array of backends in order of preference. */
static const struct eventop *eventops[] = {
#ifdef EVENT__HAVE_EVENT_PORTS
	&evportops,
#endif
#ifdef EVENT__HAVE_WORKING_KQUEUE
	&kqops,
#endif
#ifdef EVENT__HAVE_EPOLL
	&epollops,
#endif
#ifdef EVENT__HAVE_DEVPOLL
	&devpollops,
#endif
#ifdef EVENT__HAVE_POLL
	&pollops,
#endif
#ifdef EVENT__HAVE_SELECT
	&selectops,
#endif
#ifdef _WIN32
	&win32ops,
#endif
	NULL
};

/* Global state; deprecated */
EVENT2_EXPORT_SYMBOL
struct event_base *event_global_current_base_ = NULL;
#define current_base event_global_current_base_

/* Global state */
static void *event_self_cbarg_ptr_ = NULL;

/* Prototypes */
static void event_queue_insert_active(struct event_base *, struct event_callback *);
static void event_queue_insert_active_later(struct event_base *, struct event_callback *);
static void event_queue_insert_timeout(struct event_base *, struct event *);
static void event_queue_insert_inserted(struct event_base *, struct event *);
static void event_queue_remove_active(struct event_base *, struct event_callback *);
static void event_queue_remove_active_later(struct event_base *, struct event_callback *);
static void event_queue_remove_timeout(struct event_base *, struct event *);
static void event_queue_remove_inserted(struct event_base *, struct event *);
static void event_queue_make_later_events_active(struct event_base *base);

static int evthread_make_base_notifiable_nolock_(struct event_base *base);
static int event_del_(struct event *ev, int blocking);

#ifdef USE_REINSERT_TIMEOUT
/* This code seems buggy; only turn it on if we find out what the trouble is. */
static void event_queue_reinsert_timeout(struct event_base *, struct event *, int was_common, int is_common, int old_timeout_idx);
#endif

static int event_haveevents(struct event_base *);

static int event_process_active(struct event_base *);

static int timeout_next(struct event_base *, struct timeval **);
static void timeout_process(struct event_base *);

static inline void event_signal_closure(struct event_base *, struct event *ev);
static inline void event_persist_closure(struct event_base *, struct event *ev);

static int evthread_notify_base(struct event_base *base);

static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
    struct event *ev);

#ifndef EVENT__DISABLE_DEBUG_MODE
/* These functions implement a hashtable of which 'struct event *' structures
 * have been setup or added.  We don't want to trust the content of the struct
 * event itself, since we're trying to work through cases where an event gets
 * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer.
 */

struct event_debug_entry {
	HT_ENTRY(event_debug_entry) node;
	const struct event *ptr;
	unsigned added : 1;
};

static inline unsigned
hash_debug_entry(const struct event_debug_entry *e)
{
	/* We need to do this silliness to convince compilers that we
	 * honestly mean to cast e->ptr to an integer, and discard any
	 * part of it that doesn't fit in an unsigned.
	 */
	unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
	/* Our hashtable implementation is pretty sensitive to low bits,
	 * and every struct event is over 64 bytes in size, so we can
	 * just say >>6. */
	return (u >> 6);
}

static inline int
eq_debug_entry(const struct event_debug_entry *a,
    const struct event_debug_entry *b)
{
	return a->ptr == b->ptr;
}

int event_debug_mode_on_ = 0;

#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
/**
 * @brief debug mode variable which is set for any function/structure that needs
 *        to be shared across threads (if thread support is enabled).
 *
 * When and if evthreads are initialized, this variable will be evaluated,
 * and if set to something other than zero, this means the evthread setup
 * functions were called out of order.
 *
 * See: "Locks and threading" in the documentation.
 */
int event_debug_created_threadable_ctx_ = 0;
#endif

/* Set if it's too late to enable event_debug_mode. */
static int event_debug_mode_too_late = 0;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
static void *event_debug_map_lock_ = NULL;
#endif
static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
	HT_INITIALIZER();

HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry)
HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
/* record that ev is now setup (that is, ready for an add) */
static void event_debug_note_setup_(const struct event *ev)
{
	struct event_debug_entry *dent, find;

	if (!event_debug_mode_on_)
		goto out;

	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
	if (dent) {
		dent->added = 0;
	} else {
		dent = mm_malloc(sizeof(*dent));
		if (!dent)
			event_err(1,
			    "Out of memory in debugging code");
		dent->ptr = ev;
		dent->added = 0;
		HT_INSERT(event_debug_map, &global_debug_map, dent);
	}
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);

out:
	event_debug_mode_too_late = 1;
}

/* record that ev is no longer setup */
static void event_debug_note_teardown_(const struct event *ev)
{
	struct event_debug_entry *dent, find;

	if (!event_debug_mode_on_)
		goto out;

	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_REMOVE(event_debug_map, &global_debug_map, &find);
	if (dent)
		mm_free(dent);
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);

out:
	event_debug_mode_too_late = 1;
}
/* record that ev is now added */
static void event_debug_note_add_(const struct event *ev)
{
	struct event_debug_entry *dent, find;

	if (!event_debug_mode_on_)
		goto out;

	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
	if (dent) {
		dent->added = 1;
	} else {
		event_errx(EVENT_ERR_ABORT_,
		    "%s: noting an add on a non-setup event %p"
		    " (events: 0x%x, fd: "EV_SOCK_FMT
		    ", flags: 0x%x)",
		    __func__, ev, ev->ev_events,
		    EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
	}
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);

out:
	event_debug_mode_too_late = 1;
}

/* record that ev is no longer added */
static void event_debug_note_del_(const struct event *ev)
{
	struct event_debug_entry *dent, find;

	if (!event_debug_mode_on_)
		goto out;

	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
	if (dent) {
		dent->added = 0;
	} else {
		event_errx(EVENT_ERR_ABORT_,
		    "%s: noting a del on a non-setup event %p"
		    " (events: 0x%x, fd: "EV_SOCK_FMT
		    ", flags: 0x%x)",
		    __func__, ev, ev->ev_events,
		    EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
	}
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);

out:
	event_debug_mode_too_late = 1;
}

/* assert that ev is setup (i.e., okay to add or inspect) */
static void event_debug_assert_is_setup_(const struct event *ev)
{
	struct event_debug_entry *dent, find;

	if (!event_debug_mode_on_)
		return;

	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
	if (!dent) {
		event_errx(EVENT_ERR_ABORT_,
		    "%s called on a non-initialized event %p"
		    " (events: 0x%x, fd: "EV_SOCK_FMT
		    ", flags: 0x%x)",
		    __func__, ev, ev->ev_events,
		    EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
	}
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);
}

/* assert that ev is not added (i.e., okay to tear down or set up again) */
static void event_debug_assert_not_added_(const struct event *ev)
{
	struct event_debug_entry *dent, find;

	if (!event_debug_mode_on_)
		return;

	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
	if (dent && dent->added) {
		event_errx(EVENT_ERR_ABORT_,
		    "%s called on an already added event %p"
		    " (events: 0x%x, fd: "EV_SOCK_FMT", "
		    "flags: 0x%x)",
		    __func__, ev, ev->ev_events,
		    EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
	}
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);
}

static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd)
{
	if (!event_debug_mode_on_)
		return;
	if (fd < 0)
		return;

#ifndef _WIN32
	{
		int flags;
		if ((flags = fcntl(fd, F_GETFL, NULL)) >= 0) {
			EVUTIL_ASSERT(flags & O_NONBLOCK);
		}
	}
#endif
}
#else
static void event_debug_note_setup_(const struct event *ev) { (void)ev; }
static void event_debug_note_teardown_(const struct event *ev) { (void)ev; }
static void event_debug_note_add_(const struct event *ev) { (void)ev; }
static void event_debug_note_del_(const struct event *ev) { (void)ev; }
static void event_debug_assert_is_setup_(const struct event *ev) { (void)ev; }
static void event_debug_assert_not_added_(const struct event *ev) { (void)ev; }
static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd) { (void)fd; }
#endif
#define EVENT_BASE_ASSERT_LOCKED(base) \
	EVLOCK_ASSERT_LOCKED((base)->th_base_lock)

/* How often (in seconds) do we check for changes in wall clock time relative
 * to monotonic time?  Set this to -1 for 'never.' */
#define CLOCK_SYNC_INTERVAL 5

/** Set 'tp' to the current time according to 'base'.  We must hold the lock
 * on 'base'.  If there is a cached time, return it.  Otherwise, use
 * clock_gettime or gettimeofday as appropriate to find out the right time.
 * Return 0 on success, -1 on failure.
 */
static int
gettime(struct event_base *base, struct timeval *tp)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (base->tv_cache.tv_sec) {
		*tp = base->tv_cache;
		return (0);
	}

	if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
		return -1;
	}

	if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
	    < tp->tv_sec) {
		struct timeval tv;
		evutil_gettimeofday(&tv, NULL);
		evutil_timersub(&tv, tp, &base->tv_clock_diff);
		base->last_updated_clock_diff = tp->tv_sec;
	}

	return 0;
}

int
event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
{
	int r;
	if (!base) {
		base = current_base;
		if (!current_base)
			return evutil_gettimeofday(tv, NULL);
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (base->tv_cache.tv_sec == 0) {
		r = evutil_gettimeofday(tv, NULL);
	} else {
		evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
		r = 0;
	}
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}
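
/* Illustrative sketch (not part of libevent): inside a callback that fires
 * many times per loop iteration, event_base_gettimeofday_cached() can reuse
 * the timestamp cached at the top of the iteration instead of making a
 * syscall per call.  The callback and its argument below are hypothetical. */
#if 0
static void
example_time_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	struct timeval now;
	/* Cheap: uses the cached loop time plus the wall-clock offset,
	 * when a cached time is available. */
	event_base_gettimeofday_cached(base, &now);
}
#endif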

/** Make 'base' have no current cached time. */
static inline void
clear_time_cache(struct event_base *base)
{
	base->tv_cache.tv_sec = 0;
}

/** Replace the cached time in 'base' with the current time. */
static inline void
update_time_cache(struct event_base *base)
{
	base->tv_cache.tv_sec = 0;
	if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
		gettime(base, &base->tv_cache);
}

int
event_base_update_cache_time(struct event_base *base)
{
	if (!base) {
		base = current_base;
		if (!current_base)
			return -1;
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (base->running_loop)
		update_time_cache(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return 0;
}

static inline struct event *
event_callback_to_event(struct event_callback *evcb)
{
	EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
	return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
}

static inline struct event_callback *
event_to_event_callback(struct event *ev)
{
	return &ev->ev_evcallback;
}

struct event_base *
event_init(void)
{
	struct event_base *base = event_base_new_with_config(NULL);

	if (base == NULL) {
		event_errx(1, "%s: Unable to construct event_base", __func__);
		return NULL;
	}

	current_base = base;

	return (base);
}

struct event_base *
event_base_new(void)
{
	struct event_base *base = NULL;
	struct event_config *cfg = event_config_new();
	if (cfg) {
		base = event_base_new_with_config(cfg);
		event_config_free(cfg);
	}
	return base;
}

/** Return true iff 'method' is the name of a method that 'cfg' tells us to
 * avoid. */
static int
event_config_is_avoided_method(const struct event_config *cfg,
    const char *method)
{
	struct event_config_entry *entry;

	TAILQ_FOREACH(entry, &cfg->entries, next) {
		if (entry->avoid_method != NULL &&
		    strcmp(entry->avoid_method, method) == 0)
			return (1);
	}

	return (0);
}

/** Return true iff 'method' is disabled according to the environment. */
static int
event_is_method_disabled(const char *name)
{
	char environment[64];
	int i;

	evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
	for (i = 8; environment[i] != '\0'; ++i)
		environment[i] = EVUTIL_TOUPPER_(environment[i]);
	/* Note that evutil_getenv_() ignores the environment entirely if
	 * we're setuid */
	return (evutil_getenv_(environment) != NULL);
}

int
event_base_get_features(const struct event_base *base)
{
	return base->evsel->features;
}

void
event_enable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_mode_on_)
		event_errx(1, "%s was called twice!", __func__);
	if (event_debug_mode_too_late)
		event_errx(1, "%s must be called *before* creating any events "
		    "or event_bases", __func__);

	event_debug_mode_on_ = 1;

	HT_INIT(event_debug_map, &global_debug_map);
#endif
}
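
/* Illustrative sketch (not part of libevent): debug mode has to be switched
 * on before any event or event_base exists, so a typical caller does it
 * first thing in main().  The surrounding program is hypothetical. */
#if 0
int
main(int argc, char **argv)
{
	struct event_base *base;

	event_enable_debug_mode();	/* must precede event_base_new() */
	base = event_base_new();
	/* ... set up events, run the loop ... */
	event_base_free(base);
	return 0;
}
#endif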

void
event_disable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	struct event_debug_entry **ent, *victim;

	EVLOCK_LOCK(event_debug_map_lock_, 0);
	for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
		victim = *ent;
		ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
		mm_free(victim);
	}
	HT_CLEAR(event_debug_map, &global_debug_map);
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);

	event_debug_mode_on_ = 0;
#endif
}

struct event_base *
event_base_new_with_config(const struct event_config *cfg)
{
	int i;
	struct event_base *base;
	int should_check_environment;

#ifndef EVENT__DISABLE_DEBUG_MODE
	event_debug_mode_too_late = 1;
#endif

	if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
		event_warn("%s: calloc", __func__);
		return NULL;
	}

	if (cfg)
		base->flags = cfg->flags;

	should_check_environment =
	    !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));

	{
		struct timeval tmp;
		int precise_time =
		    cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
		int flags;
		if (should_check_environment && !precise_time) {
			precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
			if (precise_time) {
				base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
			}
		}
		flags = precise_time ? EV_MONOT_PRECISE : 0;
		evutil_configure_monotonic_time_(&base->monotonic_timer, flags);

		gettime(base, &tmp);
	}

	min_heap_ctor_(&base->timeheap);

	base->sig.ev_signal_pair[0] = -1;
	base->sig.ev_signal_pair[1] = -1;
	base->th_notify_fd[0] = -1;
	base->th_notify_fd[1] = -1;

	TAILQ_INIT(&base->active_later_queue);

	evmap_io_initmap_(&base->io);
	evmap_signal_initmap_(&base->sigmap);
	event_changelist_init_(&base->changelist);

	base->evbase = NULL;

	if (cfg) {
		memcpy(&base->max_dispatch_time,
		    &cfg->max_dispatch_interval, sizeof(struct timeval));
		base->limit_callbacks_after_prio =
		    cfg->limit_callbacks_after_prio;
	} else {
		base->max_dispatch_time.tv_sec = -1;
		base->limit_callbacks_after_prio = 1;
	}
	if (cfg && cfg->max_dispatch_callbacks >= 0) {
		base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
	} else {
		base->max_dispatch_callbacks = INT_MAX;
	}
	if (base->max_dispatch_callbacks == INT_MAX &&
	    base->max_dispatch_time.tv_sec == -1)
		base->limit_callbacks_after_prio = INT_MAX;

	for (i = 0; eventops[i] && !base->evbase; i++) {
		if (cfg != NULL) {
			/* determine if this backend should be avoided */
			if (event_config_is_avoided_method(cfg,
			    eventops[i]->name))
				continue;
			if ((eventops[i]->features & cfg->require_features)
			    != cfg->require_features)
				continue;
		}

		/* also obey the environment variables */
		if (should_check_environment &&
		    event_is_method_disabled(eventops[i]->name))
			continue;

		base->evsel = eventops[i];

		base->evbase = base->evsel->init(base);
	}

	if (base->evbase == NULL) {
		event_warnx("%s: no event mechanism available",
		    __func__);
		base->evsel = NULL;
		event_base_free(base);
		return NULL;
	}

	if (evutil_getenv_("EVENT_SHOW_METHOD"))
		event_msgx("libevent using: %s", base->evsel->name);

	/* allocate a single active event queue */
	if (event_base_priority_init(base, 1) < 0) {
		event_base_free(base);
		return NULL;
	}

	/* prepare for threading */

#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
	event_debug_created_threadable_ctx_ = 1;
#endif

#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (EVTHREAD_LOCKING_ENABLED() &&
	    (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
		int r;
		EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
		EVTHREAD_ALLOC_COND(base->current_event_cond);
		r = evthread_make_base_notifiable(base);
		if (r < 0) {
			event_warnx("%s: Unable to make base notifiable.", __func__);
			event_base_free(base);
			return NULL;
		}
	}
#endif

#ifdef _WIN32
	if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
		event_base_start_iocp_(base, cfg->n_cpus_hint);
#endif

	return (base);
}
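
/* Illustrative sketch (not part of libevent): typical use of the config API
 * that this constructor consumes -- avoid a backend by name, require
 * edge-triggered support, and pass the result in.  Error handling is
 * abbreviated. */
#if 0
static struct event_base *
make_base_example(void)
{
	struct event_base *base = NULL;
	struct event_config *cfg = event_config_new();
	if (!cfg)
		return NULL;
	event_config_avoid_method(cfg, "select");
	event_config_require_features(cfg, EV_FEATURE_ET);
	base = event_base_new_with_config(cfg);	/* NULL if no backend matches */
	event_config_free(cfg);
	return base;
}
#endif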

int
event_base_start_iocp_(struct event_base *base, int n_cpus)
{
#ifdef _WIN32
	if (base->iocp)
		return 0;
	base->iocp = event_iocp_port_launch_(n_cpus);
	if (!base->iocp) {
		event_warnx("%s: Couldn't launch IOCP", __func__);
		return -1;
	}
	return 0;
#else
	return -1;
#endif
}

void
event_base_stop_iocp_(struct event_base *base)
{
#ifdef _WIN32
	int rv;

	if (!base->iocp)
		return;
	rv = event_iocp_shutdown_(base->iocp, -1);
	EVUTIL_ASSERT(rv >= 0);
	base->iocp = NULL;
#endif
}

static int
event_base_cancel_single_callback_(struct event_base *base,
    struct event_callback *evcb,
    int run_finalizers)
{
	int result = 0;

	if (evcb->evcb_flags & EVLIST_INIT) {
		struct event *ev = event_callback_to_event(evcb);
		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
			event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
			result = 1;
		}
	} else {
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		event_callback_cancel_nolock_(base, evcb, 1);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
		result = 1;
	}

	if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
		switch (evcb->evcb_closure) {
		case EV_CLOSURE_EVENT_FINALIZE:
		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
			struct event *ev = event_callback_to_event(evcb);
			ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
			if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
				mm_free(ev);
			break;
		}
		case EV_CLOSURE_CB_FINALIZE:
			evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
			break;
		default:
			break;
		}
	}
	return result;
}

static int event_base_free_queues_(struct event_base *base, int run_finalizers)
{
	int deleted = 0, i;

	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb, *next;
		for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
			next = TAILQ_NEXT(evcb, evcb_active_next);
			deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
			evcb = next;
		}
	}

	{
		struct event_callback *evcb;
		while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
			deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
		}
	}

	return deleted;
}

static void
event_base_free_(struct event_base *base, int run_finalizers)
{
	int i, n_deleted = 0;
	struct event *ev;
	/* XXXX grab the lock? If there is contention when one thread frees
	 * the base, then the contending thread will be very sad soon. */

	/* event_base_free(NULL) is how to free the current_base if we
	 * made it with event_init and forgot to hold a reference to it. */
	if (base == NULL && current_base)
		base = current_base;
	/* Don't actually free NULL. */
	if (base == NULL) {
		event_warnx("%s: no base to free", __func__);
		return;
	}
	/* XXX(niels) - check for internal events first */

#ifdef _WIN32
	event_base_stop_iocp_(base);
#endif

	/* threading fds if we have them */
	if (base->th_notify_fd[0] != -1) {
		event_del(&base->th_notify);
		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
		if (base->th_notify_fd[1] != -1)
			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
		base->th_notify_fd[0] = -1;
		base->th_notify_fd[1] = -1;
		event_debug_unassign(&base->th_notify);
	}

	/* Delete all non-internal events. */
	evmap_delete_all_(base);

	while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
		event_del(ev);
		++n_deleted;
	}
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		event_del(&ctl->timeout_event); /* Internal; doesn't count */
		event_debug_unassign(&ctl->timeout_event);
		for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
			struct event *next = TAILQ_NEXT(ev,
			    ev_timeout_pos.ev_next_with_common_timeout);
			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
				event_del(ev);
				++n_deleted;
			}
			ev = next;
		}
		mm_free(ctl);
	}
	if (base->common_timeout_queues)
		mm_free(base->common_timeout_queues);

	for (;;) {
		/* A finalizer can register yet another finalizer from within
		 * its callback, and if that new finalizer lands in
		 * active_later_queue it can be moved into activequeues, so we
		 * would have events in activequeues after this function
		 * returns, which is not what we want (we even have an
		 * assertion for this).
		 *
		 * A simple case is a bufferevent with an underlying
		 * bufferevent (i.e. filters).
		 */
		int i = event_base_free_queues_(base, run_finalizers);
		event_debug(("%s: %d events freed", __func__, i));
		if (!i) {
			break;
		}
		n_deleted += i;
	}

	if (n_deleted)
		event_debug(("%s: %d events were still set in base",
		    __func__, n_deleted));

	while (LIST_FIRST(&base->once_events)) {
		struct event_once *eonce = LIST_FIRST(&base->once_events);
		LIST_REMOVE(eonce, next_once);
		mm_free(eonce);
	}

	if (base->evsel != NULL && base->evsel->dealloc != NULL)
		base->evsel->dealloc(base);

	for (i = 0; i < base->nactivequeues; ++i)
		EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));

	EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
	min_heap_dtor_(&base->timeheap);

	mm_free(base->activequeues);

	evmap_io_clear_(&base->io);
	evmap_signal_clear_(&base->sigmap);
	event_changelist_freemem_(&base->changelist);

	EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
	EVTHREAD_FREE_COND(base->current_event_cond);

	/* If we're freeing current_base, there won't be a current_base. */
	if (base == current_base)
		current_base = NULL;
	mm_free(base);
}

void
event_base_free_nofinalize(struct event_base *base)
{
	event_base_free_(base, 0);
}

void
event_base_free(struct event_base *base)
{
	event_base_free_(base, 1);
}

/* Fake eventop; used to disable the backend temporarily inside event_reinit
 * so that we can call event_del() on an event without telling the backend.
 */
static int
nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
    short events, void *fdinfo)
{
	return 0;
}

const struct eventop nil_eventop = {
	"nil",
	NULL, /* init: unused. */
	NULL, /* add: unused. */
	nil_backend_del, /* del: used, so needs to be killed. */
	NULL, /* dispatch: unused. */
	NULL, /* dealloc: unused. */
	0, 0, 0
};

/* reinitialize the event base after a fork */
int
event_reinit(struct event_base *base)
{
	const struct eventop *evsel;
	int res = 0;
	int was_notifiable = 0;
	int had_signal_added = 0;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	evsel = base->evsel;

	/* check if this event mechanism requires reinit on the backend */
	if (evsel->need_reinit) {
		/* We're going to call event_del() on our notify events (the
		 * ones that tell about signals and wakeup events).  But we
		 * don't actually want to tell the backend to change its
		 * state, since it might still share some resource (a kqueue,
		 * an epoll fd) with the parent process, and we don't want to
		 * delete the fds from _that_ backend, so we temporarily stub
		 * out the evsel with a replacement.
		 */
		base->evsel = &nil_eventop;
	}

	/* We need to re-create a new signal-notification fd and a new
	 * thread-notification fd.  Otherwise, we'll still share those with
	 * the parent process, which would make any notification sent to them
	 * get received by one or both of the event loops, more or less at
	 * random.
	 */
	if (base->sig.ev_signal_added) {
		event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
		event_debug_unassign(&base->sig.ev_signal);
		memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
		had_signal_added = 1;
		base->sig.ev_signal_added = 0;
	}
	if (base->sig.ev_signal_pair[0] != -1)
		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
	if (base->sig.ev_signal_pair[1] != -1)
		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
	if (base->th_notify_fn != NULL) {
		was_notifiable = 1;
		base->th_notify_fn = NULL;
	}
	if (base->th_notify_fd[0] != -1) {
		event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
		if (base->th_notify_fd[1] != -1)
			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
		base->th_notify_fd[0] = -1;
		base->th_notify_fd[1] = -1;
		event_debug_unassign(&base->th_notify);
	}

	/* Replace the original evsel. */
	base->evsel = evsel;

	if (evsel->need_reinit) {
		/* Reconstruct the backend through brute-force, so that we do
		 * not share any structures with the parent process.  For some
		 * backends, this is necessary: epoll and kqueue, for
		 * instance, have events associated with a kernel
		 * structure.  If we didn't reinitialize, we'd share that
		 * structure with the parent process, and any changes made by
		 * the parent would affect our backend's behavior (and vice
		 * versa).
		 */
		if (base->evsel->dealloc != NULL)
			base->evsel->dealloc(base);
		base->evbase = evsel->init(base);
		if (base->evbase == NULL) {
			event_errx(1,
			    "%s: could not reinitialize event mechanism",
			    __func__);
			res = -1;
			goto done;
		}

		/* Empty out the changelist (if any): we are starting from a
		 * blank slate. */
		event_changelist_freemem_(&base->changelist);

		/* Tell the event maps to re-inform the backend about all
		 * pending events. This will make the signal notification
		 * event get re-created if necessary. */
		if (evmap_reinit_(base) < 0)
			res = -1;
	} else {
		res = evsig_init_(base);
		if (res == 0 && had_signal_added) {
			res = event_add_nolock_(&base->sig.ev_signal, NULL, 0);
			if (res == 0)
				base->sig.ev_signal_added = 1;
		}
	}

	/* If we were notifiable before, and nothing just exploded, become
	 * notifiable again. */
	if (was_notifiable && res == 0)
		res = evthread_make_base_notifiable_nolock_(base);

done:
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return (res);
}
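
/* Illustrative sketch (not part of libevent): after fork(), the child must
 * call event_reinit() before reusing an inherited event_base, so that
 * backends such as epoll or kqueue stop sharing kernel state with the
 * parent.  The surrounding process logic is hypothetical. */
#if 0
static int
fork_and_reinit_example(struct event_base *base)
{
	pid_t pid = fork();
	if (pid == 0) {
		/* child: rebuild the backend state that fork() duplicated */
		if (event_reinit(base) < 0)
			return -1;
		return event_base_dispatch(base);
	}
	return 0;	/* parent keeps using 'base' unchanged */
}
#endif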

/* Get the monotonic time for this event_base' timer */
int
event_gettime_monotonic(struct event_base *base, struct timeval *tv)
{
	int rv = -1;

	if (base && tv) {
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
	}

	return rv;
}

const char **
event_get_supported_methods(void)
{
	static const char **methods = NULL;
	const struct eventop **method;
	const char **tmp;
	int i = 0, k;

	/* count all methods */
	for (method = &eventops[0]; *method != NULL; ++method) {
		++i;
	}

	/* allocate one more than we need for the NULL pointer */
	tmp = mm_calloc((i + 1), sizeof(char *));
	if (tmp == NULL)
		return (NULL);

	/* populate the array with the supported methods */
	for (k = 0, i = 0; eventops[k] != NULL; ++k) {
		tmp[i++] = eventops[k]->name;
	}
	tmp[i] = NULL;

	if (methods != NULL)
		mm_free((char **)methods);

	methods = tmp;

	return (methods);
}
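
/* Illustrative sketch (not part of libevent): callers normally just walk the
 * NULL-terminated array to see which backends this build supports.  The
 * helper name is hypothetical. */
#if 0
static void
print_supported_methods_example(void)
{
	const char **methods = event_get_supported_methods();
	int i;
	if (!methods)
		return;
	for (i = 0; methods[i] != NULL; ++i)
		printf("supported backend: %s\n", methods[i]);
}
#endif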

struct event_config *
event_config_new(void)
{
	struct event_config *cfg = mm_calloc(1, sizeof(*cfg));

	if (cfg == NULL)
		return (NULL);

	TAILQ_INIT(&cfg->entries);
	cfg->max_dispatch_interval.tv_sec = -1;
	cfg->max_dispatch_callbacks = INT_MAX;
	cfg->limit_callbacks_after_prio = 1;

	return (cfg);
}

static void
event_config_entry_free(struct event_config_entry *entry)
{
	if (entry->avoid_method != NULL)
		mm_free((char *)entry->avoid_method);
	mm_free(entry);
}

void
event_config_free(struct event_config *cfg)
{
	struct event_config_entry *entry;

	while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
		TAILQ_REMOVE(&cfg->entries, entry, next);
		event_config_entry_free(entry);
	}
	mm_free(cfg);
}

int
event_config_set_flag(struct event_config *cfg, int flag)
{
	if (!cfg)
		return -1;
	cfg->flags |= flag;
	return 0;
}

int
event_config_avoid_method(struct event_config *cfg, const char *method)
{
	struct event_config_entry *entry = mm_malloc(sizeof(*entry));
	if (entry == NULL)
		return (-1);

	if ((entry->avoid_method = mm_strdup(method)) == NULL) {
		mm_free(entry);
		return (-1);
	}

	TAILQ_INSERT_TAIL(&cfg->entries, entry, next);

	return (0);
}

int
event_config_require_features(struct event_config *cfg,
    int features)
{
	if (!cfg)
		return (-1);
	cfg->require_features = features;
	return (0);
}

int
event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
{
	if (!cfg)
		return (-1);
	cfg->n_cpus_hint = cpus;
	return (0);
}

int
event_config_set_max_dispatch_interval(struct event_config *cfg,
    const struct timeval *max_interval, int max_callbacks, int min_priority)
{
	if (max_interval)
		memcpy(&cfg->max_dispatch_interval, max_interval,
		    sizeof(struct timeval));
	else
		cfg->max_dispatch_interval.tv_sec = -1;
	cfg->max_dispatch_callbacks =
	    max_callbacks >= 0 ? max_callbacks : INT_MAX;
	if (min_priority < 0)
		min_priority = 0;
	cfg->limit_callbacks_after_prio = min_priority;
	return (0);
}
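
/* Illustrative sketch (not part of libevent): cap how long one pass of the
 * loop may spend running callbacks -- here at most 10 ms or 16 callbacks
 * before checking for new events again, applied to priorities >= 1 so that
 * priority-0 callbacks are never cut short.  The numbers are arbitrary. */
#if 0
static void
limit_dispatch_example(struct event_config *cfg)
{
	struct timeval max_interval = { 0, 10000 };	/* 10 ms */
	event_config_set_max_dispatch_interval(cfg, &max_interval, 16, 1);
}
#endif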

int
event_priority_init(int npriorities)
{
	return event_base_priority_init(current_base, npriorities);
}

int
event_base_priority_init(struct event_base *base, int npriorities)
{
	int i, r;
	r = -1;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
	    || npriorities >= EVENT_MAX_PRIORITIES)
		goto err;

	if (npriorities == base->nactivequeues)
		goto ok;

	if (base->nactivequeues) {
		mm_free(base->activequeues);
		base->nactivequeues = 0;
	}

	/* Allocate our priority queues */
	base->activequeues = (struct evcallback_list *)
	    mm_calloc(npriorities, sizeof(struct evcallback_list));
	if (base->activequeues == NULL) {
		event_warn("%s: calloc", __func__);
		goto err;
	}
	base->nactivequeues = npriorities;

	for (i = 0; i < base->nactivequeues; ++i) {
		TAILQ_INIT(&base->activequeues[i]);
	}

ok:
	r = 0;
err:
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return (r);
}

int
event_base_get_npriorities(struct event_base *base)
{
	int n;
	if (base == NULL)
		base = current_base;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	n = base->nactivequeues;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return (n);
}

int
event_base_get_num_events(struct event_base *base, unsigned int type)
{
	int r = 0;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (type & EVENT_BASE_COUNT_ACTIVE)
		r += base->event_count_active;

	if (type & EVENT_BASE_COUNT_VIRTUAL)
		r += base->virtual_event_count;

	if (type & EVENT_BASE_COUNT_ADDED)
		r += base->event_count;

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	return r;
}

int
event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
{
	int r = 0;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (type & EVENT_BASE_COUNT_ACTIVE) {
		r += base->event_count_active_max;
		if (clear)
			base->event_count_active_max = 0;
	}

	if (type & EVENT_BASE_COUNT_VIRTUAL) {
		r += base->virtual_event_count_max;
		if (clear)
			base->virtual_event_count_max = 0;
	}

	if (type & EVENT_BASE_COUNT_ADDED) {
		r += base->event_count_max;
		if (clear)
			base->event_count_max = 0;
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	return r;
}

/* Returns true iff we're currently watching any events. */
static int
event_haveevents(struct event_base *base)
{
	/* Caller must hold th_base_lock */
	return (base->virtual_event_count > 0 || base->event_count > 0);
}

/* "closure" function called when processing active signal events */
static inline void
event_signal_closure(struct event_base *base, struct event *ev)
{
	short ncalls;
	int should_break;

	/* Allows deletes to work */
	ncalls = ev->ev_ncalls;
	if (ncalls != 0)
		ev->ev_pncalls = &ncalls;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	while (ncalls) {
		ncalls--;
		ev->ev_ncalls = ncalls;
		if (ncalls == 0)
			ev->ev_pncalls = NULL;
		(*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);

		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		should_break = base->event_break;
		EVBASE_RELEASE_LOCK(base, th_base_lock);

		if (should_break) {
			if (ncalls != 0)
				ev->ev_pncalls = NULL;
			return;
		}
	}
}
/* Common timeouts are special timeouts that are handled as queues rather than
 * in the minheap.  This is more efficient than the minheap if we happen to
 * know that we're going to get several thousand timeout events all with
 * the same timeout value.
 *
 * Since all our timeout handling code assumes timevals can be copied,
 * assigned, etc., we can't use a "magic pointer" to encode these common
 * timeouts.  Searching through a list to see whether every timeout is common
 * could also get inefficient.  Instead, we take advantage of the fact that
 * tv_usec is 32 bits long, but only uses 20 of those bits (since it can never
 * be over 999999.)  We use the top bits to encode 4 bits of magic number, and
 * 8 bits of index into the event_base's array of common timeouts.
 */
  1196. #define MICROSECONDS_MASK COMMON_TIMEOUT_MICROSECONDS_MASK
  1197. #define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
  1198. #define COMMON_TIMEOUT_IDX_SHIFT 20
  1199. #define COMMON_TIMEOUT_MASK 0xf0000000
  1200. #define COMMON_TIMEOUT_MAGIC 0x50000000
  1201. #define COMMON_TIMEOUT_IDX(tv) \
  1202. (((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
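/* Worked example (illustration only; assumes MICROSECONDS_MASK expands to
 * 0x000fffff): a 5.5-second duration registered as the third common timeout
 * (index 2) is encoded as
 *
 *     tv_sec  = 5
 *     tv_usec = COMMON_TIMEOUT_MAGIC | (2 << COMMON_TIMEOUT_IDX_SHIFT) | 500000
 *             = 0x50000000 | 0x00200000 | 0x0007a120 = 0x5027a120
 *
 * so COMMON_TIMEOUT_IDX() recovers 2 and the low 20 bits still hold the
 * microseconds.
 */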
  1203. /** Return true iff if 'tv' is a common timeout in 'base' */
  1204. static inline int
  1205. is_common_timeout(const struct timeval *tv,
  1206. const struct event_base *base)
  1207. {
  1208. int idx;
  1209. if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
  1210. return 0;
  1211. idx = COMMON_TIMEOUT_IDX(tv);
  1212. return idx < base->n_common_timeouts;
  1213. }
  1214. /* True iff tv1 and tv2 have the same common-timeout index, or if neither
  1215. * one is a common timeout. */
  1216. static inline int
  1217. is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
  1218. {
  1219. return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
  1220. (tv2->tv_usec & ~MICROSECONDS_MASK);
  1221. }
  1222. /** Requires that 'tv' is a common timeout. Return the corresponding
  1223. * common_timeout_list. */
  1224. static inline struct common_timeout_list *
  1225. get_common_timeout_list(struct event_base *base, const struct timeval *tv)
  1226. {
  1227. return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
  1228. }
  1229. #if 0
  1230. static inline int
  1231. common_timeout_ok(const struct timeval *tv,
  1232. struct event_base *base)
  1233. {
  1234. const struct timeval *expect =
  1235. &get_common_timeout_list(base, tv)->duration;
  1236. return tv->tv_sec == expect->tv_sec &&
  1237. tv->tv_usec == expect->tv_usec;
  1238. }
  1239. #endif
  1240. /* Add the timeout for the first event in given common timeout list to the
  1241. * event_base's minheap. */
  1242. static void
  1243. common_timeout_schedule(struct common_timeout_list *ctl,
  1244. const struct timeval *now, struct event *head)
  1245. {
  1246. struct timeval timeout = head->ev_timeout;
  1247. timeout.tv_usec &= MICROSECONDS_MASK;
  1248. event_add_nolock_(&ctl->timeout_event, &timeout, 1);
  1249. }
  1250. /* Callback: invoked when the timeout for a common timeout queue triggers.
  1251. * This means that (at least) the first event in that queue should be run,
  1252. * and the timeout should be rescheduled if there are more events. */
  1253. static void
  1254. common_timeout_callback(evutil_socket_t fd, short what, void *arg)
  1255. {
  1256. struct timeval now;
  1257. struct common_timeout_list *ctl = arg;
  1258. struct event_base *base = ctl->base;
  1259. struct event *ev = NULL;
  1260. EVBASE_ACQUIRE_LOCK(base, th_base_lock);
  1261. gettime(base, &now);
  1262. while (1) {
  1263. ev = TAILQ_FIRST(&ctl->events);
  1264. if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
  1265. (ev->ev_timeout.tv_sec == now.tv_sec &&
  1266. (ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
  1267. break;
  1268. event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
  1269. event_active_nolock_(ev, EV_TIMEOUT, 1);
  1270. }
  1271. if (ev)
  1272. common_timeout_schedule(ctl, &now, ev);
  1273. EVBASE_RELEASE_LOCK(base, th_base_lock);
  1274. }
  1275. #define MAX_COMMON_TIMEOUTS 256
  1276. const struct timeval *
  1277. event_base_init_common_timeout(struct event_base *base,
  1278. const struct timeval *duration)
  1279. {
  1280. int i;
  1281. struct timeval tv;
  1282. const struct timeval *result=NULL;
  1283. struct common_timeout_list *new_ctl;
  1284. EVBASE_ACQUIRE_LOCK(base, th_base_lock);
  1285. if (duration->tv_usec > 1000000) {
  1286. memcpy(&tv, duration, sizeof(struct timeval));
  1287. if (is_common_timeout(duration, base))
  1288. tv.tv_usec &= MICROSECONDS_MASK;
  1289. tv.tv_sec += tv.tv_usec / 1000000;
  1290. tv.tv_usec %= 1000000;
  1291. duration = &tv;
  1292. }
  1293. for (i = 0; i < base->n_common_timeouts; ++i) {
  1294. const struct common_timeout_list *ctl =
  1295. base->common_timeout_queues[i];
  1296. if (duration->tv_sec == ctl->duration.tv_sec &&
  1297. duration->tv_usec ==
  1298. (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
  1299. EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
  1300. result = &ctl->duration;
  1301. goto done;
  1302. }
  1303. }
  1304. if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
  1305. event_warnx("%s: Too many common timeouts already in use; "
  1306. "we only support %d per event_base", __func__,
  1307. MAX_COMMON_TIMEOUTS);
  1308. goto done;
  1309. }
  1310. if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
  1311. int n = base->n_common_timeouts < 16 ? 16 :
  1312. base->n_common_timeouts*2;
  1313. struct common_timeout_list **newqueues =
  1314. mm_realloc(base->common_timeout_queues,
  1315. n*sizeof(struct common_timeout_queue *));
  1316. if (!newqueues) {
  1317. event_warn("%s: realloc",__func__);
  1318. goto done;
  1319. }
  1320. base->n_common_timeouts_allocated = n;
  1321. base->common_timeout_queues = newqueues;
  1322. }
  1323. new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
  1324. if (!new_ctl) {
  1325. event_warn("%s: calloc",__func__);
  1326. goto done;
  1327. }
  1328. TAILQ_INIT(&new_ctl->events);
  1329. new_ctl->duration.tv_sec = duration->tv_sec;
  1330. new_ctl->duration.tv_usec =
  1331. duration->tv_usec | COMMON_TIMEOUT_MAGIC |
  1332. (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
  1333. evtimer_assign(&new_ctl->timeout_event, base,
  1334. common_timeout_callback, new_ctl);
  1335. new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
  1336. event_priority_set(&new_ctl->timeout_event, 0);
  1337. new_ctl->base = base;
  1338. base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
  1339. result = &new_ctl->duration;
  1340. done:
  1341. if (result)
  1342. EVUTIL_ASSERT(is_common_timeout(result, base));
  1343. EVBASE_RELEASE_LOCK(base, th_base_lock);
  1344. return result;
  1345. }
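/* Usage sketch (illustration only; "base", "cb" and "n" are hypothetical):
 * events added with the returned timeval are kept in a per-duration queue
 * instead of the minheap.
 *
 *     struct timeval ten_sec = { 10, 0 };
 *     const struct timeval *common =
 *         event_base_init_common_timeout(base, &ten_sec);
 *     for (int i = 0; i < n; ++i) {
 *         struct event *ev = event_new(base, -1, 0, cb, NULL);
 *         event_add(ev, common);      // all n events share one timeout queue
 *     }
 */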
  1346. /* Closure function invoked when we're activating a persistent event. */
  1347. static inline void
  1348. event_persist_closure(struct event_base *base, struct event *ev)
  1349. {
  1350. void (*evcb_callback)(evutil_socket_t, short, void *);
	// Other fields of *ev that must be saved before we release the lock
	// and execute the callback
  1352. evutil_socket_t evcb_fd;
  1353. short evcb_res;
  1354. void *evcb_arg;
  1355. /* reschedule the persistent event if we have a timeout. */
  1356. if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
  1357. /* If there was a timeout, we want it to run at an interval of
  1358. * ev_io_timeout after the last time it was _scheduled_ for,
  1359. * not ev_io_timeout after _now_. If it fired for another
  1360. * reason, though, the timeout ought to start ticking _now_. */
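		/* For example (illustration only): a 5-second persistent
		 * timeout last scheduled for t=100 that is processed at
		 * t=100.3 is rescheduled for t=105, keeping the period
		 * stable; if the same event instead fires because its fd
		 * became readable at t=100.3, the next timeout is scheduled
		 * for t=105.3 (relative to now). */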
  1361. struct timeval run_at, relative_to, delay, now;
  1362. ev_uint32_t usec_mask = 0;
  1363. EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
  1364. &ev->ev_io_timeout));
  1365. gettime(base, &now);
  1366. if (is_common_timeout(&ev->ev_timeout, base)) {
  1367. delay = ev->ev_io_timeout;
  1368. usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
  1369. delay.tv_usec &= MICROSECONDS_MASK;
  1370. if (ev->ev_res & EV_TIMEOUT) {
  1371. relative_to = ev->ev_timeout;
  1372. relative_to.tv_usec &= MICROSECONDS_MASK;
  1373. } else {
  1374. relative_to = now;
  1375. }
  1376. } else {
  1377. delay = ev->ev_io_timeout;
  1378. if (ev->ev_res & EV_TIMEOUT) {
  1379. relative_to = ev->ev_timeout;
  1380. } else {
  1381. relative_to = now;
  1382. }
  1383. }
  1384. evutil_timeradd(&relative_to, &delay, &run_at);
  1385. if (evutil_timercmp(&run_at, &now, <)) {
  1386. /* Looks like we missed at least one invocation due to
  1387. * a clock jump, not running the event loop for a
  1388. * while, really slow callbacks, or
  1389. * something. Reschedule relative to now.
  1390. */
  1391. evutil_timeradd(&now, &delay, &run_at);
  1392. }
  1393. run_at.tv_usec |= usec_mask;
  1394. event_add_nolock_(ev, &run_at, 1);
  1395. }
  1396. // Save our callback before we release the lock
  1397. evcb_callback = ev->ev_callback;
  1398. evcb_fd = ev->ev_fd;
  1399. evcb_res = ev->ev_res;
  1400. evcb_arg = ev->ev_arg;
  1401. // Release the lock
  1402. EVBASE_RELEASE_LOCK(base, th_base_lock);
  1403. // Execute the callback
  1404. (evcb_callback)(evcb_fd, evcb_res, evcb_arg);
  1405. }
  1406. /*
  1407. Helper for event_process_active to process all the events in a single queue,
  1408. releasing the lock as we go. This function requires that the lock be held
  1409. when it's invoked. Returns -1 if we get a signal or an event_break that
  1410. means we should stop processing any active events now. Otherwise returns
  1411. the number of non-internal event_callbacks that we processed.
  1412. */
  1413. static int
  1414. event_process_active_single_queue(struct event_base *base,
  1415. struct evcallback_list *activeq,
  1416. int max_to_process, const struct timeval *endtime)
  1417. {
  1418. struct event_callback *evcb;
  1419. int count = 0;
  1420. EVUTIL_ASSERT(activeq != NULL);
  1421. for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
  1422. struct event *ev=NULL;
  1423. if (evcb->evcb_flags & EVLIST_INIT) {
  1424. ev = event_callback_to_event(evcb);
  1425. if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
  1426. event_queue_remove_active(base, evcb);
  1427. else
  1428. event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
  1429. event_debug((
  1430. "event_process_active: event: %p, %s%s%scall %p",
  1431. ev,
  1432. ev->ev_res & EV_READ ? "EV_READ " : " ",
  1433. ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
  1434. ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
  1435. ev->ev_callback));
  1436. } else {
  1437. event_queue_remove_active(base, evcb);
  1438. event_debug(("event_process_active: event_callback %p, "
  1439. "closure %d, call %p",
  1440. evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback));
  1441. }
  1442. if (!(evcb->evcb_flags & EVLIST_INTERNAL))
  1443. ++count;
  1444. base->current_event = evcb;
  1445. #ifndef EVENT__DISABLE_THREAD_SUPPORT
  1446. base->current_event_waiters = 0;
  1447. #endif
  1448. switch (evcb->evcb_closure) {
  1449. case EV_CLOSURE_EVENT_SIGNAL:
  1450. EVUTIL_ASSERT(ev != NULL);
  1451. event_signal_closure(base, ev);
  1452. break;
  1453. case EV_CLOSURE_EVENT_PERSIST:
  1454. EVUTIL_ASSERT(ev != NULL);
  1455. event_persist_closure(base, ev);
  1456. break;
  1457. case EV_CLOSURE_EVENT: {
  1458. void (*evcb_callback)(evutil_socket_t, short, void *);
  1459. short res;
  1460. EVUTIL_ASSERT(ev != NULL);
  1461. evcb_callback = *ev->ev_callback;
  1462. res = ev->ev_res;
  1463. EVBASE_RELEASE_LOCK(base, th_base_lock);
  1464. evcb_callback(ev->ev_fd, res, ev->ev_arg);
  1465. }
  1466. break;
  1467. case EV_CLOSURE_CB_SELF: {
  1468. void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
  1469. EVBASE_RELEASE_LOCK(base, th_base_lock);
  1470. evcb_selfcb(evcb, evcb->evcb_arg);
  1471. }
  1472. break;
  1473. case EV_CLOSURE_EVENT_FINALIZE:
  1474. case EV_CLOSURE_EVENT_FINALIZE_FREE: {
  1475. void (*evcb_evfinalize)(struct event *, void *);
  1476. int evcb_closure = evcb->evcb_closure;
  1477. EVUTIL_ASSERT(ev != NULL);
  1478. base->current_event = NULL;
  1479. evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
  1480. EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
  1481. EVBASE_RELEASE_LOCK(base, th_base_lock);
  1482. event_debug_note_teardown_(ev);
  1483. evcb_evfinalize(ev, ev->ev_arg);
  1484. if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
  1485. mm_free(ev);
  1486. }
  1487. break;
  1488. case EV_CLOSURE_CB_FINALIZE: {
  1489. void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
  1490. base->current_event = NULL;
  1491. EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
  1492. EVBASE_RELEASE_LOCK(base, th_base_lock);
  1493. evcb_cbfinalize(evcb, evcb->evcb_arg);
  1494. }
  1495. break;
  1496. default:
  1497. EVUTIL_ASSERT(0);
  1498. }
  1499. EVBASE_ACQUIRE_LOCK(base, th_base_lock);
  1500. base->current_event = NULL;
  1501. #ifndef EVENT__DISABLE_THREAD_SUPPORT
  1502. if (base->current_event_waiters) {
  1503. base->current_event_waiters = 0;
  1504. EVTHREAD_COND_BROADCAST(base->current_event_cond);
  1505. }
  1506. #endif
  1507. if (base->event_break)
  1508. return -1;
  1509. if (count >= max_to_process)
  1510. return count;
  1511. if (count && endtime) {
  1512. struct timeval now;
  1513. update_time_cache(base);
  1514. gettime(base, &now);
  1515. if (evutil_timercmp(&now, endtime, >=))
  1516. return count;
  1517. }
  1518. if (base->event_continue)
  1519. break;
  1520. }
  1521. return count;
  1522. }
/*
 * Active events are stored in priority queues.  Lower priorities (numerically
 * smaller values) are always processed before higher priorities.  Low
 * priority events can starve high priority ones.
 */
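/* Usage sketch (illustration only; "base", "fd1", "fd2", "urgent_cb" and
 * "bulk_cb" are hypothetical): give urgent work a lower priority number so it
 * runs before bulk work.
 *
 *     event_base_priority_init(base, 2);
 *     struct event *urgent = event_new(base, fd1, EV_READ|EV_PERSIST,
 *         urgent_cb, NULL);
 *     struct event *bulk = event_new(base, fd2, EV_READ|EV_PERSIST,
 *         bulk_cb, NULL);
 *     event_priority_set(urgent, 0);   // processed before priority 1
 *     event_priority_set(bulk, 1);
 */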
  1528. static int
  1529. event_process_active(struct event_base *base)
  1530. {
  1531. /* Caller must hold th_base_lock */
  1532. struct evcallback_list *activeq = NULL;
  1533. int i, c = 0;
  1534. const struct timeval *endtime;
  1535. struct timeval tv;
  1536. const int maxcb = base->max_dispatch_callbacks;
  1537. const int limit_after_prio = base->limit_callbacks_after_prio;
  1538. if (base->max_dispatch_time.tv_sec >= 0) {
  1539. update_time_cache(base);
  1540. gettime(base, &tv);
  1541. evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
  1542. endtime = &tv;
  1543. } else {
  1544. endtime = NULL;
  1545. }
  1546. for (i = 0; i < base->nactivequeues; ++i) {
  1547. if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
  1548. base->event_running_priority = i;
  1549. activeq = &base->activequeues[i];
  1550. if (i < limit_after_prio)
  1551. c = event_process_active_single_queue(base, activeq,
  1552. INT_MAX, NULL);
  1553. else
  1554. c = event_process_active_single_queue(base, activeq,
  1555. maxcb, endtime);
  1556. if (c < 0) {
  1557. goto done;
  1558. } else if (c > 0)
  1559. break; /* Processed a real event; do not
  1560. * consider lower-priority events */
  1561. /* If we get here, all of the events we processed
  1562. * were internal. Continue. */
  1563. }
  1564. }
  1565. done:
  1566. base->event_running_priority = -1;
  1567. return c;
  1568. }
  1569. /*
  1570. * Wait continuously for events. We exit only if no events are left.
  1571. */
  1572. int
  1573. event_dispatch(void)
  1574. {
  1575. return (event_loop(0));
  1576. }
  1577. int
  1578. event_base_dispatch(struct event_base *event_base)
  1579. {
  1580. return (event_base_loop(event_base, 0));
  1581. }
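/* Usage sketch (illustration only; "base" is hypothetical): the blocking
 * dispatch loop versus a single non-blocking pass over whatever happens to be
 * ready right now.
 *
 *     event_base_dispatch(base);               // run until no events remain
 *     event_base_loop(base, EVLOOP_NONBLOCK);  // poll once without waiting
 */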
  1582. const char *
  1583. event_base_get_method(const struct event_base *base)
  1584. {
  1585. EVUTIL_ASSERT(base);
  1586. return (base->evsel->name);
  1587. }
  1588. /** Callback: used to implement event_base_loopexit by telling the event_base
  1589. * that it's time to exit its loop. */
  1590. static void
  1591. event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
  1592. {
  1593. struct event_base *base = arg;
  1594. base->event_gotterm = 1;
  1595. }
  1596. int
  1597. event_loopexit(const struct timeval *tv)
  1598. {
  1599. return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
  1600. current_base, tv));
  1601. }
  1602. int
  1603. event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
  1604. {
  1605. return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
  1606. event_base, tv));
  1607. }
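/* Usage sketch (illustration only; "base" is hypothetical): stop looping ten
 * seconds from now, or right after the currently active callbacks when tv is
 * NULL.
 *
 *     struct timeval ten_sec = { 10, 0 };
 *     event_base_loopexit(base, &ten_sec);
 */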
  1608. int
  1609. event_loopbreak(void)
  1610. {
  1611. return (event_base_loopbreak(current_base));
  1612. }
  1613. int
  1614. event_base_loopbreak(struct event_base *event_base)
  1615. {
  1616. int r = 0;
  1617. if (event_base == NULL)
  1618. return (-1);
  1619. EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
  1620. event_base->event_break = 1;
  1621. if (EVBASE_NEED_NOTIFY(event_base)) {
  1622. r = evthread_notify_base(event_base);
  1623. } else {
  1624. r = (0);
  1625. }
  1626. EVBASE_RELEASE_LOCK(event_base, th_base_lock);
  1627. return r;
  1628. }
  1629. int
  1630. event_base_loopcontinue(struct event_base *event_base)
  1631. {
  1632. int r = 0;
  1633. if (event_base == NULL)
  1634. return (-1);
  1635. EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
  1636. event_base->event_continue = 1;
  1637. if (EVBASE_NEED_NOTIFY(event_base)) {
  1638. r = evthread_notify_base(event_base);
  1639. } else {
  1640. r = (0);
  1641. }
  1642. EVBASE_RELEASE_LOCK(event_base, th_base_lock);
  1643. return r;
  1644. }
  1645. int
  1646. event_base_got_break(struct event_base *event_base)
  1647. {
  1648. int res;
  1649. EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
  1650. res = event_base->event_break;
  1651. EVBASE_RELEASE_LOCK(event_base, th_base_lock);
  1652. return res;
  1653. }
  1654. int
  1655. event_base_got_exit(struct event_base *event_base)
  1656. {
  1657. int res;
  1658. EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
  1659. res = event_base->event_gotterm;
  1660. EVBASE_RELEASE_LOCK(event_base, th_base_lock);
  1661. return res;
  1662. }
  1663. /* not thread safe */
  1664. int
  1665. event_loop(int flags)
  1666. {
  1667. return event_base_loop(current_base, flags);
  1668. }
  1669. int
  1670. event_base_loop(struct event_base *base, int flags)
  1671. {
  1672. const struct eventop *evsel = base->evsel;
  1673. struct timeval tv;
  1674. struct timeval *tv_p;
  1675. int res, done, retval = 0;
  1676. /* Grab the lock. We will release it inside evsel.dispatch, and again
  1677. * as we invoke user callbacks. */
  1678. EVBASE_ACQUIRE_LOCK(base, th_base_lock);
  1679. if (base->running_loop) {
  1680. event_warnx("%s: reentrant invocation. Only one event_base_loop"
  1681. " can run on each event_base at once.", __func__);
  1682. EVBASE_RELEASE_LOCK(base, th_base_lock);
  1683. return -1;
  1684. }
  1685. base->running_loop = 1;
  1686. clear_time_cache(base);
  1687. if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
  1688. evsig_set_base_(base);
  1689. done = 0;
  1690. #ifndef EVENT__DISABLE_THREAD_SUPPORT
  1691. base->th_owner_id = EVTHREAD_GET_ID();
  1692. #endif
  1693. base->event_gotterm = base->event_break = 0;
  1694. while (!done) {
  1695. base->event_continue = 0;
  1696. base->n_deferreds_queued = 0;
  1697. /* Terminate the loop if we have been asked to */
  1698. if (base->event_gotterm) {
  1699. break;
  1700. }
  1701. if (base->event_break) {
  1702. break;
  1703. }
  1704. tv_p = &tv;
  1705. if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
  1706. timeout_next(base, &tv_p);
  1707. } else {
  1708. /*
  1709. * if we have active events, we just poll new events
  1710. * without waiting.
  1711. */
  1712. evutil_timerclear(&tv);
  1713. }
  1714. /* If we have no events, we just exit */
  1715. if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
  1716. !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
  1717. event_debug(("%s: no events registered.", __func__));
  1718. retval = 1;
  1719. goto done;
  1720. }
  1721. event_queue_make_later_events_active(base);
  1722. clear_time_cache(base);
  1723. res = evsel->dispatch(base, tv_p);
  1724. if (res == -1) {
  1725. event_debug(("%s: dispatch returned unsuccessfully.",
  1726. __func__));
  1727. retval = -1;
  1728. goto done;
  1729. }
  1730. update_time_cache(base);
  1731. timeout_process(base);
  1732. if (N_ACTIVE_CALLBACKS(base)) {
  1733. int n = event_process_active(base);
  1734. if ((flags & EVLOOP_ONCE)
  1735. && N_ACTIVE_CALLBACKS(base) == 0
  1736. && n != 0)
  1737. done = 1;
  1738. } else if (flags & EVLOOP_NONBLOCK)
  1739. done = 1;
  1740. }
  1741. event_debug(("%s: asked to terminate loop.", __func__));
  1742. done:
  1743. clear_time_cache(base);
  1744. base->running_loop = 0;
  1745. EVBASE_RELEASE_LOCK(base, th_base_lock);
  1746. return (retval);
  1747. }
  1748. /* One-time callback to implement event_base_once: invokes the user callback,
  1749. * then deletes the allocated storage */
  1750. static void
  1751. event_once_cb(evutil_socket_t fd, short events, void *arg)
  1752. {
  1753. struct event_once *eonce = arg;
  1754. (*eonce->cb)(fd, events, eonce->arg);
  1755. EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
  1756. LIST_REMOVE(eonce, next_once);
  1757. EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
  1758. event_debug_unassign(&eonce->ev);
  1759. mm_free(eonce);
  1760. }
/* Not threadsafe: uses the global current_base. Schedules an event to run once. */
  1762. int
  1763. event_once(evutil_socket_t fd, short events,
  1764. void (*callback)(evutil_socket_t, short, void *),
  1765. void *arg, const struct timeval *tv)
  1766. {
  1767. return event_base_once(current_base, fd, events, callback, arg, tv);
  1768. }
  1769. /* Schedules an event once */
  1770. int
  1771. event_base_once(struct event_base *base, evutil_socket_t fd, short events,
  1772. void (*callback)(evutil_socket_t, short, void *),
  1773. void *arg, const struct timeval *tv)
  1774. {
  1775. struct event_once *eonce;
  1776. int res = 0;
  1777. int activate = 0;
  1778. if (!base)
  1779. return (-1);
  1780. /* We cannot support signals that just fire once, or persistent
  1781. * events. */
  1782. if (events & (EV_SIGNAL|EV_PERSIST))
  1783. return (-1);
  1784. if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
  1785. return (-1);
  1786. eonce->cb = callback;
  1787. eonce->arg = arg;
  1788. if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
  1789. evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
  1790. if (tv == NULL || ! evutil_timerisset(tv)) {
  1791. /* If the event is going to become active immediately,
  1792. * don't put it on the timeout queue. This is one
  1793. * idiom for scheduling a callback, so let's make
  1794. * it fast (and order-preserving). */
  1795. activate = 1;
  1796. }
  1797. } else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
  1798. events &= EV_READ|EV_WRITE|EV_CLOSED;
  1799. event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
  1800. } else {
  1801. /* Bad event combination */
  1802. mm_free(eonce);
  1803. return (-1);
  1804. }
  1805. if (res == 0) {
  1806. EVBASE_ACQUIRE_LOCK(base, th_base_lock);
  1807. if (activate)
  1808. event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
  1809. else
  1810. res = event_add_nolock_(&eonce->ev, tv, 0);
  1811. if (res != 0) {
  1812. mm_free(eonce);
  1813. return (res);
  1814. } else {
  1815. LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
  1816. }
  1817. EVBASE_RELEASE_LOCK(base, th_base_lock);
  1818. }
  1819. return (0);
  1820. }
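/* Usage sketch (illustration only; "base" and "once_cb" are hypothetical):
 * run a callback exactly once, two seconds from now, without managing a
 * struct event yourself.
 *
 *     struct timeval two_sec = { 2, 0 };
 *     event_base_once(base, -1, EV_TIMEOUT, once_cb, NULL, &two_sec);
 */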
  1821. int
  1822. event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
  1823. {
  1824. if (!base)
  1825. base = current_base;
  1826. if (arg == &event_self_cbarg_ptr_)
  1827. arg = ev;
  1828. if (!(events & EV_SIGNAL))
  1829. event_debug_assert_socket_nonblocking_(fd);
  1830. event_debug_assert_not_added_(ev);
  1831. ev->ev_base = base;
  1832. ev->ev_callback = callback;
  1833. ev->ev_arg = arg;
  1834. ev->ev_fd = fd;
  1835. ev->ev_events = events;
  1836. ev->ev_res = 0;
  1837. ev->ev_flags = EVLIST_INIT;
  1838. ev->ev_ncalls = 0;
  1839. ev->ev_pncalls = NULL;
  1840. if (events & EV_SIGNAL) {
  1841. if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
  1842. event_warnx("%s: EV_SIGNAL is not compatible with "
  1843. "EV_READ, EV_WRITE or EV_CLOSED", __func__);
  1844. return -1;
  1845. }
  1846. ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
  1847. } else {
  1848. if (events & EV_PERSIST) {
  1849. evutil_timerclear(&ev->ev_io_timeout);
  1850. ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
  1851. } else {
  1852. ev->ev_closure = EV_CLOSURE_EVENT;
  1853. }
  1854. }
  1855. min_heap_elem_init_(ev);
  1856. if (base != NULL) {
  1857. /* by default, we put new events into the middle priority */
  1858. ev->ev_pri = base->nactivequeues / 2;
  1859. }
  1860. event_debug_note_setup_(ev);
  1861. return 0;
  1862. }
  1863. int
  1864. event_base_set(struct event_base *base, struct event *ev)
  1865. {
  1866. /* Only innocent events may be assigned to a different base */
  1867. if (ev->ev_flags != EVLIST_INIT)
  1868. return (-1);
  1869. event_debug_assert_is_setup_(ev);
  1870. ev->ev_base = base;
  1871. ev->ev_pri = base->nactivequeues/2;
  1872. return (0);
  1873. }
  1874. void
  1875. event_set(struct event *ev, evutil_socket_t fd, short events,
  1876. void (*callback)(evutil_socket_t, short, void *), void *arg)
  1877. {
  1878. int r;
  1879. r = event_assign(ev, current_base, fd, events, callback, arg);
  1880. EVUTIL_ASSERT(r == 0);
  1881. }
  1882. void *
  1883. event_self_cbarg(void)
  1884. {
  1885. return &event_self_cbarg_ptr_;
  1886. }
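/* Usage sketch (illustration only; "base" and "timeout_cb" are hypothetical):
 * pass the event itself as its own callback argument, so the callback can
 * re-add or delete it.
 *
 *     struct event *ev = event_new(base, -1, EV_PERSIST, timeout_cb,
 *         event_self_cbarg());
 *     struct timeval one_sec = { 1, 0 };
 *     event_add(ev, &one_sec);
 */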
  1887. struct event *
  1888. event_base_get_running_event(struct event_base *base)
  1889. {
  1890. struct event *ev = NULL;
  1891. EVBASE_ACQUIRE_LOCK(base, th_base_lock);
  1892. if (EVBASE_IN_THREAD(base)) {
  1893. struct event_callback *evcb = base->current_event;
  1894. if (evcb->evcb_flags & EVLIST_INIT)
  1895. ev = event_callback_to_event(evcb);
  1896. }
  1897. EVBASE_RELEASE_LOCK(base, th_base_lock);
  1898. return ev;
  1899. }
  1900. struct event *
  1901. event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
  1902. {
  1903. struct event *ev;
  1904. ev = mm_malloc(sizeof(struct event));
  1905. if (ev == NULL)
  1906. return (NULL);
  1907. if (event_assign(ev, base, fd, events, cb, arg) < 0) {
  1908. mm_free(ev);
  1909. return (NULL);
  1910. }
  1911. return (ev);
  1912. }
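/* Usage sketch (illustration only; "base", "fd" and "read_cb" are
 * hypothetical): the usual heap-allocated lifecycle.
 *
 *     struct event *ev = event_new(base, fd, EV_READ|EV_PERSIST, read_cb, NULL);
 *     event_add(ev, NULL);
 *     ...
 *     event_free(ev);     // deletes the event first if it is still added
 */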
  1913. void
  1914. event_free(struct event *ev)
  1915. {
	/* This assertion is disabled so that events which have already been
	 * finalized remain a valid target for event_free(). */
	// event_debug_assert_is_setup_(ev);
  1919. /* make sure that this event won't be coming back to haunt us. */
  1920. event_del(ev);
  1921. event_debug_note_teardown_(ev);
  1922. mm_free(ev);
  1923. }
  1924. void
  1925. event_debug_unassign(struct event *ev)
  1926. {
  1927. event_debug_assert_not_added_(ev);
  1928. event_debug_note_teardown_(ev);
  1929. ev->ev_flags &= ~EVLIST_INIT;
  1930. }
  1931. #define EVENT_FINALIZE_FREE_ 0x10000
  1932. static int
  1933. event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
  1934. {
  1935. ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
  1936. EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;
  1937. event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
  1938. ev->ev_closure = closure;
  1939. ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
  1940. event_active_nolock_(ev, EV_FINALIZE, 1);
  1941. ev->ev_flags |= EVLIST_FINALIZING;
  1942. return 0;
  1943. }
  1944. static int
  1945. event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
  1946. {
  1947. int r;
  1948. struct event_base *base = ev->ev_base;
  1949. if (EVUTIL_FAILURE_CHECK(!base)) {
  1950. event_warnx("%s: event has no event_base set.", __func__);
  1951. return -1;
  1952. }
  1953. EVBASE_ACQUIRE_LOCK(base, th_base_lock);
  1954. r = event_finalize_nolock_(base, flags, ev, cb);
  1955. EVBASE_RELEASE_LOCK(base, th_base_lock);
  1956. return r;
  1957. }
  1958. int
  1959. event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
  1960. {
  1961. return event_finalize_impl_(flags, ev, cb);
  1962. }
  1963. int
  1964. event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
  1965. {
  1966. return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
  1967. }
  1968. void
  1969. event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
  1970. {
  1971. struct event *ev = NULL;
  1972. if (evcb->evcb_flags & EVLIST_INIT) {
  1973. ev = event_callback_to_event(evcb);
  1974. event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
  1975. } else {
  1976. event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
  1977. }
  1978. evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
  1979. evcb->evcb_cb_union.evcb_cbfinalize = cb;
  1980. event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
  1981. evcb->evcb_flags |= EVLIST_FINALIZING;
  1982. }
  1983. void
  1984. event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
  1985. {
  1986. EVBASE_ACQUIRE_LOCK(base, th_base_lock);
  1987. event_callback_finalize_nolock_(base, flags, evcb, cb);
  1988. EVBASE_RELEASE_LOCK(base, th_base_lock);
  1989. }
  1990. /** Internal: Finalize all of the n_cbs callbacks in evcbs. The provided
  1991. * callback will be invoked on *one of them*, after they have *all* been
  1992. * finalized. */
  1993. int
  1994. event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
  1995. {
  1996. int n_pending = 0, i;
  1997. if (base == NULL)
  1998. base = current_base;
  1999. EVBASE_ACQUIRE_LOCK(base, th_base_lock);
  2000. event_debug(("%s: %d events finalizing", __func__, n_cbs));
  2001. /* At most one can be currently executing; the rest we just
  2002. * cancel... But we always make sure that the finalize callback
  2003. * runs. */
  2004. for (i = 0; i < n_cbs; ++i) {
  2005. struct event_callback *evcb = evcbs[i];
  2006. if (evcb == base->current_event) {
  2007. event_callback_finalize_nolock_(base, 0, evcb, cb);
  2008. ++n_pending;
  2009. } else {
  2010. event_callback_cancel_nolock_(base, evcb, 0);
  2011. }
  2012. }
  2013. if (n_pending == 0) {
  2014. /* Just do the first one. */
  2015. event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
  2016. }
  2017. EVBASE_RELEASE_LOCK(base, th_base_lock);
  2018. return 0;
  2019. }
/*
 * Sets the priority of an event: if the event is already active,
 * changing the priority will fail.
 */
  2024. int
  2025. event_priority_set(struct event *ev, int pri)
  2026. {
  2027. event_debug_assert_is_setup_(ev);
  2028. if (ev->ev_flags & EVLIST_ACTIVE)
  2029. return (-1);
  2030. if (pri < 0 || pri >= ev->ev_base->nactivequeues)
  2031. return (-1);
  2032. ev->ev_pri = pri;
  2033. return (0);
  2034. }
  2035. /*
  2036. * Checks if a specific event is pending or scheduled.
  2037. */
  2038. int
  2039. event_pending(const struct event *ev, short event, struct timeval *tv)
  2040. {
  2041. int flags = 0;
  2042. if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
  2043. event_warnx("%s: event has no event_base set.", __func__);
  2044. return 0;
  2045. }
  2046. EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
  2047. event_debug_assert_is_setup_(ev);
  2048. if (ev->ev_flags & EVLIST_INSERTED)
  2049. flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
  2050. if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
  2051. flags |= ev->ev_res;
  2052. if (ev->ev_flags & EVLIST_TIMEOUT)
  2053. flags |= EV_TIMEOUT;
  2054. event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);
  2055. /* See if there is a timeout that we should report */
  2056. if (tv != NULL && (flags & event & EV_TIMEOUT)) {
  2057. struct timeval tmp = ev->ev_timeout;
  2058. tmp.tv_usec &= MICROSECONDS_MASK;
		/* correctly remap to real time */
  2060. evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
  2061. }
  2062. EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
  2063. return (flags & event);
  2064. }
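/* Usage sketch (illustration only; "ev" is a hypothetical pending event):
 * check whether it still has a pending timeout and, if so, when it is
 * scheduled to fire (as wall-clock time).
 *
 *     struct timeval when;
 *     if (event_pending(ev, EV_TIMEOUT, &when))
 *         printf("fires at %ld.%06ld\n",
 *             (long)when.tv_sec, (long)when.tv_usec);
 */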
  2065. int
  2066. event_initialized(const struct event *ev)
  2067. {
  2068. if (!(ev->ev_flags & EVLIST_INIT))
  2069. return 0;
  2070. return 1;
  2071. }
  2072. void
  2073. event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
  2074. {
  2075. event_debug_assert_is_setup_(event);
  2076. if (base_out)
  2077. *base_out = event->ev_base;
  2078. if (fd_out)
  2079. *fd_out = event->ev_fd;
  2080. if (events_out)
  2081. *events_out = event->ev_events;
  2082. if (callback_out)
  2083. *callback_out = event->ev_callback;
  2084. if (arg_out)
  2085. *arg_out = event->ev_arg;
  2086. }
  2087. size_t
  2088. event_get_struct_event_size(void)
  2089. {
  2090. return sizeof(struct event);
  2091. }
  2092. evutil_socket_t
  2093. event_get_fd(const struct event *ev)
  2094. {
  2095. event_debug_assert_is_setup_(ev);
  2096. return ev->ev_fd;
  2097. }
  2098. struct event_base *
  2099. event_get_base(const struct event *ev)
  2100. {
  2101. event_debug_assert_is_setup_(ev);
  2102. return ev->ev_base;
  2103. }
  2104. short
  2105. event_get_events(const struct event *ev)
  2106. {
  2107. event_debug_assert_is_setup_(ev);
  2108. return ev->ev_events;
  2109. }
  2110. event_callback_fn
  2111. event_get_callback(const struct event *ev)
  2112. {
  2113. event_debug_assert_is_setup_(ev);
  2114. return ev->ev_callback;
  2115. }
  2116. void *
  2117. event_get_callback_arg(const struct event *ev)
  2118. {
  2119. event_debug_assert_is_setup_(ev);
  2120. return ev->ev_arg;
  2121. }
  2122. int
  2123. event_get_priority(const struct event *ev)
  2124. {
  2125. event_debug_assert_is_setup_(ev);
  2126. return ev->ev_pri;
  2127. }
  2128. int
  2129. event_add(struct event *ev, const struct timeval *tv)
  2130. {
  2131. int res;
  2132. if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
  2133. event_warnx("%s: event has no event_base set.", __func__);
  2134. return -1;
  2135. }
  2136. EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
  2137. res = event_add_nolock_(ev, tv, 0);
  2138. EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
  2139. return (res);
  2140. }
  2141. /* Helper callback: wake an event_base from another thread. This version
  2142. * works by writing a byte to one end of a socketpair, so that the event_base
  2143. * listening on the other end will wake up as the corresponding event
  2144. * triggers */
  2145. static int
  2146. evthread_notify_base_default(struct event_base *base)
  2147. {
  2148. char buf[1];
  2149. int r;
  2150. buf[0] = (char) 0;
  2151. #ifdef _WIN32
  2152. r = send(base->th_notify_fd[1], buf, 1, 0);
  2153. #else
  2154. r = write(base->th_notify_fd[1], buf, 1);
  2155. #endif
  2156. return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
  2157. }
  2158. #ifdef EVENT__HAVE_EVENTFD
  2159. /* Helper callback: wake an event_base from another thread. This version
  2160. * assumes that you have a working eventfd() implementation. */
  2161. static int
  2162. evthread_notify_base_eventfd(struct event_base *base)
  2163. {
  2164. ev_uint64_t msg = 1;
  2165. int r;
  2166. do {
  2167. r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
  2168. } while (r < 0 && errno == EAGAIN);
  2169. return (r < 0) ? -1 : 0;
  2170. }
  2171. #endif
  2172. /** Tell the thread currently running the event_loop for base (if any) that it
  2173. * needs to stop waiting in its dispatch function (if it is) and process all
  2174. * active callbacks. */
  2175. static int
  2176. evthread_notify_base(struct event_base *base)
  2177. {
  2178. EVENT_BASE_ASSERT_LOCKED(base);
  2179. if (!base->th_notify_fn)
  2180. return -1;
  2181. if (base->is_notify_pending)
  2182. return 0;
  2183. base->is_notify_pending = 1;
  2184. return base->th_notify_fn(base);
  2185. }
  2186. /* Implementation function to remove a timeout on a currently pending event.
  2187. */
  2188. int
  2189. event_remove_timer_nolock_(struct event *ev)
  2190. {
  2191. struct event_base *base = ev->ev_base;
  2192. EVENT_BASE_ASSERT_LOCKED(base);
  2193. event_debug_assert_is_setup_(ev);
  2194. event_debug(("event_remove_timer_nolock: event: %p", ev));
  2195. /* If it's not pending on a timeout, we don't need to do anything. */
  2196. if (ev->ev_flags & EVLIST_TIMEOUT) {
  2197. event_queue_remove_timeout(base, ev);
  2198. evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
  2199. }
  2200. return (0);
  2201. }
  2202. int
  2203. event_remove_timer(struct event *ev)
  2204. {
  2205. int res;
  2206. if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
  2207. event_warnx("%s: event has no event_base set.", __func__);
  2208. return -1;
  2209. }
  2210. EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
  2211. res = event_remove_timer_nolock_(ev);
  2212. EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
  2213. return (res);
  2214. }
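/* Usage sketch (illustration only; "ev" is a hypothetical EV_READ event):
 * keep waiting for I/O but drop the pending timeout.
 *
 *     struct timeval five_sec = { 5, 0 };
 *     event_add(ev, &five_sec);   // pending for EV_READ with a 5 s timeout
 *     ...
 *     event_remove_timer(ev);     // still pending for EV_READ, no timeout
 */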
  2215. /* Implementation function to add an event. Works just like event_add,
  2216. * except: 1) it requires that we have the lock. 2) if tv_is_absolute is set,
  2217. * we treat tv as an absolute time, not as an interval to add to the current
  2218. * time */
  2219. int
  2220. event_add_nolock_(struct event *ev, const struct timeval *tv,
  2221. int tv_is_absolute)
  2222. {
  2223. struct event_base *base = ev->ev_base;
  2224. int res = 0;
  2225. int notify = 0;
  2226. EVENT_BASE_ASSERT_LOCKED(base);
  2227. event_debug_assert_is_setup_(ev);
  2228. event_debug((
  2229. "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
  2230. ev,
  2231. EV_SOCK_ARG(ev->ev_fd),
  2232. ev->ev_events & EV_READ ? "EV_READ " : " ",
  2233. ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
  2234. ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
  2235. tv ? "EV_TIMEOUT " : " ",
  2236. ev->ev_callback));
  2237. EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
  2238. if (ev->ev_flags & EVLIST_FINALIZING) {
  2239. /* XXXX debug */
  2240. return (-1);
  2241. }
	/*
	 * Prepare for timeout insertion further below.  If any step
	 * fails, we must not change any state.
	 */
  2246. if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
  2247. if (min_heap_reserve_(&base->timeheap,
  2248. 1 + min_heap_size_(&base->timeheap)) == -1)
  2249. return (-1); /* ENOMEM == errno */
  2250. }
  2251. /* If the main thread is currently executing a signal event's
  2252. * callback, and we are not the main thread, then we want to wait
  2253. * until the callback is done before we mess with the event, or else
  2254. * we can race on ev_ncalls and ev_pncalls below. */
  2255. #ifndef EVENT__DISABLE_THREAD_SUPPORT
  2256. if (base->current_event == event_to_event_callback(ev) &&
  2257. (ev->ev_events & EV_SIGNAL)
  2258. && !EVBASE_IN_THREAD(base)) {
  2259. ++base->current_event_waiters;
  2260. EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
  2261. }
  2262. #endif
  2263. if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
  2264. !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
  2265. if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
  2266. res = evmap_io_add_(base, ev->ev_fd, ev);
  2267. else if (ev->ev_events & EV_SIGNAL)
  2268. res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
  2269. if (res != -1)
  2270. event_queue_insert_inserted(base, ev);
  2271. if (res == 1) {
  2272. /* evmap says we need to notify the main thread. */
  2273. notify = 1;
  2274. res = 0;
  2275. }
  2276. }
  2277. /*
  2278. * we should change the timeout state only if the previous event
  2279. * addition succeeded.
  2280. */
  2281. if (res != -1 && tv != NULL) {
  2282. struct timeval now;
  2283. int common_timeout;
  2284. #ifdef USE_REINSERT_TIMEOUT
  2285. int was_common;
  2286. int old_timeout_idx;
  2287. #endif
  2288. /*
  2289. * for persistent timeout events, we remember the
  2290. * timeout value and re-add the event.
  2291. *
  2292. * If tv_is_absolute, this was already set.
  2293. */
  2294. if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
  2295. ev->ev_io_timeout = *tv;
  2296. #ifndef USE_REINSERT_TIMEOUT
  2297. if (ev->ev_flags & EVLIST_TIMEOUT) {
  2298. event_queue_remove_timeout(base, ev);
  2299. }
  2300. #endif
  2301. /* Check if it is active due to a timeout. Rescheduling
  2302. * this timeout before the callback can be executed
  2303. * removes it from the active list. */
  2304. if ((ev->ev_flags & EVLIST_ACTIVE) &&
  2305. (ev->ev_res & EV_TIMEOUT)) {
  2306. if (ev->ev_events & EV_SIGNAL) {
  2307. /* See if we are just active executing
  2308. * this event in a loop
  2309. */
  2310. if (ev->ev_ncalls && ev->ev_pncalls) {
  2311. /* Abort loop */
  2312. *ev->ev_pncalls = 0;
  2313. }
  2314. }
  2315. event_queue_remove_active(base, event_to_event_callback(ev));
  2316. }
  2317. gettime(base, &now);
  2318. common_timeout = is_common_timeout(tv, base);
  2319. #ifdef USE_REINSERT_TIMEOUT
  2320. was_common = is_common_timeout(&ev->ev_timeout, base);
  2321. old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
  2322. #endif
  2323. if (tv_is_absolute) {
  2324. ev->ev_timeout = *tv;
  2325. } else if (common_timeout) {
  2326. struct timeval tmp = *tv;
  2327. tmp.tv_usec &= MICROSECONDS_MASK;
  2328. evutil_timeradd(&now, &tmp, &ev->ev_timeout);
  2329. ev->ev_timeout.tv_usec |=
  2330. (tv->tv_usec & ~MICROSECONDS_MASK);
  2331. } else {
  2332. evutil_timeradd(&now, tv, &ev->ev_timeout);
  2333. }
  2334. event_debug((
  2335. "event_add: event %p, timeout in %d seconds %d useconds, call %p",
  2336. ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));
  2337. #ifdef USE_REINSERT_TIMEOUT
  2338. event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
  2339. #else
  2340. event_queue_insert_timeout(base, ev);
  2341. #endif
  2342. if (common_timeout) {
  2343. struct common_timeout_list *ctl =
  2344. get_common_timeout_list(base, &ev->ev_timeout);
  2345. if (ev == TAILQ_FIRST(&ctl->events)) {
  2346. common_timeout_schedule(ctl, &now, ev);
  2347. }
  2348. } else {
  2349. struct event* top = NULL;
  2350. /* See if the earliest timeout is now earlier than it
  2351. * was before: if so, we will need to tell the main
  2352. * thread to wake up earlier than it would otherwise.
  2353. * We double check the timeout of the top element to
  2354. * handle time distortions due to system suspension.
  2355. */
  2356. if (min_heap_elt_is_top_(ev))
  2357. notify = 1;
  2358. else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
  2359. evutil_timercmp(&top->ev_timeout, &now, <))
  2360. notify = 1;
  2361. }
  2362. }
  2363. /* if we are not in the right thread, we need to wake up the loop */
  2364. if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
  2365. evthread_notify_base(base);
  2366. event_debug_note_add_(ev);
  2367. return (res);
  2368. }
  2369. static int
  2370. event_del_(struct event *ev, int blocking)
  2371. {
  2372. int res;
  2373. struct event_base *base = ev->ev_base;
  2374. if (EVUTIL_FAILURE_CHECK(!base)) {
  2375. event_warnx("%s: event has no event_base set.", __func__);
  2376. return -1;
  2377. }
  2378. EVBASE_ACQUIRE_LOCK(base, th_base_lock);
  2379. res = event_del_nolock_(ev, blocking);
  2380. EVBASE_RELEASE_LOCK(base, th_base_lock);
  2381. return (res);
  2382. }
  2383. int
  2384. event_del(struct event *ev)
  2385. {
  2386. return event_del_(ev, EVENT_DEL_AUTOBLOCK);
  2387. }
  2388. int
  2389. event_del_block(struct event *ev)
  2390. {
  2391. return event_del_(ev, EVENT_DEL_BLOCK);
  2392. }
  2393. int
  2394. event_del_noblock(struct event *ev)
  2395. {
  2396. return event_del_(ev, EVENT_DEL_NOBLOCK);
  2397. }
  2398. /** Helper for event_del: always called with th_base_lock held.
  2399. *
  2400. * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
  2401. * EVEN_IF_FINALIZING} values. See those for more information.
  2402. */
  2403. int
  2404. event_del_nolock_(struct event *ev, int blocking)
  2405. {
  2406. struct event_base *base;
  2407. int res = 0, notify = 0;
  2408. event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
  2409. ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));
  2410. /* An event without a base has not been added */
  2411. if (ev->ev_base == NULL)
  2412. return (-1);
  2413. EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
  2414. if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
  2415. if (ev->ev_flags & EVLIST_FINALIZING) {
  2416. /* XXXX Debug */
  2417. return 0;
  2418. }
  2419. }
  2420. base = ev->ev_base;
  2421. EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
  2422. /* See if we are just active executing this event in a loop */
  2423. if (ev->ev_events & EV_SIGNAL) {
  2424. if (ev->ev_ncalls && ev->ev_pncalls) {
  2425. /* Abort loop */
  2426. *ev->ev_pncalls = 0;
  2427. }
  2428. }
  2429. if (ev->ev_flags & EVLIST_TIMEOUT) {
  2430. /* NOTE: We never need to notify the main thread because of a
  2431. * deleted timeout event: all that could happen if we don't is
  2432. * that the dispatch loop might wake up too early. But the
  2433. * point of notifying the main thread _is_ to wake up the
  2434. * dispatch loop early anyway, so we wouldn't gain anything by
  2435. * doing it.
  2436. */
  2437. event_queue_remove_timeout(base, ev);
  2438. }
  2439. if (ev->ev_flags & EVLIST_ACTIVE)
  2440. event_queue_remove_active(base, event_to_event_callback(ev));
  2441. else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
  2442. event_queue_remove_active_later(base, event_to_event_callback(ev));
  2443. if (ev->ev_flags & EVLIST_INSERTED) {
  2444. event_queue_remove_inserted(base, ev);
  2445. if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
  2446. res = evmap_io_del_(base, ev->ev_fd, ev);
  2447. else
  2448. res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
  2449. if (res == 1) {
  2450. /* evmap says we need to notify the main thread. */
  2451. notify = 1;
  2452. res = 0;
  2453. }
  2454. /* If we do not have events, let's notify event base so it can
  2455. * exit without waiting */
  2456. if (!event_haveevents(base) && !N_ACTIVE_CALLBACKS(base))
  2457. notify = 1;
  2458. }
  2459. /* if we are not in the right thread, we need to wake up the loop */
  2460. if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
  2461. evthread_notify_base(base);
  2462. event_debug_note_del_(ev);
  2463. /* If the main thread is currently executing this event's callback,
  2464. * and we are not the main thread, then we want to wait until the
  2465. * callback is done before returning. That way, when this function
  2466. * returns, it will be safe to free the user-supplied argument.
  2467. */
  2468. #ifndef EVENT__DISABLE_THREAD_SUPPORT
  2469. if (blocking != EVENT_DEL_NOBLOCK &&
  2470. base->current_event == event_to_event_callback(ev) &&
  2471. !EVBASE_IN_THREAD(base) &&
  2472. (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
  2473. ++base->current_event_waiters;
  2474. EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
  2475. }
  2476. #endif
  2477. return (res);
  2478. }
  2479. void
  2480. event_active(struct event *ev, int res, short ncalls)
  2481. {
  2482. if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
  2483. event_warnx("%s: event has no event_base set.", __func__);
  2484. return;
  2485. }
  2486. EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
  2487. event_debug_assert_is_setup_(ev);
  2488. event_active_nolock_(ev, res, ncalls);
  2489. EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
  2490. }
  2491. void
  2492. event_active_nolock_(struct event *ev, int res, short ncalls)
  2493. {
  2494. struct event_base *base;
  2495. event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
  2496. ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));
  2497. base = ev->ev_base;
  2498. EVENT_BASE_ASSERT_LOCKED(base);
  2499. if (ev->ev_flags & EVLIST_FINALIZING) {
  2500. /* XXXX debug */
  2501. return;
  2502. }
  2503. switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
  2504. default:
  2505. case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
  2506. EVUTIL_ASSERT(0);
  2507. break;
  2508. case EVLIST_ACTIVE:
  2509. /* We get different kinds of events, add them together */
  2510. ev->ev_res |= res;
  2511. return;
  2512. case EVLIST_ACTIVE_LATER:
  2513. ev->ev_res |= res;
  2514. break;
  2515. case 0:
  2516. ev->ev_res = res;
  2517. break;
  2518. }
  2519. if (ev->ev_pri < base->event_running_priority)
  2520. base->event_continue = 1;
  2521. if (ev->ev_events & EV_SIGNAL) {
  2522. #ifndef EVENT__DISABLE_THREAD_SUPPORT
  2523. if (base->current_event == event_to_event_callback(ev) &&
  2524. !EVBASE_IN_THREAD(base)) {
  2525. ++base->current_event_waiters;
  2526. EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
  2527. }
  2528. #endif
  2529. ev->ev_ncalls = ncalls;
  2530. ev->ev_pncalls = NULL;
  2531. }
  2532. event_callback_activate_nolock_(base, event_to_event_callback(ev));
  2533. }
  2534. void
  2535. event_active_later_(struct event *ev, int res)
  2536. {
  2537. EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
  2538. event_active_later_nolock_(ev, res);
  2539. EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
  2540. }
  2541. void
  2542. event_active_later_nolock_(struct event *ev, int res)
  2543. {
  2544. struct event_base *base = ev->ev_base;
  2545. EVENT_BASE_ASSERT_LOCKED(base);
  2546. if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
  2547. /* We get different kinds of events, add them together */
  2548. ev->ev_res |= res;
  2549. return;
  2550. }
  2551. ev->ev_res = res;
  2552. event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
  2553. }
  2554. int
  2555. event_callback_activate_(struct event_base *base,
  2556. struct event_callback *evcb)
  2557. {
  2558. int r;
  2559. EVBASE_ACQUIRE_LOCK(base, th_base_lock);
  2560. r = event_callback_activate_nolock_(base, evcb);
  2561. EVBASE_RELEASE_LOCK(base, th_base_lock);
  2562. return r;
  2563. }
  2564. int
  2565. event_callback_activate_nolock_(struct event_base *base,
  2566. struct event_callback *evcb)
  2567. {
  2568. int r = 1;
  2569. if (evcb->evcb_flags & EVLIST_FINALIZING)
  2570. return 0;
  2571. switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
  2572. default:
  2573. EVUTIL_ASSERT(0);
  2574. EVUTIL_FALLTHROUGH;
  2575. case EVLIST_ACTIVE_LATER:
  2576. event_queue_remove_active_later(base, evcb);
  2577. r = 0;
  2578. break;
  2579. case EVLIST_ACTIVE:
  2580. return 0;
  2581. case 0:
  2582. break;
  2583. }
  2584. event_queue_insert_active(base, evcb);
  2585. if (EVBASE_NEED_NOTIFY(base))
  2586. evthread_notify_base(base);
  2587. return r;
  2588. }
  2589. int
  2590. event_callback_activate_later_nolock_(struct event_base *base,
  2591. struct event_callback *evcb)
  2592. {
  2593. if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
  2594. return 0;
  2595. event_queue_insert_active_later(base, evcb);
  2596. if (EVBASE_NEED_NOTIFY(base))
  2597. evthread_notify_base(base);
  2598. return 1;
  2599. }
  2600. void
  2601. event_callback_init_(struct event_base *base,
  2602. struct event_callback *cb)
  2603. {
  2604. memset(cb, 0, sizeof(*cb));
  2605. cb->evcb_pri = base->nactivequeues - 1;
  2606. }
  2607. int
  2608. event_callback_cancel_(struct event_base *base,
  2609. struct event_callback *evcb)
  2610. {
  2611. int r;
  2612. EVBASE_ACQUIRE_LOCK(base, th_base_lock);
  2613. r = event_callback_cancel_nolock_(base, evcb, 0);
  2614. EVBASE_RELEASE_LOCK(base, th_base_lock);
  2615. return r;
  2616. }
  2617. int
  2618. event_callback_cancel_nolock_(struct event_base *base,
  2619. struct event_callback *evcb, int even_if_finalizing)
  2620. {
  2621. if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
  2622. return 0;
  2623. if (evcb->evcb_flags & EVLIST_INIT)
  2624. return event_del_nolock_(event_callback_to_event(evcb),
  2625. even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);
  2626. switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
  2627. default:
  2628. case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
  2629. EVUTIL_ASSERT(0);
  2630. break;
  2631. case EVLIST_ACTIVE:
  2632. /* We get different kinds of events, add them together */
  2633. event_queue_remove_active(base, evcb);
  2634. return 0;
  2635. case EVLIST_ACTIVE_LATER:
  2636. event_queue_remove_active_later(base, evcb);
  2637. break;
  2638. case 0:
  2639. break;
  2640. }
  2641. return 0;
  2642. }
  2643. void
  2644. event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
  2645. {
  2646. memset(cb, 0, sizeof(*cb));
  2647. cb->evcb_cb_union.evcb_selfcb = fn;
  2648. cb->evcb_arg = arg;
  2649. cb->evcb_pri = priority;
  2650. cb->evcb_closure = EV_CLOSURE_CB_SELF;
  2651. }
  2652. void
  2653. event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
  2654. {
  2655. cb->evcb_pri = priority;
  2656. }
  2657. void
  2658. event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
  2659. {
  2660. if (!base)
  2661. base = current_base;
  2662. event_callback_cancel_(base, cb);
  2663. }
  2664. #define MAX_DEFERREDS_QUEUED 32
  2665. int
  2666. event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
  2667. {
  2668. int r = 1;
  2669. if (!base)
  2670. base = current_base;
  2671. EVBASE_ACQUIRE_LOCK(base, th_base_lock);
  2672. if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
  2673. r = event_callback_activate_later_nolock_(base, cb);
  2674. } else {
  2675. r = event_callback_activate_nolock_(base, cb);
  2676. if (r) {
  2677. ++base->n_deferreds_queued;
  2678. }
  2679. }
  2680. EVBASE_RELEASE_LOCK(base, th_base_lock);
  2681. return r;
  2682. }
  2683. static int
  2684. timeout_next(struct event_base *base, struct timeval **tv_p)
  2685. {
  2686. /* Caller must hold th_base_lock */
  2687. struct timeval now;
  2688. struct event *ev;
  2689. struct timeval *tv = *tv_p;
  2690. int res = 0;
  2691. ev = min_heap_top_(&base->timeheap);
  2692. if (ev == NULL) {
  2693. /* if no time-based events are active wait for I/O */
  2694. *tv_p = NULL;
  2695. goto out;
  2696. }
  2697. if (gettime(base, &now) == -1) {
  2698. res = -1;
  2699. goto out;
  2700. }
  2701. if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
  2702. evutil_timerclear(tv);
  2703. goto out;
  2704. }
  2705. evutil_timersub(&ev->ev_timeout, &now, tv);
  2706. EVUTIL_ASSERT(tv->tv_sec >= 0);
  2707. EVUTIL_ASSERT(tv->tv_usec >= 0);
  2708. event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));
  2709. out:
  2710. return (res);
  2711. }
/* Activate every event whose timeout has elapsed. */
static void
timeout_process(struct event_base *base)
{
	/* Caller must hold lock. */
	struct timeval now;
	struct event *ev;

	if (min_heap_empty_(&base->timeheap)) {
		return;
	}

	gettime(base, &now);

	while ((ev = min_heap_top_(&base->timeheap))) {
		if (evutil_timercmp(&ev->ev_timeout, &now, >))
			break;

		/* delete this event from the I/O queues */
		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);

		event_debug(("timeout_process: event: %p, call %p",
		    ev, ev->ev_callback));
		event_active_nolock_(ev, EV_TIMEOUT, 1);
	}
}
#ifndef MAX
#define MAX(a,b) (((a)>(b))?(a):(b))
#endif

#define MAX_EVENT_COUNT(var, v) var = MAX(var, v)

/* These are a fancy way to spell
     if (~flags & EVLIST_INTERNAL)
         base->event_count--/++;
*/
#define DECR_EVENT_COUNT(base,flags) \
	((base)->event_count -= !((flags) & EVLIST_INTERNAL))
#define INCR_EVENT_COUNT(base,flags) do {				\
	((base)->event_count += !((flags) & EVLIST_INTERNAL));		\
	MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count);	\
} while (0)
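
/*
 * Worked example (illustrative only): for a user event whose flags are,
 * say, EVLIST_INSERTED, !((flags) & EVLIST_INTERNAL) evaluates to 1, so
 * INCR_EVENT_COUNT bumps event_count by one and updates event_count_max.
 * For an internal event such as the notification event (whose flags
 * include EVLIST_INTERNAL), the expression evaluates to 0 and the counts
 * are left unchanged, so internal bookkeeping events never show up in the
 * user-visible event count.
 */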
static void
event_queue_remove_inserted(struct event_base *base, struct event *ev)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
		return;
	}
	DECR_EVENT_COUNT(base, ev->ev_flags);
	ev->ev_flags &= ~EVLIST_INSERTED;
}

static void
event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
		event_errx(1, "%s: %p not on queue %x", __func__,
		    evcb, EVLIST_ACTIVE);
		return;
	}
	DECR_EVENT_COUNT(base, evcb->evcb_flags);
	evcb->evcb_flags &= ~EVLIST_ACTIVE;
	base->event_count_active--;

	TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
	    evcb, evcb_active_next);
}

static void
event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
		event_errx(1, "%s: %p not on queue %x", __func__,
		    evcb, EVLIST_ACTIVE_LATER);
		return;
	}
	DECR_EVENT_COUNT(base, evcb->evcb_flags);
	evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
	base->event_count_active--;

	TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
}

static void
event_queue_remove_timeout(struct event_base *base, struct event *ev)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
		return;
	}
	DECR_EVENT_COUNT(base, ev->ev_flags);
	ev->ev_flags &= ~EVLIST_TIMEOUT;

	if (is_common_timeout(&ev->ev_timeout, base)) {
		struct common_timeout_list *ctl =
		    get_common_timeout_list(base, &ev->ev_timeout);
		TAILQ_REMOVE(&ctl->events, ev,
		    ev_timeout_pos.ev_next_with_common_timeout);
	} else {
		min_heap_erase_(&base->timeheap, ev);
	}
}
#ifdef USE_REINSERT_TIMEOUT
/* Remove and reinsert 'ev' into the timeout queue. */
static void
event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
    int was_common, int is_common, int old_timeout_idx)
{
	struct common_timeout_list *ctl;
	if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
		event_queue_insert_timeout(base, ev);
		return;
	}

	switch ((was_common<<1) | is_common) {
	case 3: /* Changing from one common timeout to another */
		ctl = base->common_timeout_queues[old_timeout_idx];
		TAILQ_REMOVE(&ctl->events, ev,
		    ev_timeout_pos.ev_next_with_common_timeout);
		ctl = get_common_timeout_list(base, &ev->ev_timeout);
		insert_common_timeout_inorder(ctl, ev);
		break;
	case 2: /* Was common; is no longer common */
		ctl = base->common_timeout_queues[old_timeout_idx];
		TAILQ_REMOVE(&ctl->events, ev,
		    ev_timeout_pos.ev_next_with_common_timeout);
		min_heap_push_(&base->timeheap, ev);
		break;
	case 1: /* Wasn't common; has become common. */
		min_heap_erase_(&base->timeheap, ev);
		ctl = get_common_timeout_list(base, &ev->ev_timeout);
		insert_common_timeout_inorder(ctl, ev);
		break;
	case 0: /* was in heap; is still on heap. */
		min_heap_adjust_(&base->timeheap, ev);
		break;
	default:
		EVUTIL_ASSERT(0); /* unreachable */
		break;
	}
}
#endif
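
/*
 * Note on the switch in event_queue_reinsert_timeout() above (illustrative
 * only): (was_common<<1) | is_common packs the old and new timeout kinds
 * into two bits, so the four cases enumerate every possible transition:
 *
 *	was_common  is_common  value  action
 *	    1           1        3    move between common-timeout lists
 *	    1           0        2    common-timeout list -> min-heap
 *	    0           1        1    min-heap -> common-timeout list
 *	    0           0        0    stays in the min-heap; just re-adjust
 */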
/* Add 'ev' to the common timeout list that matches its timeout value. */
static void
insert_common_timeout_inorder(struct common_timeout_list *ctl,
    struct event *ev)
{
	struct event *e;
	/* By all logic, we should just be able to append 'ev' to the end of
	 * ctl->events, since the timeout on each 'ev' is set to {the common
	 * timeout} + {the time when we add the event}, and so the events
	 * should arrive in order of their timeouts.  But just in case
	 * there's some wacky threading issue going on, we do a search from
	 * the end of the list to find the right insertion point.
	 */
	TAILQ_FOREACH_REVERSE(e, &ctl->events,
	    event_list, ev_timeout_pos.ev_next_with_common_timeout) {
		/* This timercmp is a little sneaky, since both ev and e have
		 * magic values in tv_usec.  Fortunately, they ought to have
		 * the _same_ magic values in tv_usec.  Let's assert for that.
		 */
		EVUTIL_ASSERT(
		    is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
		if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
			TAILQ_INSERT_AFTER(&ctl->events, e, ev,
			    ev_timeout_pos.ev_next_with_common_timeout);
			return;
		}
	}
	TAILQ_INSERT_HEAD(&ctl->events, ev,
	    ev_timeout_pos.ev_next_with_common_timeout);
}
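
/*
 * Illustrative sketch (not part of the implementation): common timeouts are
 * what the public event_base_init_common_timeout() hands back.  Many events
 * sharing one duration then live on an ordered list instead of the min-heap,
 * which keeps insertion O(1) in the usual append case handled above.  The
 * names five_seconds and timeout_cb below are hypothetical.
 *
 *	struct timeval five_seconds = { 5, 0 };
 *	const struct timeval *tv_common =
 *	    event_base_init_common_timeout(base, &five_seconds);
 *	struct event *ev = event_new(base, -1, EV_PERSIST, timeout_cb, NULL);
 *	event_add(ev, tv_common);	// lands on a common_timeout_list
 */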
static void
event_queue_insert_inserted(struct event_base *base, struct event *ev)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd));
		return;
	}

	INCR_EVENT_COUNT(base, ev->ev_flags);
	ev->ev_flags |= EVLIST_INSERTED;
}

static void
event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (evcb->evcb_flags & EVLIST_ACTIVE) {
		/* Double insertion is possible for active events */
		return;
	}

	INCR_EVENT_COUNT(base, evcb->evcb_flags);
	evcb->evcb_flags |= EVLIST_ACTIVE;

	base->event_count_active++;
	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
	TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
	    evcb, evcb_active_next);
}

static void
event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
		/* Double insertion is possible */
		return;
	}

	INCR_EVENT_COUNT(base, evcb->evcb_flags);
	evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
	base->event_count_active++;
	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
	TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
}

static void
event_queue_insert_timeout(struct event_base *base, struct event *ev)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd));
		return;
	}

	INCR_EVENT_COUNT(base, ev->ev_flags);
	ev->ev_flags |= EVLIST_TIMEOUT;

	if (is_common_timeout(&ev->ev_timeout, base)) {
		struct common_timeout_list *ctl =
		    get_common_timeout_list(base, &ev->ev_timeout);
		insert_common_timeout_inorder(ctl, ev);
	} else {
		min_heap_push_(&base->timeheap, ev);
	}
}
static void
event_queue_make_later_events_active(struct event_base *base)
{
	struct event_callback *evcb;
	EVENT_BASE_ASSERT_LOCKED(base);

	while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
		TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
		evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
		EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
		TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
		base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
	}
}
/* Functions for debugging */

const char *
event_get_version(void)
{
	return (EVENT__VERSION);
}

ev_uint32_t
event_get_version_number(void)
{
	return (EVENT__NUMERIC_VERSION);
}
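
/*
 * Illustrative usage (not part of the implementation): the numeric version
 * packs major/minor/patch into successively lower bytes, so releases compare
 * monotonically and a runtime check is a single comparison.  The threshold
 * below is an example value, not a requirement of this code.
 *
 *	if (event_get_version_number() < 0x02010100)	// roughly "before 2.1.1"
 *		fprintf(stderr, "running against libevent %s\n",
 *		    event_get_version());
 */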
/*
 * No thread-safe interface needed - the information should be the same
 * for all threads.
 */
const char *
event_get_method(void)
{
	return (current_base->evsel->name);
}
#ifndef EVENT__DISABLE_MM_REPLACEMENT
static void *(*mm_malloc_fn_)(size_t sz) = NULL;
static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
static void (*mm_free_fn_)(void *p) = NULL;

void *
event_mm_malloc_(size_t sz)
{
	if (sz == 0)
		return NULL;

	if (mm_malloc_fn_)
		return mm_malloc_fn_(sz);
	else
		return malloc(sz);
}

void *
event_mm_calloc_(size_t count, size_t size)
{
	if (count == 0 || size == 0)
		return NULL;

	if (mm_malloc_fn_) {
		size_t sz = count * size;
		void *p = NULL;
		if (count > EV_SIZE_MAX / size)
			goto error;
		p = mm_malloc_fn_(sz);
		if (p)
			return memset(p, 0, sz);
	} else {
		void *p = calloc(count, size);
#ifdef _WIN32
		/* Windows calloc doesn't reliably set ENOMEM */
		if (p == NULL)
			goto error;
#endif
		return p;
	}

error:
	errno = ENOMEM;
	return NULL;
}

char *
event_mm_strdup_(const char *str)
{
	if (!str) {
		errno = EINVAL;
		return NULL;
	}

	if (mm_malloc_fn_) {
		size_t ln = strlen(str);
		void *p = NULL;
		if (ln == EV_SIZE_MAX)
			goto error;
		p = mm_malloc_fn_(ln+1);
		if (p)
			return memcpy(p, str, ln+1);
	} else
#ifdef _WIN32
		return _strdup(str);
#else
		return strdup(str);
#endif

error:
	errno = ENOMEM;
	return NULL;
}

void *
event_mm_realloc_(void *ptr, size_t sz)
{
	if (mm_realloc_fn_)
		return mm_realloc_fn_(ptr, sz);
	else
		return realloc(ptr, sz);
}

void
event_mm_free_(void *ptr)
{
	if (mm_free_fn_)
		mm_free_fn_(ptr);
	else
		free(ptr);
}

void
event_set_mem_functions(void *(*malloc_fn)(size_t sz),
			void *(*realloc_fn)(void *ptr, size_t sz),
			void (*free_fn)(void *ptr))
{
	mm_malloc_fn_ = malloc_fn;
	mm_realloc_fn_ = realloc_fn;
	mm_free_fn_ = free_fn;
}
#endif
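
/*
 * Illustrative sketch (not part of the implementation): replacing the
 * allocators should happen before any other libevent call, since memory
 * obtained through the old functions would otherwise be released through the
 * new ones.  The counting wrappers below are hypothetical.
 *
 *	static size_t total_allocs;
 *	static void *counting_malloc(size_t sz) { ++total_allocs; return malloc(sz); }
 *	static void *counting_realloc(void *p, size_t sz) { return realloc(p, sz); }
 *	static void counting_free(void *p) { free(p); }
 *
 *	event_set_mem_functions(counting_malloc, counting_realloc, counting_free);
 */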
#ifdef EVENT__HAVE_EVENTFD
static void
evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
{
	ev_uint64_t msg;
	ev_ssize_t r;
	struct event_base *base = arg;

	r = read(fd, (void*) &msg, sizeof(msg));
	if (r<0 && errno != EAGAIN) {
		event_sock_warn(fd, "Error reading from eventfd");
	}
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->is_notify_pending = 0;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
#endif

static void
evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
{
	unsigned char buf[1024];
	struct event_base *base = arg;
#ifdef _WIN32
	while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
		;
#else
	while (read(fd, (char*)buf, sizeof(buf)) > 0)
		;
#endif

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->is_notify_pending = 0;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
int
evthread_make_base_notifiable(struct event_base *base)
{
	int r;
	if (!base)
		return -1;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	r = evthread_make_base_notifiable_nolock_(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}

static int
evthread_make_base_notifiable_nolock_(struct event_base *base)
{
	void (*cb)(evutil_socket_t, short, void *);
	int (*notify)(struct event_base *);

	if (base->th_notify_fn != NULL) {
		/* The base is already notifiable: we're doing fine. */
		return 0;
	}

#if defined(EVENT__HAVE_WORKING_KQUEUE)
	if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
		base->th_notify_fn = event_kq_notify_base_;
		/* No need to add an event here; the backend can wake
		 * itself up just fine. */
		return 0;
	}
#endif

#ifdef EVENT__HAVE_EVENTFD
	base->th_notify_fd[0] = evutil_eventfd_(0,
	    EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
	if (base->th_notify_fd[0] >= 0) {
		base->th_notify_fd[1] = -1;
		notify = evthread_notify_base_eventfd;
		cb = evthread_notify_drain_eventfd;
	} else
#endif
	if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
		notify = evthread_notify_base_default;
		cb = evthread_notify_drain_default;
	} else {
		return -1;
	}

	base->th_notify_fn = notify;

	/* prepare an event that we can use for wakeup */
	event_assign(&base->th_notify, base, base->th_notify_fd[0],
	    EV_READ|EV_PERSIST, cb, base);

	/* we need to mark this as an internal event */
	base->th_notify.ev_flags |= EVLIST_INTERNAL;
	event_priority_set(&base->th_notify, 0);

	return event_add_nolock_(&base->th_notify, NULL, 0);
}
int
event_base_foreach_event_nolock_(struct event_base *base,
    event_base_foreach_event_cb fn, void *arg)
{
	int r, i;
	unsigned u;
	struct event *ev;

	/* Start out with all the EVLIST_INSERTED events. */
	if ((r = evmap_foreach_event_(base, fn, arg)))
		return r;

	/* Okay, now we deal with those events that have timeouts and are in
	 * the min-heap. */
	for (u = 0; u < base->timeheap.n; ++u) {
		ev = base->timeheap.p[u];
		if (ev->ev_flags & EVLIST_INSERTED) {
			/* we already processed this one */
			continue;
		}
		if ((r = fn(base, ev, arg)))
			return r;
	}

	/* Now for the events in one of the common-timeout queues; these are
	 * not in the min-heap. */
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		TAILQ_FOREACH(ev, &ctl->events,
		    ev_timeout_pos.ev_next_with_common_timeout) {
			if (ev->ev_flags & EVLIST_INSERTED) {
				/* we already processed this one */
				continue;
			}
			if ((r = fn(base, ev, arg)))
				return r;
		}
	}

	/* Finally, we deal with all the active events that we haven't touched
	 * yet. */
	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb;
		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
			if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
				/* This isn't an event (EVLIST_INIT clear), or
				 * we already processed it (inserted or
				 * timeout set). */
				continue;
			}
			ev = event_callback_to_event(evcb);
			if ((r = fn(base, ev, arg)))
				return r;
		}
	}

	return 0;
}
/* Helper for event_base_dump_events: called on each event in the event base;
 * dumps only the inserted events. */
static int
dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
{
	FILE *output = arg;
	const char *gloss = (e->ev_events & EV_SIGNAL) ?
	    "sig" : "fd ";

	if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
		return 0;

	fprintf(output, " %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s%s",
	    (void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
	    (e->ev_events&EV_READ)?" Read":"",
	    (e->ev_events&EV_WRITE)?" Write":"",
	    (e->ev_events&EV_CLOSED)?" EOF":"",
	    (e->ev_events&EV_SIGNAL)?" Signal":"",
	    (e->ev_events&EV_PERSIST)?" Persist":"",
	    (e->ev_events&EV_ET)?" ET":"",
	    (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
	if (e->ev_flags & EVLIST_TIMEOUT) {
		struct timeval tv;
		tv.tv_sec = e->ev_timeout.tv_sec;
		tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
		evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
		fprintf(output, " Timeout=%ld.%06d",
		    (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
	}
	fputc('\n', output);

	return 0;
}
/* Helper for event_base_dump_events: called on each event in the event base;
 * dumps only the active events. */
static int
dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
{
	FILE *output = arg;
	const char *gloss = (e->ev_events & EV_SIGNAL) ?
	    "sig" : "fd ";

	if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
		return 0;

	fprintf(output, " %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
	    (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
	    (e->ev_res&EV_READ)?" Read":"",
	    (e->ev_res&EV_WRITE)?" Write":"",
	    (e->ev_res&EV_CLOSED)?" EOF":"",
	    (e->ev_res&EV_SIGNAL)?" Signal":"",
	    (e->ev_res&EV_TIMEOUT)?" Timeout":"",
	    (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
	    (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");

	return 0;
}
int
event_base_foreach_event(struct event_base *base,
    event_base_foreach_event_cb fn, void *arg)
{
	int r;
	if ((!fn) || (!base)) {
		return -1;
	}
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	r = event_base_foreach_event_nolock_(base, fn, arg);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}
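
/*
 * Illustrative usage (not part of the implementation): the caller-supplied
 * callback sees every event once and stops the walk by returning non-zero.
 * Because the base lock is held for the whole traversal, the callback must
 * not add or remove events on this base.  count_events is a hypothetical
 * helper.
 *
 *	static int
 *	count_events(const struct event_base *b, const struct event *ev, void *arg)
 *	{
 *		int *n = arg;
 *		++*n;
 *		return 0;	// keep iterating
 *	}
 *
 *	int n = 0;
 *	event_base_foreach_event(base, count_events, &n);
 */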
void
event_base_dump_events(struct event_base *base, FILE *output)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	fprintf(output, "Inserted events:\n");
	event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);

	fprintf(output, "Active events:\n");
	event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
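
/*
 * Illustrative usage (not part of the implementation): a one-line debugging
 * aid, typically wired to a signal handler or an admin command so a running
 * process can report what it is waiting on.
 *
 *	event_base_dump_events(base, stderr);
 */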
void
event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	/* Activate any non-timer events */
	if (!(events & EV_TIMEOUT)) {
		evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
	} else {
		/* If we want to activate timer events, loop and activate each event
		 * with the same fd in both the timeheap and common timeouts list */
		int i;
		unsigned u;
		struct event *ev;

		for (u = 0; u < base->timeheap.n; ++u) {
			ev = base->timeheap.p[u];
			if (ev->ev_fd == fd) {
				event_active_nolock_(ev, EV_TIMEOUT, 1);
			}
		}

		for (i = 0; i < base->n_common_timeouts; ++i) {
			struct common_timeout_list *ctl = base->common_timeout_queues[i];
			TAILQ_FOREACH(ev, &ctl->events,
			    ev_timeout_pos.ev_next_with_common_timeout) {
				if (ev->ev_fd == fd) {
					event_active_nolock_(ev, EV_TIMEOUT, 1);
				}
			}
		}
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
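
/*
 * Illustrative usage (not part of the implementation): force every read
 * event registered on a given descriptor to run, e.g. after data has been
 * made available outside the normal readiness path.  sock is hypothetical.
 *
 *	event_base_active_by_fd(base, sock, EV_READ);
 */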
void
event_base_active_by_signal(struct event_base *base, int sig)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	evmap_signal_active_(base, sig, 1);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}

void
event_base_add_virtual_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->virtual_event_count++;
	MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}

void
event_base_del_virtual_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	EVUTIL_ASSERT(base->virtual_event_count > 0);
	base->virtual_event_count--;
	if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
static void
event_free_debug_globals_locks(void)
{
#ifndef EVENT__DISABLE_THREAD_SUPPORT
#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_map_lock_ != NULL) {
		EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
		event_debug_map_lock_ = NULL;
		evthreadimpl_disable_lock_debugging_();
	}
#endif /* EVENT__DISABLE_DEBUG_MODE */
#endif /* EVENT__DISABLE_THREAD_SUPPORT */
	return;
}

static void
event_free_debug_globals(void)
{
	event_free_debug_globals_locks();
}

static void
event_free_evsig_globals(void)
{
	evsig_free_globals_();
}

static void
event_free_evutil_globals(void)
{
	evutil_free_globals_();
}

static void
event_free_globals(void)
{
	event_free_debug_globals();
	event_free_evsig_globals();
	event_free_evutil_globals();
}

void
libevent_global_shutdown(void)
{
	event_disable_debug_mode();
	event_free_globals();
}
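
/*
 * Illustrative usage (not part of the implementation): release libevent's
 * global state right before the process exits, after every event_base has
 * already been freed.  Mainly useful for quieting leak checkers.
 *
 *	event_base_free(base);
 *	libevent_global_shutdown();
 */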
#ifndef EVENT__DISABLE_THREAD_SUPPORT
int
event_global_setup_locks_(const int enable_locks)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
#endif
	if (evsig_global_setup_locks_(enable_locks) < 0)
		return -1;
	if (evutil_global_setup_locks_(enable_locks) < 0)
		return -1;
	if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
		return -1;
	return 0;
}
#endif
void
event_base_assert_ok_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	event_base_assert_ok_nolock_(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}

void
event_base_assert_ok_nolock_(struct event_base *base)
{
	int i;
	int count;

	/* First do checks on the per-fd and per-signal lists */
	evmap_check_integrity_(base);

	/* Check the heap property */
	for (i = 1; i < (int)base->timeheap.n; ++i) {
		int parent = (i - 1) / 2;
		struct event *ev, *p_ev;
		ev = base->timeheap.p[i];
		p_ev = base->timeheap.p[parent];
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
		EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
		EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
	}

	/* Check that the common timeouts are fine */
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl = base->common_timeout_queues[i];
		struct event *last=NULL, *ev;

		EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);

		TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
			if (last)
				EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
			EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
			EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
			EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
			last = ev;
		}
	}

	/* Check the active queues. */
	count = 0;
	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb;
		EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
			EVUTIL_ASSERT(evcb->evcb_pri == i);
			++count;
		}
	}

	{
		struct event_callback *evcb;
		TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
			++count;
		}
	}
	EVUTIL_ASSERT(count == base->event_count_active);
}