(* Copyright (c) 2020, 2021, Guillaume Munch-Maccagnoni & INRIA
SPDX-License-Identifier: LGPL-3.0-only WITH LGPL-3.0-linking-exception
*)moduleT=Thread_map_coretypemask={mutableon:bool}letmask_tls:maskT.t=T.create ()(* whether the current thread is masked *)letcreate_mask()=letr={on=false}inT.setmask_tls(Somer);rletdelete_mask()=T.setmask_tlsNoneletis_blocked()=matchT.getmask_tlswith|None->false|Somer->r.onletassert_blocked()=assert(is_blocked())(* The current goal is only to protect from those asynchronous
exceptions raised after dutifully checking that [is_blocked ()]
evaluates to false, and that expect the asynchronous callback to be
called again shortly thereafter (e.g. memprof callbacks). There is
currently no mechanism to delay asynchronous callbacks, so this
strategy cannot work for other kinds of asynchronous callbacks. *)letwith_resource~acquirearg~scope~(release:_->unit)=letmask,delete_after=matchT.getmask_tls with|None->create_mask(),true|Somer->r,falseinletold_mask=mask.oninletremove_mask ()=(* remove the mask flag from the TLS to avoid it growing
uncontrollably when there are lots of threads. *)ifdelete_afterthendelete_mask()elsemask.on<-old_maskinletrelease_and_unmaskrx=matchreleaserwith|()->remove_mask ();x|exception e->remove_mask();raiseeinmask.on<-true;letr=tryacquireargwith|e->mask.on<-old_mask;raise einmatchmask.on<-old_mask;scoperwith|(* BEGINATOMIC *)y->(mask.on<-true;(* END ATOMIC *)release_and_unmaskry)|(* BEGINATOMIC *) exceptione->(mask.on<-true;(* END ATOMIC *)matchPrintexc.get_raw_backtrace()with|bt->(lete=release_and_unmaskreinPrintexc.raise_with_backtraceebt)|exceptionOut_of_memory->raise(release_and_unmaskre))