package tezt
Test framework for unit tests, integration tests, and regression tests
Install
Dune Dependency
Authors
Maintainers
Sources
tezt-4.2.0.tar.bz2
md5=7878acd788ae59f1a07d0392644f0fff
sha512=b9e8ce2576b0bc65870409380edf17b88656a985ceb9a438a84f479b51d6b30740acf7b035eccf7d122bf5227611bf15e888e607dcdbb1576b4383f12314dd49
doc/src/tezt.scheduler/scheduler.ml.html
Source file scheduler.ml
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 
528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 
1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299
(*****************************************************************************)
(*                                                                           *)
(* SPDX-License-Identifier: MIT                                              *)
(* SPDX-FileCopyrightText: 2024 Nomadic Labs <contact@nomadic-labs.com>      *)
(*                                                                           *)
(*****************************************************************************)

module Float_map = Map.Make (Float)

(* Return a human-readable name ("SIGTERM", "SIGKILL", ...) for an OCaml
   [Sys] signal number [n], for use in error messages.
   [Sys] signal numbers are OCaml-specific, so we cannot just print [n];
   we compare against each known [Sys.sigxxx] constant instead. *)
let show_signal n =
  if n = Sys.sigabrt then "SIGABRT"
  else if n = Sys.sigalrm then "SIGALRM"
  else if n = Sys.sigfpe then "SIGFPE"
  else if n = Sys.sighup then "SIGHUP"
  else if n = Sys.sigill then "SIGILL"
  else if n = Sys.sigint then "SIGINT"
  else if n = Sys.sigkill then "SIGKILL"
  else if n = Sys.sigpipe then "SIGPIPE"
  else if n = Sys.sigquit then "SIGQUIT"
  else if n = Sys.sigsegv then "SIGSEGV"
  else if n = Sys.sigterm then "SIGTERM"
  else if n = Sys.sigusr1 then "SIGUSR1"
  else if n = Sys.sigusr2 then "SIGUSR2"
  else if n = Sys.sigchld then "SIGCHLD"
  else if n = Sys.sigcont then "SIGCONT"
  else if n = Sys.sigstop then "SIGSTOP"
  else if n = Sys.sigtstp then "SIGTSTP"
  else if n = Sys.sigttin then "SIGTTIN"
  else if n = Sys.sigttou then "SIGTTOU"
  else if n = Sys.sigvtalrm then "SIGVTALRM"
  else if n = Sys.sigprof then "SIGPROF"
  else if n = Sys.sigbus then "SIGBUS"
  else if n = Sys.sigpoll then "SIGPOLL"
  else if n = Sys.sigsys then "SIGSYS"
  else if n = Sys.sigtrap then "SIGTRAP"
  else if n = Sys.sigurg then "SIGURG"
  else if n = Sys.sigxcpu then "SIGXCPU"
  else if n = Sys.sigxfsz then "SIGXFSZ"
  else "unknown signal (" ^ string_of_int n ^ ")"

(* Return a human-readable description of how a child process terminated. *)
let show_process_status (status : Unix.process_status) =
  match status with
  | WEXITED n -> Printf.sprintf "exited with code %d" n
  | WSIGNALED n -> Printf.sprintf "was killed by %s" (show_signal n)
  | WSTOPPED n -> Printf.sprintf "was stopped by %s" (show_signal n)

(* Set to [true] by [stop], to [false] by [run], and used to decide whether to
   start new tasks and to send SIGTERM to current workers. *)
let stopped = ref false

(* We use file descriptor maps when looking at the result of [Unix.select]
We don't strictly need maps ([Unix.select] usually only returns one file
   descriptor) but we can't avoid comparing file descriptors.
   It happens that [Stdlib.compare] works on file descriptors. *)
module FD = struct
  type t = Unix.file_descr

  (* Restrict polymorphic comparison to file descriptors only. *)
  let compare = (Stdlib.compare : t -> t -> int)
end

module FD_map = struct
  include Map.Make (FD)

  (* Build a map from an association list; later bindings win. *)
  let of_list l = List.fold_left (fun acc (k, v) -> add k v acc) empty l
end

(* [Unix.close] usually does not fail except with EBADF if called on a file
   descriptor that was already closed. But other than that there is not much we
   can do and it is often better to prioritize other errors from other
   functions. *)
let try_close file_descriptor =
  try Unix.close file_descriptor with Unix.Unix_error _ -> ()

(* Part of the [Message] module that does not depend on types defined in
   [Worker]. The [Worker] module itself depends on [Message] so we have to
   split it. *)
module Message_not_depending_on_worker = struct
  (* Universal value type used as the payload of messages exchanged between
     the scheduler and its workers. *)
  type value =
    | Unit
    | Bool of bool
    | Char of char
    | Int of int
    | Int32 of int32
    | Int64 of int64
    | Float of float
    | String of string
    | Block of value array
    | Closure of (unit -> unit)

  (* Pretty-print a [value] by feeding string chunks to [out].
     Closures are opaque and printed as ["<fun>"]. *)
  let rec output_value out = function
    | Unit -> out "()"
    | Bool b -> out (string_of_bool b)
    | Char c -> out (Printf.sprintf "%C" c)
    | Int i -> out (string_of_int i)
    | Int32 i -> out (Int32.to_string i)
    | Int64 i -> out (Int64.to_string i)
    | Float f -> out (string_of_float f)
    | String s -> out (Printf.sprintf "%S" s)
    | Block [||] -> out "[]"
    | Block a ->
        out "[ " ;
        output_value out a.(0) ;
        for i = 1 to Array.length a - 1 do
          out "; " ;
          output_value out a.(i)
        done ;
        out " ]"
    | Closure _ -> out "<fun>"

  (* Pretty-print a [value] to a string, using [output_value] on a buffer. *)
  let show_value value =
    let buffer = Buffer.create 128 in
    output_value (Buffer.add_string buffer) value ;
    Buffer.contents buffer

  (* Bidirectional conversion between a typed OCaml value ['a] and the
     universal [value] representation. *)
  type 'a typ = {encode : 'a -> value; decode : value -> 'a}

  let encode x = x.encode

  let decode x = x.decode

  (* Raised by [decode] when a [value] does not have the expected shape.
     Carries the offending value and the name of the expected type. *)
  exception Failed_to_decode of value * string

  let () =
    Printexc.register_printer @@ function
    | Failed_to_decode (value, type_name) ->
Some ("failed to decode " ^ show_value value ^ " as " ^ type_name)
    | _ -> None

  (* Build a ['a typ] from explicit conversion functions. *)
  let typ ~encode ~decode = {encode; decode}

  (* Predefined [typ]s for basic OCaml types.
     Each [decode] raises [Failed_to_decode] on a shape mismatch. *)
  let unit =
    {
      encode = (fun () -> Unit);
      decode = (function Unit -> () | v -> raise (Failed_to_decode (v, "unit")));
    }

  let bool =
    {
      encode = (fun x -> Bool x);
      decode = (function Bool x -> x | v -> raise (Failed_to_decode (v, "bool")));
    }

  let char =
    {
      encode = (fun x -> Char x);
      decode = (function Char x -> x | v -> raise (Failed_to_decode (v, "char")));
    }

  let int =
    {
      encode = (fun x -> Int x);
      decode = (function Int x -> x | v -> raise (Failed_to_decode (v, "int")));
    }

  let int32 =
    {
      encode = (fun x -> Int32 x);
      decode =
        (function Int32 x -> x | v -> raise (Failed_to_decode (v, "int32")));
    }

  let int64 =
    {
      encode = (fun x -> Int64 x);
      decode =
        (function Int64 x -> x | v -> raise (Failed_to_decode (v, "int64")));
    }

  let float =
    {
      encode = (fun x -> Float x);
      decode =
        (function Float x -> x | v -> raise (Failed_to_decode (v, "float")));
    }

  let string =
    {
      encode = (fun x -> String x);
      decode =
        (function String x -> x | v -> raise (Failed_to_decode (v, "string")));
    }

  let closure =
    {
      encode = (fun x -> Closure x);
      decode =
        (function
        | Closure x -> x | v -> raise (Failed_to_decode (v, "closure")));
    }

  (* A message tag: a unique integer [id] plus the [typ] of the payload
     associated with this tag. *)
  type 'a tag = {id : int; typ : 'a typ}

  (* Counter used to allocate fresh tag ids. *)
  let next_tag_id = ref 0

  (* Map from tag id to tag name, for pretty-printing messages. *)
  let tag_names : (int, string) Hashtbl.t = Hashtbl.create 16

  (* Allocate a fresh tag with payload type [typ] and display name [name]. *)
  let register typ name =
    let id = !next_tag_id in
    incr next_tag_id ;
    let tag = {id; typ} in
    Hashtbl.replace tag_names id name ;
    tag

  (* A message: a tag id plus an already-encoded payload. *)
  type t = {tag_id : int; value : value}

  (* Build a message from a tag and a typed payload. *)
  let make tag value = {tag_id = tag.id; value = tag.typ.encode value}

  (* Pretty-print a message by feeding string chunks to [out]:
     tag name (or "#<id>" if unknown), then the payload unless it is [Unit]. *)
  let output out {tag_id; value} =
    (match Hashtbl.find_opt tag_names tag_id with
    | None ->
        out "#" ;
        out (string_of_int tag_id)
    | Some name -> out name) ;
    match value with
    | Unit -> ()
    | _ ->
        out " " ;
        output_value out value

  (* Pretty-print a message to a string, using [output] on a buffer. *)
  let show message =
    let buffer = Buffer.create 128 in
    output (Buffer.add_string buffer) message ;
    Buffer.contents buffer

  (* If the message carries [tag], decode its payload and apply [handler];
     otherwise run [default]. *)
  let match_with {tag_id; value} tag handler ~default =
    if tag_id
= tag.id then handler (tag.typ.decode value) else default ()

  (* One branch of a message pattern-matching: a tag and the handler to apply
     to the decoded payload when the tag matches. *)
  type _ case = Case : 'a tag * ('a -> 'b) -> 'b case

  let case tag handler = Case (tag, handler)

  (* We could have a two-step function with a first step that builds a table
     from tag id to handler, and a second step that uses it. The result of the
     first step could then be used to match multiple messages in O(log n)
     instead of O(n). But this is a bit cumbersome and probably overkill. *)
  let match_with_list {tag_id; value} (type a) (cases : a case list)
      ~(default : unit -> a) =
    (* Linear scan: try each case in order until one tag matches. *)
    let rec find_case = function
      | [] -> default ()
      | Case (tag, handler) :: tail ->
          if tag.id = tag_id then handler (tag.typ.decode value)
          else find_case tail
    in
    find_case cases

  (* Alias to be able to refer to the message type in [Reader] and [Writer],
     which define their own [t] type. *)
  type message = t

  (* Reading messages serialized using [Marshal] from a file descriptor. *)
  module Reader = struct
    (* [bytes] is a buffer. It is mutable so that it can grow if needed.
       [position] is the position in [bytes] of the next unused byte.
       It is between 0 and [Bytes.length bytes].
       [expected] is the expected number of [bytes] for the message currently
       being read. It is [None] until the marshal header has been read. *)
    type t = {
      file_descriptor : Unix.file_descr;
      mutable end_of_file : bool;
      mutable bytes : bytes;
      mutable position : int;
      mutable expected : int option;
      messages : message Queue.t;
    }

    (* Make a fresh reader for [file_descriptor], with an empty buffer large
       enough to hold at least a full marshal header. *)
    let create file_descriptor =
      {
        file_descriptor;
        end_of_file = false;
        bytes = Bytes.create (max 512 Marshal.header_size);
        position = 0;
        expected = None;
        messages = Queue.create ();
      }

    (* Grow [reader.bytes] so that its size is at least [target_length].
*)
    let grow reader target_length =
      let current_length = Bytes.length reader.bytes in
      if current_length < target_length then (
        (* At least double the size to amortize the cost of copying. *)
        let new_bytes = Bytes.create (max target_length (current_length * 2)) in
        Bytes.blit reader.bytes 0 new_bytes 0 current_length ;
        reader.bytes <- new_bytes)

    (* Raised (if requested) when the pipe was closed in the middle of a
       message: some bytes were buffered but no complete message arrived. *)
    exception Partial_message

    (* Read once from the file descriptor and update the current state
       accordingly. To be called when the file descriptor is ready for reading.
       Can raise [Unix.Unix_error], or [Failure] if unmarshaling fails. *)
    let read_non_blocking ~raise_partial_message reader =
      if not reader.end_of_file then (
        (* [reader.bytes] must have room for at least one byte.
           Otherwise [Unix.read] could return 0 without meaning end of file. *)
        grow reader (reader.position + 1) ;
        (* Read at least one byte into [reader.bytes] (or detect end of
           file). *)
        match
          Unix.read
            reader.file_descriptor
            reader.bytes
            reader.position
            (Bytes.length reader.bytes - reader.position)
        with
        | exception Unix.Unix_error ((EAGAIN | EWOULDBLOCK | EINTR), _, _) ->
            (* Nothing available right now (or interrupted): not an error. *)
            ()
        | read_length ->
            if read_length = 0 then (
              (* [Unix.read] returning 0 means the writer closed the pipe. *)
              reader.end_of_file <- true ;
              if raise_partial_message && reader.position > 0 then
                raise Partial_message)
            else (
              reader.position <- reader.position + read_length ;
              (* Decode as many complete messages as the buffer contains. *)
              let rec decode_messages () =
                (* Check if we have a full marshal header already. *)
                (match reader.expected with
                | None ->
                    if reader.position >= Marshal.header_size then (
                      let total_size = Marshal.total_size reader.bytes 0 in
                      reader.expected <- Some total_size ;
                      (* Make sure the buffer can hold the whole message. *)
                      grow reader total_size)
                | Some _ -> ()) ;
                match reader.expected with
                | None -> ()
                | Some expected ->
                    (* Check if we have the full marshaled value already. *)
                    if reader.position >= expected then (
                      (* Decode the message and add it to the queue. *)
                      let message : message = Marshal.from_bytes reader.bytes 0 in
                      Queue.add message reader.messages ;
                      (* Remove the message bytes.
*)
                      let remainder_length = reader.position - expected in
                      Bytes.blit
                        reader.bytes
                        expected
                        reader.bytes
                        0
                        remainder_length ;
                      reader.position <- remainder_length ;
                      reader.expected <- None ;
                      (* Maybe we received several messages at once. *)
                      decode_messages ())
              in
              decode_messages ()))

    (* Read at least one message and return it, or return [None] on
       end of file or timeout. Can raise [Unix.Unix_error]. *)
    let read_blocking ?timeout reader =
      (* Convert the relative [timeout] into an absolute deadline so that
         repeated [select] calls (e.g. after EINTR) do not extend the wait. *)
      let deadline =
        match timeout with
        | None -> None
        | Some timeout -> Some (Unix.gettimeofday () +. timeout)
      in
      let rec loop () =
        match Queue.take_opt reader.messages with
        | Some message -> Some message
        | None -> (
            if reader.end_of_file then None
            else
              let timeout =
                match deadline with
                | None -> -1. (* negative timeout = wait forever *)
                | Some deadline -> max 0. (deadline -. Unix.gettimeofday ())
              in
              match Unix.select [reader.file_descriptor] [] [] timeout with
              | exception Unix.Unix_error (EINTR, _, _) ->
                  (* Interrupted by a signal, try again. *)
                  loop ()
              | [], _, _ -> None
              | _ :: _, _, _ ->
                  read_non_blocking ~raise_partial_message:true reader ;
                  loop ())
      in
      loop ()

    (* Apply [f] to each buffered message, then empty the queue. *)
    let iter_and_clear reader f =
      Queue.iter f reader.messages ;
      Queue.clear reader.messages
  end

  (* Writing messages serialized using [Marshal] to a file descriptor. *)
  module Writer = struct
    (* [broken] is [true] if the other side closed the pipe.
       [message] is the current message being written.
       [position] is the position of the next byte to write in [message].
       [messages] are the next messages to write; each queued [bytes] is a
       complete marshaled message (and thus non-empty). *)
    type t = {
      file_descriptor : Unix.file_descr;
      mutable broken : bool;
      mutable message : bytes;
      mutable position : int;
      messages : bytes Queue.t;
    }

    (* Make a fresh writer for [file_descriptor] with nothing to write. *)
    let create file_descriptor =
      {
        file_descriptor;
        broken = false;
        message = Bytes.empty;
        position = 0;
        messages = Queue.create ();
      }

    (* Perform a single write. To be called when the file descriptor is ready
       for writing. Can raise [Unix.Unix_error].
       (Marshaling itself happens in [push], not here.)
*)
    let write_non_blocking writer =
      if not writer.broken then (
        (* If we already wrote [writer.message], pop a new message. *)
        (if writer.position >= Bytes.length writer.message then
           match Queue.take_opt writer.messages with
           | None -> ()
           | Some message ->
               writer.message <- message ;
               writer.position <- 0) ;
        (* If there is something to write, write it. *)
        if writer.position < Bytes.length writer.message then (
          let write_length =
            try
              (* Use [single_write] instead of [write] because we don't want to
                 block, and [Unix.select] only guarantees that a file
                 descriptor is ready for one write. *)
              Unix.single_write
                writer.file_descriptor
                writer.message
                writer.position
                (Bytes.length writer.message - writer.position)
            with
            | Unix.Unix_error ((EAGAIN | EWOULDBLOCK | EINTR), _, _) -> 0
            | Unix.Unix_error (EPIPE, _, _) ->
                (* Broken pipe: the other side closed the pipe's exit.
                   We won't be able to send anything, so drop everything. *)
                writer.broken <- true ;
                writer.message <- Bytes.empty ;
                writer.position <- 0 ;
                Queue.clear writer.messages ;
                0
          in
          writer.position <- writer.position + write_length ;
          (* If the current message was fully written, remove it to avoid using
             memory. *)
          if writer.position >= Bytes.length writer.message then (
            writer.message <- Bytes.empty ;
            writer.position <- 0)))

    (* Whether there is nothing left to write (current message done and
       queue empty). *)
    let is_empty writer =
      writer.position >= Bytes.length writer.message
      && Queue.is_empty writer.messages

    (* Write everything, blocking as needed until done. *)
    let write_blocking writer =
      while not (is_empty writer) do
        (* [write_non_blocking] will actually block because we didn't set the
           file descriptor as non-blocking and we didn't wait for it to be
           ready with [Unix.select]. But we want it to block here.
*)
        write_non_blocking writer
      done

    (* Marshal [value] tagged with [tag] and queue it for writing.
       Closures are allowed in the payload ([Closures] marshal flag).
       No-op if the pipe is known to be broken. *)
    let push writer tag value =
      if not writer.broken then
        Queue.add
          (Marshal.to_bytes (make tag value : message) [Closures])
          writer.messages

    (* [push] then flush everything, blocking as needed. *)
    let push_and_write_blocking writer tag value =
      push writer tag value ;
      write_blocking writer
  end
end

(* [task_queue] depends on type [task], which depends on type
   [scheduler_context], which is defined in the [Worker] module, which wants to
   be able to clear [task_queue]. To fix this cyclic dependency problem we
   define [clear_task_queue] and will set it later to
   [Queue.clear task_queue]. *)
let clear_task_queue = ref (fun () -> ())

module Timer = struct
  (* A one-shot timer: run [handler] once [deadline] (absolute time) has
     passed, unless [canceled] was set in the meantime. *)
  type t = {deadline : float; handler : unit -> unit; mutable canceled : bool}

  (* We use a map to store active timers. The keys of this map are the
     [deadline]s. This allows to easily pick the next timer to wait for.
     There may be multiple timers associated to the same time. *)
  let map : t list Float_map.t ref = ref Float_map.empty

  (* Register a timer that becomes ready [delay] seconds from now.
     Return the timer so it can be [cancel]ed. *)
  let on_delay delay handler =
    let deadline = Unix.gettimeofday () +. delay in
    let timer = {deadline; handler; canceled = false} in
    let previous_timers =
      Float_map.find_opt deadline !map |> Option.value ~default:[]
    in
    map := Float_map.add deadline (timer :: previous_timers) !map ;
    timer

  let cancel timer =
    (* The timer will be removed from the [map] by [next].
*)
    timer.canceled <- true

  (* Drop all timers without triggering them. *)
  let cancel_all () = map := Float_map.empty

  (* Run a timer's handler, at most once: a triggered timer is also marked
     [canceled] so it cannot fire again. *)
  let trigger timer =
    if not timer.canceled then (
      timer.canceled <- true ;
      timer.handler ())

  (* Return the timer with the earliest deadline, if any, pruning canceled
     timers from the [map] along the way. *)
  let rec next () =
    match Float_map.min_binding_opt !map with
    | None -> None
    | Some (deadline, timers) -> (
        let existed_canceled = ref false in
        (* Keep only the timers that are still active. *)
        let timers =
          Fun.flip List.filter timers @@ fun timer ->
          if timer.canceled then (
            existed_canceled := true ;
            false)
          else true
        in
        match timers with
        | [] ->
            (* Every timer at this deadline was canceled: drop the binding
               and look at the next deadline. *)
            map := Float_map.remove deadline !map ;
            next ()
        | timer :: _ ->
            (* Only rewrite the map if we actually removed something. *)
            if !existed_canceled then map := Float_map.add deadline timers !map ;
            Some timer)
end

(* The [Worker] module is responsible for spawning workers, running workers,
   and defining the state of a worker from the point of view of the worker
   itself and from the scheduler. *)
module Worker = struct
  module Message = Message_not_depending_on_worker

  (* What the scheduler knows about the task a worker is currently running:
     when it started, which signal plays the role of SIGTERM for it, its
     timeouts, and the handlers to run on timeouts and on messages. *)
  type current_task = {
    started_at : float;
    sigterm : int;
    term_timeout : float option;
    kill_timeout : float option;
    on_term_timeout : unit -> unit;
    on_kill_timeout : unit -> unit;
    on_message : Message.t -> unit;
  }

  (* State of a live worker from the point of view of the scheduler.
     [sent_sigterm] contains the time when SIGTERM was sent, if it was sent.
     [sent_msg_stop] contains the time when [msg_stop] was sent, if it was
     sent. *)
  type parent_state_alive = {
    pid : int;
    pipe_to_worker_entrance : Message.Writer.t;
    pipe_from_worker_exit : Message.Reader.t;
    mutable sent_sigterm : float option;
    mutable sent_sigkill : bool;
    mutable sent_msg_stop : float option;
    mutable current_task : current_task option;
  }

  (* State of a dead worker from the point of view of the scheduler.
     There may be some last words to read from the worker. *)
  type parent_state_dead = {pipe_from_worker_exit : Message.Reader.t}

  (* [Burried] (sic) is [Dead] + we closed [pipe_from_worker_exit]:
     there is nothing left.
*)
  type status =
    | Alive of parent_state_alive
    | Dead of parent_state_dead
    | Burried

  (* State of a worker, alive or dead, from the point of view of the
     scheduler. *)
  type parent_state = {mutable status : status}

  (* State of a worker from the point of view of the worker itself. *)
  type child_state = {
    pipe_to_worker_exit : Message.Reader.t;
    pipe_from_worker_entrance : Message.Writer.t;
  }

  (* Scheduler -> worker: run this task. *)
  let msg_execute : (unit -> unit) Message.tag =
    Message.(register closure) "Execute"

  (* Worker -> scheduler: the task raised this exception (as a string). *)
  let msg_raised : string Message.tag = Message.(register string) "Raised"

  (* Scheduler -> worker: stop waiting for tasks and exit. *)
  let msg_stop : unit Message.tag = Message.(register unit) "Stop"

  (* [current_child_state] stores the state of the current child, if the
     current process is a child (i.e. a worker). It is set by [main_loop]
     before a task is run. There are two motivations for this:
     - (1) make it easier to define the [execute] wrapper in [add_task];
     - (2) make it easier for users to define functions that send messages from
       workers without having to pass a worker context around.
     To expand on (1), the [execute] wrapper needs the [child_state] to send
     the task result message in particular. But the [child_state] is only
     defined once the pipes are open, i.e. after the worker is created.
     The [execute] wrapper is defined before that.
     To expand on (2), an example is log functions. If log functions want to
     send the log message to the scheduler, they need a worker context
     (i.e. a [child_state]). Passing a worker context to all log functions can
     be inconvenient. With the global reference, the log function can call
     [get_current_worker_context] instead.
     Global references are usually frowned upon, but here it feels like the
     pragmatic choice. *)
  let current_child_state : child_state option ref = ref None

  (* Run the main loop of the child process: receive tasks, execute them, send
     the result, repeat.
*)
  let rec main_loop ~idle_timeout (state : child_state) =
    match
      Message.Reader.read_blocking ?timeout:idle_timeout state.pipe_to_worker_exit
    with
    | (exception Message.Reader.Partial_message) | None ->
        (* This means the scheduler closed the pipe.
           This only happens on purpose in [check_whether_workers_exited],
           if the worker exited (in which case this line of code cannot be
           reached since the worker is no longer running).
           Otherwise, it can happen if the system closed the pipe or killed the
           scheduler without killing the worker. In that case there isn't much
           we can do besides print an error and exit with a non-zero exit code,
           which is what [failwith] does. The exception is not handled by the
           worker, but if the scheduler is alive, it will detect the non-zero
           exit code. *)
        failwith
          "internal worker error: end of file or timeout while reading next \
           task"
    | Some message ->
        Message.match_with_list
          message
          [
            ( Message.case msg_execute @@ fun task_function ->
              (* Record the child state so that the task (and user log
                 functions) can reach it via [current_child_state]. *)
              current_child_state := Some state ;
              match task_function () with
              | exception exn ->
                  (* NOTE(review): after reporting the failure the worker does
                     not recurse into [main_loop], so it exits after a failed
                     task — presumably the scheduler then spawns a replacement;
                     verify this is intended. *)
                  Message.Writer.push_and_write_blocking
                    state.pipe_from_worker_entrance
                    msg_raised
                    (Printexc.to_string exn)
              | () -> main_loop ~idle_timeout state );
            (* [msg_stop]: return unit, i.e. leave the loop and let the caller
               exit. *)
            Message.case msg_stop Fun.id;
          ]
          ~default:(fun () ->
            (* Maybe a message intended for a previous task? *)
            (main_loop [@tailcall]) ~idle_timeout state)

  (* Fork a new worker. Return the worker state to the parent, and start the
     worker in the child. Can raise [Unix.Unix_error]. *)
  let spawn ~idle_timeout fork =
    (* Two pipes: one for scheduler -> worker, one for worker -> scheduler.
       "entrance" is the write end, "exit" the read end. *)
    let pipe_to_worker_exit, pipe_to_worker_entrance =
      Unix.pipe ~cloexec:true ()
    in
    let pipe_from_worker_exit, pipe_from_worker_entrance =
      Unix.pipe ~cloexec:true ()
    in
    let pid = fork () in
    if pid <> 0 then (
      (* This is the parent process.
*)
      (* Close the ends that belong to the child. *)
      try_close pipe_to_worker_exit ;
      try_close pipe_from_worker_entrance ;
      {
        status =
          Alive
            {
              pid;
              pipe_to_worker_entrance =
                Message.Writer.create pipe_to_worker_entrance;
              pipe_from_worker_exit =
                Message.Reader.create pipe_from_worker_exit;
              sent_sigterm = None;
              sent_sigkill = false;
              sent_msg_stop = None;
              current_task = None;
            };
      })
    else (
      (* This is the child process. *)
      (* Close the ends that belong to the parent. *)
      try_close pipe_to_worker_entrance ;
      try_close pipe_from_worker_exit ;
      Fun.protect
        ~finally:(fun () ->
          (* NOTE(review): this closes [pipe_to_worker_exit] and the
             already-closed [pipe_to_worker_entrance], but not
             [pipe_from_worker_entrance] — presumably process exit closes the
             latter; verify this is intended. *)
          try_close pipe_to_worker_exit ;
          try_close pipe_to_worker_entrance)
      @@ fun () ->
      (* Clear global state in case the worker wants to run a scheduler too.
         And also to free memory maybe. *)
      !clear_task_queue () ;
      Timer.cancel_all () ;
      main_loop
        ~idle_timeout
        {
          pipe_to_worker_exit = Message.Reader.create pipe_to_worker_exit;
          pipe_from_worker_entrance =
            Message.Writer.create pipe_from_worker_entrance;
        } ;
      exit 0)

  (* Mark a worker as no longer running a task (from the scheduler's point of
     view). *)
  let set_as_idle (worker : parent_state) =
    match worker.status with
    | Dead _ | Burried ->
        (* Already quite idle. *)
        ()
    | Alive alive_worker -> alive_worker.current_task <- None
end

type worker_context = Worker.child_state

type scheduler_context = Worker.parent_state

(* Now we can add the functions that depend on the [Worker] module to obtain
   the final [Message] module, visible in the [.mli]. *)
module Message = struct
  include Message_not_depending_on_worker

  (* Queue a message for a worker (non-blocking; actually written by the
     scheduler's main loop). *)
  let send_to_worker (worker : scheduler_context) tag value =
    match worker.status with
    | Dead _ | Burried ->
        (* Act as if we sent the message, but it will never be received.
*)
        ()
    | Alive {pipe_to_worker_entrance; _} ->
        Writer.push pipe_to_worker_entrance tag value

  (* Send a message to the scheduler from inside a worker, blocking until it
     is fully written. (Curried: takes the tag then the value.) *)
  let send_to_scheduler (worker_context : worker_context) message =
    Writer.push_and_write_blocking worker_context.pipe_from_worker_entrance
      message

  (* Block until the scheduler sends a message (or the pipe is closed). *)
  let receive_from_scheduler (worker_context : worker_context) =
    try Reader.read_blocking worker_context.pipe_to_worker_exit
    with Reader.Partial_message ->
      failwith
        "Scheduler.Message.receive_from_scheduler: end of file after partial \
         message"

  (* Same as [receive_from_scheduler] but give up after [timeout] seconds. *)
  let receive_from_scheduler_with_timeout (worker_context : worker_context)
      timeout =
    try Reader.read_blocking ~timeout worker_context.pipe_to_worker_exit
    with Reader.Partial_message ->
      failwith
        "Scheduler.Message.receive_from_scheduler_with_timeout: end of file \
         after partial message"
end

(* A task to run in a worker, with its signal/timeout configuration and the
   event handlers the scheduler triggers on its behalf. *)
type task = {
  sigterm : int;
  term_timeout : float option;
  kill_timeout : float option;
  on_term_timeout : unit -> unit;
  on_kill_timeout : unit -> unit;
  on_start : scheduler_context -> unit;
  on_message : scheduler_context -> Message.t -> unit;
  execute : unit -> unit;
}

(* Tasks waiting to be given to a worker. *)
let task_queue : task Queue.t = Queue.create ()

(* Resolve the forward reference declared next to [Timer] (see the comment on
   [clear_task_queue] above). *)
let () = clear_task_queue := fun () -> Queue.clear task_queue

(* Queue a task. [typ] describes the task's result type; [execute] runs in the
   worker; the [on_*] handlers run in the scheduler. *)
let add_task (type a) ?(sigterm = Sys.sigterm) ?term_timeout ?kill_timeout
    ?(on_term_timeout = fun () -> ()) ?(on_kill_timeout = fun () -> ())
    ?(on_start = fun _ -> ()) ?(on_message = fun _ _ -> ())
    ?(on_finish = fun (_ : (a, string) result) -> ()) (typ : a Message.typ)
    (execute : worker_context -> a) =
  (* Generate a new message tag for the particular result type of this
     task. *)
  let msg_completed = Message.register typ "Completed" in
  (* Modify [execute] to send a message with this tag in case of success.
*)
  let execute () =
    match !Worker.current_child_state with
    | None -> failwith "internal worker error: no current child state"
    | Some worker_context ->
        let result = execute worker_context in
        Message.send_to_scheduler worker_context msg_completed result
  in
  (* Modify [on_message] to trigger [on_finish] when receiving the [Completed]
     or [Raised] messages. *)
  let on_message (worker : scheduler_context) (message : Message.t) =
    Message.match_with_list
      message
      [
        ( Message.case msg_completed @@ fun result ->
          Worker.set_as_idle worker ;
          on_finish (Ok result) );
        ( Message.case Worker.msg_raised @@ fun error_message ->
          Worker.set_as_idle worker ;
          on_finish (Error error_message) );
      ]
      ~default:(fun () -> on_message worker message)
  in
  let task =
    {
      sigterm;
      term_timeout;
      kill_timeout;
      on_term_timeout;
      on_kill_timeout;
      on_start;
      on_message;
      execute;
    }
  in
  Queue.add task task_queue

(* Information used by the main loop of the scheduler.
   [sigchld_pipe_exit] is a file descriptor that we set up to become readable
   after a child process exits. We cannot rely on [Unix.select] raising [EINTR]
   when a child exits because the child could exit just before the call to
   [Unix.select], at which point [Unix.select] could wait until its timeout,
   which could be forever. So we have [Unix.select] wait for
   [sigchld_pipe_exit] to become readable as well. *)
type t = {
  fork : unit -> int;
  worker_idle_timeout : float option;
  worker_kill_timeout : float option;
  on_worker_kill_timeout : unit -> unit;
  on_empty_queue : unit -> unit;
  on_message : Message.t -> unit;
  on_unexpected_worker_exit : Unix.process_status -> unit;
  sigchld_pipe_exit : Unix.file_descr;
  mutable workers : Worker.parent_state option array;
}

(* Buffer used to write to and read from the SIGCHLD pipe (see comment of type
   [t] above). We don't really care what we write to this pipe, we just need
   [Unix.select] to detect that something has been written.
*)
let dummy_bytes = Bytes.make 64 'a'

(* [Term] and [Kill] take the following arguments:
   - the time when the signal should be sent;
   - an event handler ([on_term_timeout] or [on_kill_timeout]) to trigger when
     the signal is sent.
   [Term] takes an additional argument:
   - the signal itself, since users can configure different signals. *)
type deadline =
  | No_deadline
  | Term of float * (unit -> unit) * int
  | Kill of float * (unit -> unit)

(* Compute the next deadline for a given worker.
   A deadline is a date after which an action must be performed.
   This action can be: send SIGTERM, or send SIGKILL; and usually there is a
   corresponding event to trigger ([on_term_timeout] or [on_kill_timeout]).
   Depending on whether we already sent SIGTERM and/or SIGKILL and/or
   [msg_stop], and depending on whether the worker is currently running a task,
   the deadline is different. This function makes this decision.
   This function is used in two places:
   - to decide which timeout to pass to [Unix.select] (in [prepare_select]);
   - to check whether some workers are past their deadline
     (in [check_timeouts]). *)
let get_next_deadline scheduler now (worker : Worker.parent_state_alive) =
  match worker with
  | {current_task = None; sent_msg_stop = None; _} ->
      (* Worker is waiting for more tasks. *)
      No_deadline
  | {current_task = None; sent_msg_stop = Some msg_stop_sent_at; _} -> (
      (* We told the worker to stop using [msg_stop] in
         [maybe_tell_workers_to_stop]. In particular it means the worker is not
         running a task: it is waiting for its next task in its [main_loop].
         This [main_loop] is supposed to stop on its own when it receives
         [msg_stop]. There is thus no point in sending SIGTERM.
         We still want to send SIGKILL if the worker is stuck though. *)
      match scheduler.worker_kill_timeout with
      | None -> No_deadline
      | Some worker_kill_timeout ->
          Kill
            ( msg_stop_sent_at +. worker_kill_timeout,
              scheduler.on_worker_kill_timeout ))
  | {sent_sigkill = true; _} ->
      (* No timeout to act on if we already sent SIGKILL.
*) No_deadline | { sent_sigterm = None; sent_sigkill = false; current_task = Some {sigterm; _}; _; } when !stopped -> (* When stopped, we consider that all tasks have reached their SIGTERM timeout. Except that we don't trigger [on_term_timeout]. *) Term (now, (fun () -> ()), sigterm) | {current_task = Some {term_timeout = None; kill_timeout = None; _}; _} -> (* No timeout for this task. *) No_deadline | {sent_sigterm = Some _; current_task = Some {kill_timeout = None; _}; _} -> (* Sent SIGTERM already and the current task does not want to be SIGKILLed. *) No_deadline | { sent_sigterm = Some sigterm_sent_at; sent_sigkill = false; current_task = Some {kill_timeout = Some kill_timeout; on_kill_timeout; _}; _; } -> (* Sent SIGTERM already, now wait for the time to send SIGKILL. *) Kill (sigterm_sent_at +. kill_timeout, on_kill_timeout) | { sent_sigterm = None; sent_sigkill = false; current_task = Some { started_at; sigterm; term_timeout = Some term_timeout; on_term_timeout; _; }; _; } -> (* Waiting for the time to send SIGTERM. *) Term (started_at +. term_timeout, on_term_timeout, sigterm) | { sent_sigterm = None; sent_sigkill = false; current_task = Some { started_at; term_timeout = None; kill_timeout = Some kill_timeout; on_kill_timeout; _; }; _; } -> (* No SIGTERM timeout for this task, waiting for the time to send SIGKILL. *) Kill (started_at +. 
kill_timeout, on_kill_timeout) let give_task_to_worker task (worker : Worker.parent_state) = match worker.status with | Dead _ | Burried -> failwith "internal scheduler error: tried to make a dead worker do something" | Alive alive -> Message.Writer.push alive.pipe_to_worker_entrance Worker.msg_execute task.execute ; alive.current_task <- Some { started_at = Unix.gettimeofday (); sigterm = task.sigterm; term_timeout = task.term_timeout; kill_timeout = task.kill_timeout; on_term_timeout = task.on_term_timeout; on_kill_timeout = task.on_kill_timeout; on_message = (fun message -> task.on_message worker message); } ; task.on_start worker exception Empty_queue let next_task scheduler = if !stopped then raise Empty_queue ; match Queue.take_opt task_queue with | Some task -> task | None -> ( scheduler.on_empty_queue () ; (* [on_empty_queue] could have pushed some tasks: check again. *) match Queue.take_opt task_queue with | Some task -> task | None -> raise Empty_queue) (* Start tasks until we cannot anymore, i.e. until the queue becomes empty or we reach the maximum worker count and no worker is idle. *) let start_some_tasks scheduler = try (* Try to reuse idle workers first. *) for i = 0 to Array.length scheduler.workers - 1 do match scheduler.workers.(i) with | Some ({ status = Alive { current_task = None; sent_sigterm = None; sent_sigkill = false; _; }; } as worker) -> let task = next_task scheduler in give_task_to_worker task worker | _ -> () done ; (* Spawn new workers if needed and if possible. *) for i = 0 to Array.length scheduler.workers - 1 do match scheduler.workers.(i) with | None -> let task = next_task scheduler in let worker = Worker.spawn ~idle_timeout:scheduler.worker_idle_timeout scheduler.fork in scheduler.workers.(i) <- Some worker ; give_task_to_worker task worker | _ -> () done with Empty_queue -> () (* This is supposed to be called after [start_some_tasks]. If no worker is running a task, it means [start_some_tasks] had no task to start. 
So we can assume the queue is empty and we tell workers to stop. It is possible that a worker has an [at_exit] handler that causes a message to be sent to the scheduler that itself causes some tasks to be added to the queue, in that case we'll just start new workers. *) let maybe_tell_workers_to_stop (workers : Worker.parent_state option array) = let not_running_a_task (worker : Worker.parent_state option) = match worker with | None -> true | Some worker -> ( match worker.status with | Alive {current_task = Some _; _} -> false | Alive {current_task = None; _} | Dead _ | Burried -> true) in let tell_worker_to_stop (worker : Worker.parent_state option) = match worker with | Some {status = Alive alive} -> if alive.sent_msg_stop = None then ( alive.sent_msg_stop <- Some (Unix.gettimeofday ()) ; Message.Writer.push alive.pipe_to_worker_entrance Worker.msg_stop ()) | None | Some {status = Dead _ | Burried} -> () in if Array.for_all not_running_a_task workers then Array.iter tell_worker_to_stop workers type select_parameters = { read : (Unix.file_descr * Worker.parent_state option) list; write : (Unix.file_descr * Worker.parent_state) list; timeout : float; } let prepare_select scheduler = let read = ref [] in let write = ref [] in let deadlines = ref [] in let now = Unix.gettimeofday () in (* List worker pipes and deadlines corresponding to their timeouts. 
*) let add_worker (worker : Worker.parent_state) = match worker.status with | Burried -> () | Dead {pipe_from_worker_exit} -> if not pipe_from_worker_exit.end_of_file then read := (pipe_from_worker_exit.file_descriptor, Some worker) :: !read | Alive alive -> ( if not alive.pipe_from_worker_exit.end_of_file then read := (alive.pipe_from_worker_exit.file_descriptor, Some worker) :: !read ; if not (Message.Writer.is_empty alive.pipe_to_worker_entrance) then write := (alive.pipe_to_worker_entrance.file_descriptor, worker) :: !write ; match get_next_deadline scheduler now alive with | No_deadline -> () | Term (deadline, _, _) | Kill (deadline, _) -> deadlines := deadline :: !deadlines) in Array.iter (Option.iter add_worker) scheduler.workers ; (* Read from [sigchld_pipe_exit] only if there are workers thought to be alive. *) if scheduler.workers |> Array.exists @@ fun worker -> match worker with | None | Some {Worker.status = Dead _ | Burried} -> false | Some {status = Alive _} -> true then read := (scheduler.sigchld_pipe_exit, None) :: !read ; (* Add a deadline for the next timer. *) (match Timer.next () with | None -> () | Some timer -> deadlines := timer.deadline :: !deadlines) ; (* Deduce the timeout from the various deadlines. *) let timeout = match !deadlines with | [] -> -1. | _ :: _ as deadlines -> let min_deadline = List.fold_left min max_float deadlines in max 0. (min_deadline -. now) in (* Check if there actually is something to wait for. *) if !read = [] && !write = [] && timeout < 0. 
then None else Some {read = !read; write = !write; timeout} type select_result = { readable_workers : Worker.parent_state list; writeable_workers : Worker.parent_state list; readable_sigchld_pipe_exit : bool; } let select {read; write; timeout} ~sigchld_pipe_exit = let readable, writeable = match Unix.select (List.map fst read) (List.map fst write) [] timeout with | exception Unix.Unix_error (EINTR, _, _) -> ([], []) | r, w, _ -> (r, w) in let read_map = FD_map.of_list read in let readable_sigchld_pipe_exit = ref false in let readable_workers = Fun.flip List.filter_map readable @@ fun file_descriptor -> match FD_map.find_opt file_descriptor read_map with | None | Some None -> if file_descriptor = sigchld_pipe_exit then readable_sigchld_pipe_exit := true ; None | Some x -> x in let write_map = FD_map.of_list write in let writeable_workers = Fun.flip List.filter_map writeable @@ fun file_descriptor -> FD_map.find_opt file_descriptor write_map in { readable_workers; writeable_workers; readable_sigchld_pipe_exit = !readable_sigchld_pipe_exit; } let read_from_readable_worker scheduler (worker : Worker.parent_state) = match worker.status with | Burried -> (* Not actually readable, that's weird. *) () | Dead {pipe_from_worker_exit} -> (* Don't fail if end of file with partial message, this would crash the scheduler. This probably means the worker crashed anyway so we'll see it from the exit code. 
*) Message.Reader.read_non_blocking ~raise_partial_message:false pipe_from_worker_exit ; Message.Reader.iter_and_clear pipe_from_worker_exit scheduler.on_message ; if pipe_from_worker_exit.end_of_file then ( try_close pipe_from_worker_exit.file_descriptor ; worker.status <- Burried) | Alive {pipe_from_worker_exit; current_task = None; _} -> Message.Reader.read_non_blocking ~raise_partial_message:false pipe_from_worker_exit ; Message.Reader.iter_and_clear pipe_from_worker_exit scheduler.on_message | Alive {pipe_from_worker_exit; current_task = Some {on_message; _}; _} -> Message.Reader.read_non_blocking ~raise_partial_message:false pipe_from_worker_exit ; Message.Reader.iter_and_clear pipe_from_worker_exit on_message let write_to_writeable_worker (worker : Worker.parent_state) = match worker.status with | Burried | Dead _ -> (* Not actually writeable, that's weird. *) () | Alive {pipe_to_worker_entrance; _} -> Message.Writer.write_non_blocking pipe_to_worker_entrance let read_from_sigchld_pipe_exit sigchld_pipe_exit = let (_ : int) = Unix.read sigchld_pipe_exit dummy_bytes 0 (Bytes.length dummy_bytes) in () let check_whether_workers_exited scheduler = Fun.flip Array.iter scheduler.workers @@ fun worker_opt -> Fun.flip Option.iter worker_opt @@ fun worker -> match worker.status with | Dead _ | Burried -> (* Yes, it exited. But we already know. *) () | Alive alive -> ( match Unix.waitpid [WNOHANG] alive.pid with | exception Unix.Unix_error ((EINTR | EAGAIN), _, _) -> (* Not supposed to happen with [WNOHANG] and a PID that is not a file descriptor but you never know. *) () | 0, _ -> (* No, it did not exit. *) () | _, exit_status -> ( (* Yes, it exited. Update the worker. *) try_close alive.pipe_to_worker_entrance.file_descriptor ; worker.status <- Dead {pipe_from_worker_exit = alive.pipe_from_worker_exit} ; (* Fail current task, if any. 
*) match alive.current_task with | None -> if exit_status <> WEXITED 0 || alive.sent_msg_stop = None then scheduler.on_unexpected_worker_exit exit_status | Some current_task -> current_task.on_message (Message.make Worker.msg_raised ("worker " ^ show_process_status exit_status)))) let bury_workers (workers : Worker.parent_state option array) = for i = 0 to Array.length workers - 1 do match workers.(i) with | None -> () | Some worker -> ( match worker.status with | Alive _ -> () | Burried -> workers.(i) <- None | Dead {pipe_from_worker_exit} -> if pipe_from_worker_exit.end_of_file then ( try_close pipe_from_worker_exit.file_descriptor ; worker.status <- Burried ; workers.(i) <- None)) done let check_timeouts now scheduler = Fun.flip Array.iter scheduler.workers @@ fun worker_opt -> Fun.flip Option.iter worker_opt @@ fun worker -> match worker.status with | Dead _ | Burried -> () | Alive alive -> ( match get_next_deadline scheduler now alive with | No_deadline -> () | Term (deadline, on_term_timeout, signal) -> if now >= deadline && alive.sent_sigterm = None then ( alive.sent_sigterm <- Some now ; on_term_timeout () ; Unix.kill alive.pid signal) | Kill (deadline, on_kill_timeout) -> if now >= deadline && not alive.sent_sigkill then ( alive.sent_sigkill <- true ; on_kill_timeout () ; Unix.kill alive.pid Sys.sigkill)) let rec check_timers now = match Timer.next () with | None -> () | Some timer -> if now >= timer.deadline then ( Timer.trigger timer ; check_timers now) let rec main_loop scheduler = start_some_tasks scheduler ; maybe_tell_workers_to_stop scheduler.workers ; match prepare_select scheduler with | None -> (* Nothing can happen: - no pipe to read from; - no pipe to write to; - no live worker to wait to exit; - no timer. So we're done. 
*) () | Some select_parameters -> let sigchld_pipe_exit = scheduler.sigchld_pipe_exit in let {readable_workers; writeable_workers; readable_sigchld_pipe_exit} = select select_parameters ~sigchld_pipe_exit in List.iter (read_from_readable_worker scheduler) readable_workers ; List.iter write_to_writeable_worker writeable_workers ; if readable_sigchld_pipe_exit then read_from_sigchld_pipe_exit sigchld_pipe_exit ; check_whether_workers_exited scheduler ; bury_workers scheduler.workers ; let now = Unix.gettimeofday () in check_timeouts now scheduler ; check_timers now ; main_loop scheduler let run ?worker_idle_timeout ?worker_kill_timeout ?(on_worker_kill_timeout = fun () -> ()) ?(on_empty_queue = fun () -> ()) ?(on_message = fun _ -> ()) ?(on_unexpected_worker_exit = fun _ -> ()) ~fork max_worker_count = (* Set up a pipe that will become readable when a worker terminates. *) let sigchld_pipe_exit, sigchld_pipe_entrance = Unix.pipe () in Fun.protect ~finally:(fun () -> try_close sigchld_pipe_exit ; try_close sigchld_pipe_entrance) @@ fun () -> let handle_sigchld (_ : int) = (* This could in theory block. But writing 1 byte, that will likely be consumed quickly, should be fine. *) let (_ : int) = Unix.write sigchld_pipe_entrance dummy_bytes 0 1 in () in let old_sigchld_behavior = Sys.(signal sigchld) (Signal_handle handle_sigchld) in Fun.protect ~finally:(fun () -> Sys.(set_signal sigchld) old_sigchld_behavior) @@ fun () -> stopped := false ; main_loop { fork; worker_idle_timeout; worker_kill_timeout; on_worker_kill_timeout; on_empty_queue; on_message; on_unexpected_worker_exit; sigchld_pipe_exit; workers = Array.make (max 1 max_worker_count) None; } let stop () = stopped := true let clear () = Queue.clear task_queue let get_current_worker_context () = !Worker.current_child_state
sectionYPositions = computeSectionYPositions($el), 10)"
x-init="setTimeout(() => sectionYPositions = computeSectionYPositions($el), 10)"
>