ROSS
tw-sched.c
Go to the documentation of this file.
1#include <ross.h>
2#include <stdbool.h>
3#include <stdlib.h>
4#include <string.h>
5#include <assert.h>
6
7/**
8 * \brief Reset the event bitfield prior to entering the event handler
9 * post-reverse - reset the bitfield so that a potential re-running of the
10 * event is presented with a consistent bitfield state
11 * NOTE: the size checks are to better support the experimental reverse
12 * computation compiler, which can use a larger bitfield.
13 * Courtesy of John P Jenkins
14 */
15static inline void reset_bitfields(tw_event *revent)
16{
17 memset(&revent->cv, 0, sizeof(revent->cv));
18}
19
20// To be used instead of littering the file with ifdef's all over. If this grows
21// far too large, there might be a need to rethink how to implement the
22// tie-breaker mechanism so that it can be deactivated
// NOTE(review): "PQ_MINUMUM" is a typo for "PQ_MINIMUM"; it is kept as-is because
// every call site below uses this spelling — renaming must change all uses at once.
23#ifdef USE_RAND_TIEBREAKER
// Timestamp of the next event in the PE's priority queue.
24#define PQ_MINUMUM(pe) tw_pq_minimum_sig_ptr(pe->pq)->recv_ts
// Tri-state compare of the KP's last processed time vs event e's time (<0, 0, >0).
25#define CMP_KP_TO_EVENT_TIME(kp, e) tw_event_sig_compare_ptr(&kp->last_sig, &e->sig)
// Compare the GVT-hook trigger time against the head of the PE's priority queue.
26#define CMP_GVT_HOOK_TO_NEXT_IN_QUEUE(trigger, pe) tw_event_sig_compare_ptr(&trigger.sig_at, tw_pq_minimum_sig_ptr(pe->pq))
27#define TRIGGER_ROLLBACK_TO_EVENT_TIME(kp, e) tw_kp_rollback_to_sig(kp, &e->sig)
28#define STIME_FROM_PE(pe) TW_STIME_DBL(pe->GVT_sig.recv_ts)
29#define STIME_FROM_KP(kp) TW_STIME_DBL(kp->last_sig.recv_ts)
30#else
31#define PQ_MINUMUM(pe) tw_pq_minimum(pe->pq)
32#define CMP_KP_TO_EVENT_TIME(kp, e) TW_STIME_CMP(kp->last_time, e->recv_ts)
// NOTE(review): this variant yields a floating-point difference rather than the
// tri-state int of the tiebreaker variant; callers only compare against 0, so
// both variants agree in sign — but the types differ. Verify no caller stores it.
33#define CMP_GVT_HOOK_TO_NEXT_IN_QUEUE(trigger, pe) (trigger.at - tw_pq_minimum(pe->pq))
// NOTE(review): trailing ';' here (absent in the tiebreaker variant above) makes
// this macro statement-only; it would break in an expression context.
34#define TRIGGER_ROLLBACK_TO_EVENT_TIME(kp, e) tw_kp_rollback_to(kp, e->recv_ts);
35#define STIME_FROM_PE(pe) TW_STIME_DBL(pe->GVT)
36#define STIME_FROM_KP(kp) TW_STIME_DBL(kp->last_time)
37#endif
38
39/**
40 * Get all events out of my event queue and spin them out into
41 * the priority queue so they can be processed in time stamp
42 * order.
43 */
// Drain the PE's inbound event queue into the priority queue so events can be
// processed in timestamp order; stragglers trigger a rollback of the target KP.
44static void tw_sched_event_q(tw_pe * me) {
45 tw_clock start;
46 tw_kp *dest_kp;
47 tw_event *cev;
48 tw_event *nev;
49
50 while (me->event_q.size) {
51 cev = tw_eventq_pop_list(&me->event_q);
52
53 for (; cev; cev = nev) {
54 nev = cev->next;
55
56 if(!cev->state.owner || cev->state.owner == TW_pe_free_q) {
57 tw_error(TW_LOC, "no owner!");
58 }
// Events flagged for cancellation are just unlinked here; the cancel queue
// handles their disposal.
59 if (cev->state.cancel_q) {
// NOTE(review): source line 60 is missing here (extraction gap) — presumably an
// owner-state update before unlinking. TODO confirm against upstream ROSS.
61 cev->next = cev->prev = NULL;
62 continue;
63 }
64
65 switch (cev->state.owner) {
66 case TW_pe_event_q:
67 dest_kp = cev->dest_lp->kp;
68
69 if (CMP_KP_TO_EVENT_TIME(dest_kp, cev) > 0) {
70 /* cev is a straggler message which has arrived
71 * after we processed events occuring after it.
72 * We need to jump back to before cev's timestamp.
73 */
74 start = tw_clock_read();
// NOTE(review): source line 75 is missing (extraction gap) — the actual rollback
// call (likely TRIGGER_ROLLBACK_TO_EVENT_TIME(dest_kp, cev)) is not visible here.
76 me->stats.s_rollback += tw_clock_read() - start;
// NOTE(review): source line 77 is missing — presumably the g_st_ev_trace guard
// for the instrumentation call below. TODO confirm.
78 st_collect_event_data(cev, (double)start / g_tw_clock_rate);
79 }
// Hand the event to the priority queue, accounting the cost against s_pq.
80 start = tw_clock_read();
81 tw_pq_enqueue(me->pq, cev);
82 me->stats.s_pq += tw_clock_read() - start;
83 break;
84
85 default:
86 tw_error(TW_LOC, "Event in event_q, but owner %d not recognized", cev->state.owner);
87 }
88 }
89 }
90}
91
92/**
93 * OPT: need to link events into canq in reverse order so
94 * that when we rollback the 1st event, we should not
95 * need to do any further rollbacks.
96 */
// Process the PE's cancellation list: each anti-message is matched with its
// positive event wherever that event currently lives (event_q, pq, or the KP's
// processed queue), removed, and freed. Total time is charged to s_cancel_q.
97static void tw_sched_cancel_q(tw_pe * me) {
98 tw_clock start=0, pq_start;
99 tw_event *cev;
100 tw_event *nev;
101
102 start = tw_clock_read();
// The cancel list is detached atomically-by-swap; new cancels arriving during
// processing re-populate me->cancel_q and are handled on the next iteration.
103 while (me->cancel_q) {
104 cev = me->cancel_q;
105 me->cancel_q = NULL;
106
107 for (; cev; cev = nev) {
108 nev = cev->cancel_next;
109
110 if (!cev->state.cancel_q) {
111 tw_error(TW_LOC, "No cancel_q bit on event in cancel_q");
112 }
113
114 if(!cev->state.owner || cev->state.owner == TW_pe_free_q) {
115 tw_error(TW_LOC, "Cancelled event, no owner!");
116 }
117
118 switch (cev->state.owner) {
119 case TW_pe_event_q:
120 /* This event hasn't been added to our pq yet and we
121 * have not officially received it yet either. We'll
122 * do the actual free of this event when we receive it
123 * as we spin out the event_q chain.
124 */
125 tw_eventq_delete_any(&me->event_q, cev);
126
127 tw_event_free(me, cev);
128 break;
129
130 case TW_pe_anti_msg:
131 tw_event_free(me, cev);
132 break;
133
134 case TW_pe_pq:
135 /* Event was not cancelled directly from the event_q
136 * because the cancel message came after we popped it
137 * out of that queue but before we could process it.
138 */
139 pq_start = tw_clock_read();
140 tw_pq_delete_any(me->pq, cev);
141 me->stats.s_pq += tw_clock_read() - pq_start;
142 tw_event_free(me, cev);
143 break;
144
145 case TW_kp_pevent_q:
146 /* The event was already processed.
147 * SECONDARY ROLLBACK
148 */
// NOTE(review): source line 149 is missing (extraction gap) — the secondary
// rollback call that undoes the processed event before freeing it. TODO confirm
// against upstream ROSS.
150 tw_event_free(me, cev);
151 break;
152
153 default:
154 tw_error(TW_LOC, "Event in cancel_q, but owner %d not recognized", cev->state.owner);
155 }
156 }
157 }
158
159 me->stats.s_cancel_q += tw_clock_read() - start;
160}
161
// Optimistically execute up to g_tw_mblock events off the priority queue.
// Bails out early on: low free-event memory, a pending GVT-hook trigger, an
// empty queue, or an event-memory abort during handler execution.
162static void tw_sched_batch(tw_pe * me) {
163 /* Number of consecutive times we gave up because there were no free event buffers. */
164 static int no_free_event_buffers = 0;
165 static int warned_no_free_event_buffers = 0;
166 const int max_alloc_fail_count = 20;
167
168 tw_clock start, end, pq_start;
169 unsigned int msg_i;
170
171 /* Process g_tw_mblock events, or until the PQ is empty
172 * (whichever comes first).
173 */
174 for (msg_i = g_tw_mblock; msg_i; msg_i--) {
175 tw_event *cev;
176 tw_lp *clp;
177 tw_kp *ckp;
178
179 /* OUT OF FREE EVENT BUFFERS. BAD.
180 * Go do fossil collect immediately.
181 */
182 if (me->free_q.size <= g_tw_gvt_threshold) {
183 /* Suggested by Adam Crume */
184 if (++no_free_event_buffers > 10) {
185 if (!warned_no_free_event_buffers) {
186 fprintf(stderr, "WARNING: No free event buffers. Try increasing memory via the --extramem option.\n");
187 warned_no_free_event_buffers = 1;
188 }
189 if (no_free_event_buffers >= max_alloc_fail_count) {
190 tw_error(TW_LOC, "Event allocation failed %d consecutive times. Exiting.", max_alloc_fail_count);
191 }
192 }
// NOTE(review): source line 193 is missing here (extraction gap).
194 break;
195 }
196 no_free_event_buffers = 0;
197
198 // Force GVT computation, if (local) virtual time is ahead of the triggering timestamp for the gvt hook
199 if (g_tw_gvt_hook) {
// NOTE(review): source line 200 is missing — the declaration of `hook_type`
// used below was lost in extraction. TODO confirm against upstream.
201 if (hook_type && CMP_GVT_HOOK_TO_NEXT_IN_QUEUE(g_tw_gvt_hook_trigger, me) <= 0) {
203 break;
204 }
205 }
206
207 start = tw_clock_read();
208 if (!(cev = tw_pq_dequeue(me->pq))) {
209 break;
210 }
211 me->stats.s_pq += tw_clock_read() - start;
212#ifndef USE_RAND_TIEBREAKER
213 // Note: I don't believe this captures all event ties
// NOTE(review): the tie-counter increment (source line 215) is missing from
// this empty-looking block — extraction gap.
214 if(TW_STIME_CMP(cev->recv_ts, tw_pq_minimum(me->pq)) == 0) {
216 }
217#endif
218
219 clp = cev->dest_lp;
220
// Record the KP's most recent virtual time so stragglers can be detected.
221 ckp = clp->kp;
222 me->cur_event = cev;
223#ifdef USE_RAND_TIEBREAKER
224 tw_copy_event_sig(&ckp->last_sig, &cev->sig);
225#else
226 ckp->last_time = cev->recv_ts;
227#endif
228
229 /* Save state if no reverse computation is available */
230 if (!clp->type->revent) {
231 tw_error(TW_LOC, "Reverse Computation must be implemented!");
232 }
233
234 start = tw_clock_read();
235 reset_bitfields(cev);
236
237 // if NOT A SUSPENDED LP THEN FORWARD PROC EVENTS
238 if( !(clp->suspend_flag) )
239 {
240 // state-save and update the LP's critical path
241 unsigned int prev_cp = clp->critical_path;
// NOTE(review): source lines 242, 245-246 are missing around the handler call
// (likely the critical-path max update and event-trace instrumentation).
243 (*clp->type->event)(clp->cur_state, &cev->cv,
244 tw_event_data(cev), clp);
247 cev->critical_path = prev_cp;
248 }
249 ckp->s_nevent_processed++;
250 // instrumentation
// NOTE(review): source lines 251-252 are missing (extraction gap).
253 end = tw_clock_read();
254 clp->lp_stats->s_process_event += end - start;
255 me->stats.s_event_process += end - start;
256
257 /* We ran out of events while processing this event. We
258 * cannot continue without doing GVT and fossil collect.
259 */
260
261 if (me->cev_abort)
262 {
263 start = tw_clock_read();
264 me->stats.s_nevent_abort++;
265 // instrumentation
266 ckp->kp_stats->s_nevent_abort++;
267 clp->lp_stats->s_nevent_abort++;
268 me->cev_abort = 0;
269
// Re-queue the aborted event so it is retried after GVT/fossil collection.
271 pq_start = tw_clock_read();
272 tw_pq_enqueue(me->pq, cev);
273 me->stats.s_pq += tw_clock_read() - pq_start;
274
// Rewind the KP clock to the newest event it actually completed.
275 cev = tw_eventq_peek(&ckp->pevent_q);
276#ifdef USE_RAND_TIEBREAKER
277 if (cev) {
278 tw_copy_event_sig(&ckp->last_sig, &cev->sig);
279 } else {
// NOTE(review): source line 280 is missing — presumably copies the PE's GVT
// signature into ckp->last_sig (mirroring the #else branch). TODO confirm.
281 }
282#else
283 ckp->last_time = cev ? cev->recv_ts : me->GVT;
284#endif
285
287
288 me->stats.s_event_abort += tw_clock_read() - start;
289
290
291 break;
292 } // END ABORT CHECK
293
294 /* Thread current event into processed queue of kp */
// NOTE(review): source line 295 is missing — the owner-state assignment
// (cev->state.owner = TW_kp_pevent_q) expected before the unshift. TODO confirm.
296 tw_eventq_unshift(&ckp->pevent_q, cev);
297
// NOTE(review): the second half of this sampling condition (source line 299)
// is missing; as shown the if-expression is truncated.
298 if(g_st_rt_sampling &&
300 {
301 tw_clock current_rt = tw_clock_read();
302#ifdef USE_DAMARIS
304 {
305 if (g_st_damaris_enabled)
306 st_damaris_expose_data(me, me->GVT, RT_COL);
307 else
309 }
310#else
314 st_collect_model_data(me, ((double) current_rt) / g_tw_clock_rate, RT_STATS);
315#endif
317 }
318
319 }
320}
321
// NOTE(review): the enclosing function header (source line ~322) was lost in
// extraction. Given the "realtime GVT interval" check below, this appears to be
// the realtime variant of tw_sched_batch (upstream: tw_sched_batch_realtime) —
// TODO confirm against upstream ROSS.
323 /* Number of consecutive times we gave up because there were no free event buffers. */
324 static int no_free_event_buffers = 0;
325 static int warned_no_free_event_buffers = 0;
326 const int max_alloc_fail_count = 20;
327
328 tw_clock start, pq_start;
329 unsigned int msg_i;
330
331 /* Process g_tw_mblock events, or until the PQ is empty
332 * (whichever comes first).
333 */
334 for (msg_i = g_tw_mblock; msg_i; msg_i--) {
335 tw_event *cev;
336 tw_lp *clp;
337 tw_kp *ckp;
338
339 /* OUT OF FREE EVENT BUFFERS. BAD.
340 * Go do fossil collect immediately.
341 */
342 if (me->free_q.size <= g_tw_gvt_threshold) {
343 /* Suggested by Adam Crume */
344 if (++no_free_event_buffers > 10) {
345 if (!warned_no_free_event_buffers) {
346 fprintf(stderr, "WARNING: No free event buffers. Try increasing memory via the --extramem option.\n");
347 warned_no_free_event_buffers = 1;
348 }
349 if (no_free_event_buffers >= max_alloc_fail_count) {
350 tw_error(TW_LOC, "Event allocation failed %d consecutive times. Exiting.", max_alloc_fail_count);
351 }
352 }
354 break;
355 }
356 no_free_event_buffers = 0;
357
358 // Force GVT computation, if (local) virtual time is ahead of the triggering timestamp for the gvt hook
359 if (g_tw_gvt_hook) {
// NOTE(review): the declaration of `hook_type` (source line 360) is missing —
// extraction gap, same as in tw_sched_batch above.
361 if (hook_type && CMP_GVT_HOOK_TO_NEXT_IN_QUEUE(g_tw_gvt_hook_trigger, me) <= 0) {
363 break;
364 }
365 }
366
367 start = tw_clock_read();
368 if (!(cev = tw_pq_dequeue(me->pq))) {
369 break; // leave the batch function
370 }
371 me->stats.s_pq += tw_clock_read() - start;
372#ifndef USE_RAND_TIEBREAKER
373 // Note: I don't believe this captures all event ties
// NOTE(review): tie-counter increment (source line 375) missing — extraction gap.
374 if(TW_STIME_CMP(cev->recv_ts, tw_pq_minimum(me->pq)) == 0) {
376 }
377#endif
378
379 clp = cev->dest_lp;
380
381 ckp = clp->kp;
382 me->cur_event = cev;
383#ifdef USE_RAND_TIEBREAKER
384 tw_copy_event_sig(&ckp->last_sig, &cev->sig);
385#else
386 ckp->last_time = cev->recv_ts;
387#endif
388
389 /* Save state if no reverse computation is available */
390 if (!clp->type->revent) {
391 tw_error(TW_LOC, "Reverse Computation must be implemented!");
392 }
393
394 start = tw_clock_read();
395
396 reset_bitfields(cev);
397
398 // if NOT A SUSPENDED LP THEN FORWARD PROC EVENTS
399 if( !(clp->suspend_flag) )
400 {
401 // state-save and update the LP's critical path
402 unsigned int prev_cp = clp->critical_path;
// NOTE(review): source lines 403, 406-407 missing around the handler call.
404 (*clp->type->event)(clp->cur_state, &cev->cv,
405 tw_event_data(cev), clp);
408 cev->critical_path = prev_cp;
409 }
410 ckp->s_nevent_processed++;
411 // instrumentation
414 me->stats.s_event_process += tw_clock_read() - start;
415
416 /* We ran out of events while processing this event. We
417 * cannot continue without doing GVT and fossil collect.
418 */
419
420 if (me->cev_abort)
421 {
422 start = tw_clock_read();
423 me->stats.s_nevent_abort++;
424 // instrumentation
425 ckp->kp_stats->s_nevent_abort++;
426 clp->lp_stats->s_nevent_abort++;
427 me->cev_abort = 0;
428
430 pq_start = tw_clock_read();
431 tw_pq_enqueue(me->pq, cev);
432 me->stats.s_pq += tw_clock_read() - pq_start;
433
434 cev = tw_eventq_peek(&ckp->pevent_q);
435#ifdef USE_RAND_TIEBREAKER
436 if (cev) {
437 tw_copy_event_sig(&ckp->last_sig, &cev->sig);
438 } else {
// NOTE(review): source line 439 missing — likely the GVT-signature fallback.
440 }
441#else
442 ckp->last_time = cev ? cev->recv_ts : me->GVT;
443#endif
444
446
447 me->stats.s_event_abort += tw_clock_read() - start;
448
449 break; // leave the batch function
450 } // END ABORT CHECK
451
452 /* Thread current event into processed queue of kp */
// NOTE(review): owner-state assignment (source line 453) missing — see
// tw_sched_batch for the expected statement.
454 tw_eventq_unshift(&ckp->pevent_q, cev);
455
456 /* Check if realtime GVT time interval has expired */
// NOTE(review): the if-condition (source line 457) and its body (line 459)
// are missing; only the braces and the break survive.
458 {
460 break; // leave the batch function
461 }
462
// NOTE(review): second half of the sampling condition (source line 464) missing.
463 if(g_st_rt_sampling &&
465 {
466 tw_clock current_rt = tw_clock_read();
470 st_collect_model_data(me, ((double)current_rt) / g_tw_clock_rate, RT_STATS);
471
473 }
474 }
475}
476
// NOTE(review): the function header (source line ~477) was lost in extraction.
// From the two-stage LP init and startup-event drain, this is the scheduler
// initialization entry point (upstream: tw_sched_init(tw_pe *me)) — TODO confirm.
478 /* First Stage Init */
479 (*me->type.pre_lp_init)(me);
480 tw_init_kps(me);
481 tw_init_lps(me);
482 (*me->type.post_lp_init)(me);
483
// NOTE(review): source line 484 missing (extraction gap).
485
486 /* Second Stage Init -- all LPs are created and have proper mappings */
487 tw_pre_run_lps(me);
// NOTE(review): source line 488 missing.
489
490#ifdef USE_RIO
491 tw_clock start = tw_clock_read();
492 io_load_events(me);
493 me->stats.s_rio_load += (tw_clock_read() - start);
// NOTE(review): source line 494 missing.
495#endif
496
497 /*
498 * Recv all of the startup events out of the network before
499 * starting simulation.. at this point, all LPs are done with init.
500 */
501 if (tw_nnodes() > 1) {
502 tw_net_read(me);
// NOTE(review): source line 503 missing (likely a barrier before clock init).
504 tw_clock_init(me);
505 }
506
507 /* This lets the signal handler know that we have started
508 * the scheduler loop, and to print out the stats before
509 * finishing if someone should type CTRL-c
510 */
// NOTE(review): source line 511 (the flag assignment the comment refers to)
// is missing — extraction gap.
512}
513
514// MPI barrier to check if any PE has a true value `val`. Returns true if anyone says "TRUE"
515static inline bool does_any_pe(bool val) {
516 bool global_val;
517 if(MPI_Allreduce(&val, &global_val, 1, MPI_C_BOOL, MPI_LOR, MPI_COMM_ROSS) != MPI_SUCCESS) {
518 tw_error(TW_LOC, "MPI_Allreduce for custom rollback and cleanup failed");
519 }
520 return global_val;
521}
522
523/**
524 * This function can be called by the GVT hook to guarantee that the state of
525 * all LPs has been backtracked to GVT and that all events to process are in the
526 * priority queue (pe->pq), i.e, all other queues are empty (cancel events and
527 * network events)
528 */
// NOTE(review): the function header (source line 529) was lost in extraction —
// only the body below survives. TODO recover the signature from upstream ROSS.
530#ifdef USE_RAND_TIEBREAKER
531 tw_event_sig const gvt_sig = pe->GVT_sig;
532 //tw_stime const gvt = gvt_sig.recv_ts;
533 // Backtracking the simulation to GVT
534 for (unsigned int i = 0; i < g_tw_nkp; i++) {
535 tw_kp_rollback_to_sig(g_tw_kp[i], &gvt_sig);
536 }
// Rollbacks must not have moved GVT itself.
537 assert(tw_event_sig_compare_ptr(&pe->GVT_sig, &gvt_sig) == 0);
538#else
539 tw_stime const gvt = pe->GVT;
540 // Backtracking the simulation to GVT
541 for (unsigned int i = 0; i < g_tw_nkp; i++) {
542 tw_kp_rollback_to(g_tw_kp[i], gvt);
543 }
544 assert(pe->GVT == gvt);
545#endif
546
547 // Making sure that everything gets cleaned up properly
// Loop until no PE anywhere still has pending cancels or queued network events.
548 do {
549 if (tw_nnodes() > 1) {
550 double const start = tw_clock_read();
// NOTE(review): source line 551 is missing — presumably tw_net_read(pe),
// whose cost the following line accounts for. TODO confirm.
552 pe->stats.s_net_read += tw_clock_read() - start;
553 }
554
555 pe->gvt_status = 1;
// NOTE(review): source lines 556-558 are missing — presumably the calls that
// drain pe's event_q and cancel_q (tw_sched_event_q / tw_sched_cancel_q).
559
560 //printf("PE %lu: Time stamp at the end of GVT time: %f - AVL-tree "
561 // "sized: %d\n", g_tw_mynode, gvt, pe->avl_tree_size);
562 } while (does_any_pe(pe->cancel_q != NULL) || does_any_pe(pe->event_q.size != 0));
563
564 //printf("PE %lu: All events rolledbacked and cancelled\n", g_tw_mynode);
565}
566
567static inline bool is_gvt_past_hook_threshold(tw_pe * me) {
568 // checking if the trigger has been activated on all PEs
569#ifdef USE_RAND_TIEBREAKER
570 bool const activate_trigger = tw_event_sig_compare_ptr(&me->GVT_sig, &g_tw_gvt_hook_trigger.sig_at) >= 0;
571#else
572 bool const activate_trigger = me->GVT >= g_tw_gvt_hook_trigger.at;
573#endif
574 bool global_triggered;
575 if(MPI_Allreduce(&activate_trigger, &global_triggered, 1, MPI_C_BOOL, MPI_LAND, MPI_COMM_ROSS) != MPI_SUCCESS) {
576 tw_error(TW_LOC, "MPI_Allreduce to check arbitrary function activation failed");
577 }
578 return global_triggered;
579}
580
581/**
582 * This function will determine if the GVT hook should be called, and if it does, it calls the hook
583 */
static inline void tw_gvt_hook_step(tw_pe * me) {
585 if (g_tw_gvt_hook && g_tw_gvt_hook_trigger.status) {
586 bool has_hook_been_triggered = false;
// NOTE(review): the `case` labels of this switch (source lines 588, 591, 597,
// 603) were lost in extraction; the arms below correspond to the disabled,
// timestamp, every-n-GVT, and model-call trigger modes. TODO confirm labels
// against upstream ROSS.
587 switch (g_tw_gvt_hook_trigger.status) {
589 tw_error(TW_LOC, "This is weird. This should have never happened. This switch case is guarded by g_tw_gvt_hook_trigger.status != 0 (aka, != GVT_HOOK_STATUS_disabled). Panic.");
590 break;
592 has_hook_been_triggered = is_gvt_past_hook_threshold(me);
593 if (has_hook_been_triggered) {
// NOTE(review): source line 594 missing (likely disables/resets the trigger).
595 }
596 break;
598 int const starting_at = g_tw_gvt_hook_trigger.every_n_gvt.starting_at;
599 int const every = g_tw_gvt_hook_trigger.every_n_gvt.nums;
600 has_hook_been_triggered = (g_tw_gvt_done - starting_at) % every == 0;
601 }
602 break;
604 bool const triggered_here = tw_event_sig_compare_ptr(&me->GVT_sig, &g_tw_gvt_hook_trigger.sig_at) > 0;
605 bool const triggered_somewhere = does_any_pe(triggered_here);
606 if (triggered_somewhere) {
607 // LP has triggered GVT hook
608 has_hook_been_triggered = true;
609 // reset g_tw_gvt_hook_trigger.sig_at
// NOTE(review): source line 610 (the reset the comment announces) is missing.
611 }
612 }
613 break;
614 }
615 if (has_hook_been_triggered) {
616#ifdef USE_RAND_TIEBREAKER
617 bool const past_end_time = me->GVT_sig.recv_ts >= g_tw_ts_end;
618 // resetting GVT to a time before the end of time (hopefully, it is not to a previous GVT time we have already considered)
619 if (past_end_time && g_tw_gvt_hook_trigger.status != GVT_HOOK_STATUS_timestamp) {
// NOTE(review): source line 620 missing — the GVT_sig reassignment that the
// assert below validates (mirroring the #else branch). TODO confirm.
621 assert(tw_event_sig_compare_ptr(&me->GVT_sig, tw_pq_minimum_sig_ptr(me->pq)) <= 0);
622 }
623#else
624 bool const past_end_time = me->GVT >= g_tw_ts_end;
625 if (past_end_time && g_tw_gvt_hook_trigger.status != GVT_HOOK_STATUS_timestamp) {
626 me->GVT = g_tw_gvt_hook_trigger.at;
627 assert(me->GVT <= tw_pq_minimum(me->pq));
628 }
629#endif
630 g_tw_gvt_hook(me, past_end_time);
631 }
632 }
633}
634
635/**
636 * This function will determine if the GVT hook should be called, and if it does, it calls the hook. Sequential version
637 */
static inline void tw_gvt_hook_step_seq(tw_pe * me) {
639 if (g_tw_gvt_hook && g_tw_gvt_hook_trigger.status) {
640 bool triggered = false;
// NOTE(review): the `case` labels of this switch (source lines 642, 646-647)
// were lost in extraction; only the timestamp and model-call modes are valid
// here — everything else falls to the default warning. TODO confirm labels.
641 switch (g_tw_gvt_hook_trigger.status) {
643 triggered = (CMP_GVT_HOOK_TO_NEXT_IN_QUEUE(g_tw_gvt_hook_trigger, me) <= 0 // the next event is ahead of our next function trigger
644 || tw_pq_get_size(me->pq) == 0); // we have no events to process
645 break;
648 break;
649 default: {
// Warn only once per run that this trigger mode is unsupported sequentially.
650 static bool first_seq_warning = true;
651 if (first_seq_warning) {
652 tw_warning(TW_LOC, "During sequential simulation the GVT hook cannot be triggered by other than: the timestamp trigger (set by calling `tw_trigger_gvt_hook_at`), or by an LP call (should call `tw_trigger_gvt_hook_now`). The GVT hook won't be called!");
653 first_seq_warning = false;
654 }
656 return;
657 }
658 break;
659 }
660 if (!triggered) {
661 return;
662 }
663
// In sequential mode GVT is simply advanced to the trigger point before the hook.
664#ifdef USE_RAND_TIEBREAKER
// NOTE(review): source line 665 missing — the GVT_sig assignment that the
// assert below validates (mirroring the #else branch). TODO confirm.
666 assert(tw_event_sig_compare_ptr(&me->GVT_sig, tw_pq_minimum_sig_ptr(me->pq)) <= 0);
667#else
668 me->GVT = g_tw_gvt_hook_trigger.at;
669 assert(me->GVT <= tw_pq_minimum(me->pq));
670#endif
671
// NOTE(review): the `case` labels and bodies of this second switch (source
// lines 673-674, 676-677) are missing — presumably per-mode trigger resets.
672 switch (g_tw_gvt_hook_trigger.status) {
675 break;
678 break;
679 default:
680 tw_error(TW_LOC, "This is weird. This should have never happened. Only GVT_HOOK_STATUS_timestamp and GVT_HOOK_STATUS_model_call are allowed");
681 }
682 bool const past_end_time = TW_STIME_CMP(PQ_MINUMUM(me), g_tw_ts_end) > 0;
683 g_tw_gvt_hook(me, past_end_time);
684 }
685}
686
687/*************************************************************************/
688/* Primary Schedulers -- In order: Sequential, Conservative, Optimistic */
689/*************************************************************************/
690
// NOTE(review): the function header (source line 691) was lost in extraction.
// The banner below identifies this as the sequential scheduler (upstream:
// tw_scheduler_sequential(tw_pe *me)) — TODO confirm.
692 tw_stime gvt = TW_STIME_CRT(0.0);
693
694 if(tw_nnodes() > 1) {
695 tw_error(TW_LOC, "Sequential Scheduler used for world size greater than 1.");
696 }
697
698 tw_event *cev;
699
700 printf("*** START SEQUENTIAL SIMULATION ***\n\n");
701
// NOTE(review): source lines 702-703 missing (likely wall-clock start and
// total-cycle counter initialization, cf. the rollback-check scheduler below).
704
705 while (1) {
706 // Checking whether we have to call the GVT hook
// NOTE(review): source line 707 missing — presumably tw_gvt_hook_step_seq(me).
708
709 // This is only needed in the case a GVT hook changes the timestamp of an event in the queue, otherwise it is always false
710 if (TW_STIME_CMP(PQ_MINUMUM(me), g_tw_ts_end) > 0) { break; } // Stop simulation if event scheduled past the end of time
711
712 cev = tw_pq_dequeue(me->pq);
713 if (!cev) { break; } // Stop simulation, if there are no new events
714 tw_lp *clp = cev->dest_lp;
715 tw_kp *ckp = clp->kp;
716
717 me->cur_event = cev;
718#ifdef USE_RAND_TIEBREAKER
719 tw_copy_event_sig(&ckp->last_sig, &cev->sig);
720#else
721 ckp->last_time = cev->recv_ts;
722
723 // Note: I believe that this doesn't fully capture all event ties
// NOTE(review): tie-counter increment (source line 725) missing.
724 if(TW_STIME_CMP(cev->recv_ts, tw_pq_minimum(me->pq)) == 0) {
726 }
727#endif
728
// In sequential mode GVT is just the current event's timestamp.
729 gvt = cev->recv_ts;
// NOTE(review): source line 730 is missing — the guard (likely a
// percent-complete check) that opens the block closed by the brace after
// gvt_print. As shown the braces are unbalanced.
731 gvt_print(gvt);
732 }
733
734 reset_bitfields(cev);
// NOTE(review): source line 735 missing.
736 tw_clock const event_start = tw_clock_read();
737 (*clp->type->event)(clp->cur_state, &cev->cv, tw_event_data(cev), clp);
// NOTE(review): source lines 738-739 missing (likely event-trace instrumentation).
740 if (*clp->type->commit) {
741 (*clp->type->commit)(clp->cur_state, &cev->cv, tw_event_data(cev), clp);
742 }
743 tw_clock const total_event_process = tw_clock_read() - event_start;
744 clp->lp_stats->s_process_event += total_event_process;
745 me->stats.s_event_process += total_event_process;
746
747 if (me->cev_abort){
748 tw_error(TW_LOC, "insufficient event memory");
749 }
750
751 ckp->s_nevent_processed++;
752 // instrumentation
// NOTE(review): source lines 753-754 missing.
755 tw_event_free(me, cev);
756
// NOTE(review): second half of the sampling condition (source line 758) missing.
757 if(g_st_rt_sampling &&
759 {
760 tw_clock current_rt = tw_clock_read();
762 st_collect_model_data(me, ((double)current_rt) / g_tw_clock_rate, RT_STATS);
763
765 }
766 }
767 tw_wall_now(&me->end_time);
// NOTE(review): source line 768 missing (likely s_total accounting).
769
770 printf("*** END SIMULATION ***\n\n");
771
772 tw_stats(me);
773 tw_all_lp_stats(me);
774
775 (*me->type.final)(me);
776}
777
// NOTE(review): the function header (source line ~778) was lost in extraction.
// The banner identifies this as the parallel conservative scheduler (upstream:
// tw_scheduler_conservative(tw_pe *me)) — TODO confirm. Several if-headers in
// the prologue (lines 782, 786) are also missing, leaving orphan closing braces.
779 tw_clock start;
780 unsigned int msg_i;
781
783 printf("Warning: GVT Hook will not be triggered in Parallel Conservative Simulation by the function `tw_trigger_gvt_hook_now` (The function `tw_trigger_gvt_hook_when_model_calls` has activated the GVT hook functionality for LP modes.) Please use sequential or parallel optimistic simulation modes.\n");
784 }
785
787 printf("*** START PARALLEL CONSERVATIVE SIMULATION ***\n\n");
788 }
789
// NOTE(review): source lines 790-791 missing (likely wall-clock/cycle start).
792
793 for (;;){
794 if (tw_nnodes() > 1){
795 start = tw_clock_read();
796 tw_net_read(me);
797 me->stats.s_net_read += tw_clock_read() - start;
798 }
799
// Two-phase GVT; the hook runs only on iterations where GVT completed.
800 tw_gvt_step1(me);
// NOTE(review): source line 801 missing (likely tw_sched_event_q/cancel_q calls).
802 int const gvt_triggered = me->gvt_status;
803 tw_gvt_step2(me);
804 if (gvt_triggered) {
// NOTE(review): source line 805 missing — presumably tw_gvt_hook_step(me).
806 }
807
808 if (STIME_FROM_PE(me) > g_tw_ts_end) {
809 break;
810 }
811
812 // put "batch" loop directly here
813 /* Process g_tw_mblock events, or until the PQ is empty
814 * (whichever comes first).
815 */
816 for (msg_i = g_tw_mblock; msg_i; msg_i--) {
817 tw_event *cev;
818 tw_lp *clp;
819 tw_kp *ckp;
820
821 /* OUT OF FREE EVENT BUFFERS. BAD.
822 * Go do fossil collect immediately.
823 */
824 if (me->free_q.size <= g_tw_gvt_threshold) {
// NOTE(review): source line 825 missing.
826 break;
827 }
828
// NOTE(review): the if-header for this break (source line 829) is missing,
// as is most of the GVT-hook condition below (lines 834-836).
830 break;
831 }
832
833 if (g_tw_gvt_hook
837 break;
838 }
839
840 start = tw_clock_read();
841 if (!(cev = tw_pq_dequeue(me->pq))) {
842 break;
843 }
844 me->stats.s_pq += tw_clock_read() - start;
845
846#ifndef USE_RAND_TIEBREAKER
847 //Note: I believe that this doesn't really capture all event ties
// NOTE(review): tie-counter increment (source line 849) missing.
848 if(TW_STIME_CMP(cev->recv_ts, tw_pq_minimum(me->pq)) == 0) {
850 }
851#endif
852
853 clp = cev->dest_lp;
854 ckp = clp->kp;
855 me->cur_event = cev;
856
// Under conservative execution a straggler is a hard error, not a rollback.
// NOTE(review): the two adjacent string literals below concatenate without a
// separator ("...PE %lu" "src LP %lu"), producing "%lusrc" in the message —
// a missing ", " in the format string.
857 if (CMP_KP_TO_EVENT_TIME(ckp, cev) > 0) {
858 tw_error(TW_LOC, "Found KP last time %lf > current event time %lf for LP %d, PE %lu"
859 "src LP %lu, src PE %lu",
860 STIME_FROM_KP(ckp), cev->recv_ts, clp->gid, clp->pe->id,
861 cev->send_lp, cev->send_pe);
862 }
863#ifdef USE_RAND_TIEBREAKER
864 tw_copy_event_sig(&ckp->last_sig, &cev->sig);
865#else
866 ckp->last_time = cev->recv_ts;
867#endif
868
869 start = tw_clock_read();
870 reset_bitfields(cev);
// NOTE(review): source line 871 missing (likely event-trace instrumentation).
872 (*clp->type->event)(clp->cur_state, &cev->cv, tw_event_data(cev), clp);
// NOTE(review): source lines 873-874 missing.
875 if (*clp->type->commit) {
876 (*clp->type->commit)(clp->cur_state, &cev->cv, tw_event_data(cev), clp);
877 }
878
879 ckp->s_nevent_processed++;
880 // instrumentation
// NOTE(review): source lines 881-882 missing.
883 me->stats.s_event_process += tw_clock_read() - start;
884
885 if (me->cev_abort) {
886 tw_error(TW_LOC, "insufficient event memory");
887 }
888
889 tw_event_free(me, cev);
890
// NOTE(review): second half of the sampling condition (source line 892) missing.
891 if(g_st_rt_sampling &&
893 {
894 tw_clock current_rt = tw_clock_read();
898 st_collect_model_data(me, ((double)current_rt) / g_tw_clock_rate, RT_STATS);
899
901 }
902 }
903 }
904
905 tw_wall_now(&me->end_time);
// NOTE(review): source lines 906, 908, 912, 917 missing in the epilogue
// (likely s_total accounting and the rank-0 guard for the banner).
907
909 printf("*** END SIMULATION ***\n\n");
910 }
911
913
914 // call the model PE finalize function
915 (*me->type.final)(me);
916
918
919 tw_stats(me);
920}
921
// NOTE(review): the function header (source line ~922) was lost in extraction.
// The banner identifies this as the parallel optimistic scheduler (upstream:
// tw_scheduler_optimistic(tw_pe *me)) — TODO confirm. The rank-0 guard for the
// start banner (line 925) is also missing, leaving an orphan closing brace.
923 tw_clock start;
924
926 printf("*** START PARALLEL OPTIMISTIC SIMULATION WITH SUSPEND LP FEATURE ***\n\n");
927 }
928
// NOTE(review): source lines 929-930 missing (likely wall-clock/cycle start).
931
932 for (;;) {
// Pull in remote events first so stragglers are detected before batching.
933 if (tw_nnodes() > 1) {
934 start = tw_clock_read();
935 tw_net_read(me);
936 me->stats.s_net_read += tw_clock_read() - start;
937 }
938
939 tw_gvt_step1(me);
// NOTE(review): source lines 940-941 missing — presumably tw_sched_event_q(me)
// and tw_sched_cancel_q(me) between the two GVT steps. TODO confirm.
942 int const gvt_triggered = me->gvt_status;
943 tw_gvt_step2(me);
944 if (gvt_triggered) {
// NOTE(review): source line 945 missing — presumably tw_gvt_hook_step(me).
946 }
947
948 if (STIME_FROM_PE(me) > g_tw_ts_end)
949 break;
950
951 tw_sched_batch(me);
952 }
953
954 tw_wall_now(&me->end_time);
// NOTE(review): source lines 955, 957, 959, 966 missing in the epilogue.
956
958
960 printf("*** END SIMULATION ***\n\n");
961 }
962
963 // call the model PE finalize function
964 (*me->type.final)(me);
965
967
968 tw_stats(me);
969 tw_all_lp_stats(me);
970}
971
// NOTE(review): the function header (source line ~972) was lost in extraction.
// The banner identifies this as the optimistic scheduler with real-time GVT
// (upstream: tw_scheduler_optimistic_realtime(tw_pe *me)) — TODO confirm.
973 tw_clock start;
974
// NOTE(review): source lines 975, 977 missing (rank-0 guard for the banner).
976
978 printf("*** START PARALLEL OPTIMISTIC SIMULATION WITH SUSPEND LP FEATURE AND REAL TIME GVT ***\n\n");
979 }
980
// NOTE(review): source lines 981-982 missing (wall-clock/cycle start).
983
984 // init the realtime GVT
// NOTE(review): source line 985 (the init call this comment announces) missing.
986
987 for (;;) {
988 if (tw_nnodes() > 1) {
989 start = tw_clock_read();
990 tw_net_read(me);
991 me->stats.s_net_read += tw_clock_read() - start;
992 }
993
// NOTE(review): source lines 994-996 missing — presumably the realtime variant
// of tw_gvt_step1 plus the event/cancel queue drains. TODO confirm.
997 int const gvt_triggered = me->gvt_status;
998 tw_gvt_step2(me); // use regular step2 at this point
999 if (gvt_triggered) {
1000 tw_gvt_hook_step(me);
1001 }
1002
1003 if (STIME_FROM_PE(me) > g_tw_ts_end)
1004 break;
1005
// NOTE(review): source line 1006 missing — presumably the realtime batch call
// (the headerless batch function above). TODO confirm.
1007 }
1008
1009 tw_wall_now(&me->end_time);
1010 me->stats.s_total = tw_clock_read() - me->stats.s_total;
1011
// NOTE(review): source lines 1012, 1014 missing (rank-0 guard for banner).
1013
1015 printf("*** END SIMULATION ***\n\n");
1016 }
1017
1018 // call the model PE finalize function
1019 (*me->type.final)(me);
1020
1021 st_inst_finalize(me);
1022
1023 tw_stats(me);
1024}
1025
// Rollback target timestamp for the optimistic-debug scheduler: after it stops
// processing, KP 0 is rolled back to this near-zero virtual time (printed in
// the scheduler's startup banner as assumption 5).
1026double g_tw_rollback_time = 0.000000001;
1027
// NOTE(review): the function header (source line ~1028) was lost in extraction.
// The banner identifies this as the optimistic debug scheduler (upstream:
// tw_scheduler_optimistic_debug(tw_pe *me)) — TODO confirm. It runs forward
// until nearly out of event memory, then rolls everything back to exercise
// every LP's reverse handler.
1029 tw_event *cev=NULL;
1030
1031 if(tw_nnodes() > 1) {
1032 tw_error(TW_LOC, "Sequential Scheduler used for world size greater than 1.");
1033 }
1034
1035 printf("/***************************************************************************/\n");
1036 printf("/***** WARNING: Starting Optimistic Debug Scheduler!! **********************/\n");
1037 printf("This schedule assumes the following: \n");
1038 printf(" 1) One 1 Processor/Core is used.\n");
1039 printf(" 2) One 1 KP is used.\n");
1040 printf(" NOTE: use the --nkp=1 argument to the simulation to ensure that\n");
1041 printf(" it only uses 1 KP.\n");
1042 printf(" 3) Events ARE NEVER RECLAIMED (LP Commit Functions are not called).\n");
// NOTE(review): the banner says "16 events left" but the loop below stops at
// free_q.size <= 1024 — the banner text is stale relative to the code.
1043 printf(" 4) Executes til out of memory (16 events left) and \n injects rollback to first before primodal init event.\n");
1044 printf(" 5) g_tw_rollback_time = %13.12lf \n", g_tw_rollback_time);
1045 printf("/***************************************************************************/\n");
1046
1047 if( g_tw_nkp > 1 ) {
1048 tw_error(TW_LOC, "Number of KPs is greater than 1.");
1049 }
1050
1051 if (g_tw_gvt_hook && g_tw_gvt_hook_trigger.status) {
1052 printf("Warning: GVT Hook will not be triggered in the Optimistic Debug Scheduler.\n");
1053 }
1054
1055 printf("\n");
1056 printf("*** START OPTIMISTIC DEBUG SIMULATION ***\n\n");
1057 tw_wall_now(&me->start_time);
1058
1059 while ((cev = tw_pq_dequeue(me->pq))) {
1060 tw_lp *clp = cev->dest_lp;
1061 tw_kp *ckp = clp->kp;
1062
1063 me->cur_event = cev;
1064#ifdef USE_RAND_TIEBREAKER
1065 tw_copy_event_sig(&ckp->last_sig, &cev->sig);
1066#else
1067 ckp->last_time = cev->recv_ts;
1068#endif
1069
1070 /* don't update GVT */
1071 reset_bitfields(cev);
1072
1073 // state-save and update the LP's critical path
1074 unsigned int prev_cp = clp->critical_path;
// NOTE(review): source line 1075 missing (likely the critical-path max update).
1076 (*clp->type->event)(clp->cur_state, &cev->cv, tw_event_data(cev), clp);
1077 cev->critical_path = prev_cp;
1078
1079 ckp->s_nevent_processed++;
1080
1081 /* Thread current event into processed queue of kp */
1082 cev->state.owner = TW_kp_pevent_q;
1083 tw_eventq_unshift(&ckp->pevent_q, cev);
1084
1085 /* stop when we have 1024 events left */
1086 if ( me->free_q.size <= 1024) {
1087 break;
1088 }
1089 }
1090
1091 // If we've run out of free events or events to process (maybe we're past end time?)
1092 // Perform all the rollbacks!
1093 printf("/******************* Starting Rollback Phase ******************************/\n");
1094#ifdef USE_RAND_TIEBREAKER
1095 tw_event_sig const zero_time = (tw_event_sig){ g_tw_rollback_time, 0 };
1096 tw_kp_rollback_to_sig( g_tw_kp[0], &zero_time );
1097#else
// NOTE(review): source line 1098 missing — the non-tiebreaker rollback call
// (presumably tw_kp_rollback_to(g_tw_kp[0], g_tw_rollback_time)). TODO confirm.
1099#endif
1100 printf("/******************* Completed Rollback Phase ******************************/\n");
1101
1102 tw_wall_now(&me->end_time);
1103
1104 printf("*** END SIMULATION ***\n\n");
1105
1106 tw_stats(me);
1107
1108 (*me->type.final)(me);
1109}
1110
// NOTE(review): the function header (source line ~1111) was lost in extraction.
// The banner identifies this as the sequential rollback-test scheduler: every
// event is processed, rolled back, and processed again, with LP state snapshots
// compared after each phase to validate the model's reverse handlers.
1112 tw_stime gvt = TW_STIME_CRT(0.0);
1113
1114 if(tw_nnodes() > 1) {
1115 tw_error(TW_LOC, "Sequential Scheduler used for world size greater than 1.");
1116 }
1117
// Snapshot buffers are sized for the largest LP state in the simulation.
1118 // Finding size of largest LP
1119 size_t largest_lp_size = 0;
1120 for (size_t i = 0; i < g_tw_nlp; i++) {
1121 size_t const lp_size = g_tw_lp[i]->type->state_sz;
1122 if (lp_size > largest_lp_size) {
1123 largest_lp_size = lp_size;
1124 }
1125 }
1126 tw_event *cev;
// NOTE(review): source line 1127 is missing — the declarations of `prev` and
// `cur` (checkpoint structs, presumably crv_lpstate_checkpoint_internal).
// NOTE(review): largest_lp_size is only enlarged by the checkpoint size AFTER
// these mallocs — if crv_init_checkpoints() can return a larger size, the
// buffers may be undersized. Verify ordering against upstream.
1128 prev.state = malloc(largest_lp_size);
1129 cur.state = malloc(largest_lp_size);
1130 if (prev.state == NULL || cur.state == NULL) {
1131 tw_error(TW_LOC, "Failed to allocate memory to save state");
1132 }
1133 size_t const largest_lp_checkpoint = crv_init_checkpoints();
1134 if (largest_lp_checkpoint > largest_lp_size) {
1135 largest_lp_size = largest_lp_checkpoint;
1136 }
1137
1138 printf("*** START SEQUENTIAL ROLLBACK TEST SIMULATION ***\n\n");
1139
1140 tw_wall_now(&me->start_time);
1141 me->stats.s_total = tw_clock_read();
1142
1143 while (1) {
1144 // Checking whether we have to call the GVT hook
// NOTE(review): source line 1145 missing — presumably tw_gvt_hook_step_seq(me).
1146
1147 // This is only needed in the case a GVT hook changes the timestamp of an event in the queue, otherwise it is always false
1148 if (TW_STIME_CMP(PQ_MINUMUM(me), g_tw_ts_end) > 0) { break; } // Stop simulation if event scheduled past the end of time
1149
1150 cev = tw_pq_dequeue(me->pq);
1151 if (!cev) { break; } // Stop simulation, if there are no new events
1152 tw_lp *clp = cev->dest_lp;
1153 tw_kp *ckp = clp->kp;
1154
1155 me->cur_event = cev;
1156#ifdef USE_RAND_TIEBREAKER
1157 tw_copy_event_sig(&ckp->last_sig, &cev->sig);
1158#else
1159 ckp->last_time = cev->recv_ts;
1160
1161 // Note: I believe that this doesn't fully capture all event ties
1162 if(TW_STIME_CMP(cev->recv_ts, tw_pq_minimum(me->pq)) == 0) {
1163 me->stats.s_pe_event_ties++;
1164 }
1165#endif
1166
1167 gvt = cev->recv_ts;
// NOTE(review): source line 1168 missing — the guard opening the block closed
// by the brace after gvt_print; braces are unbalanced as shown.
1169 gvt_print(gvt);
1170 }
1171
1172 reset_bitfields(cev);
// NOTE(review): source line 1173 missing.
1174
// Snapshot the LP state BEFORE the event, to compare after rollback.
1175 crv_copy_lpstate(&prev, clp);
1176
1177 // Forward pass
1178 tw_clock total_event_process = 0.0;
1179 tw_clock event_start = tw_clock_read();
1180 (*clp->type->event)(clp->cur_state, &cev->cv, tw_event_data(cev), clp);
1181 total_event_process += tw_clock_read() - event_start;
1182
1183 if (me->cev_abort){
1184 tw_error(TW_LOC, "insufficient event memory");
1185 }
1186
// Snapshot the LP state AFTER the event, to compare after the re-run.
1187 crv_copy_lpstate(&cur, clp);
1188
1189 // Rollback pass
1190 tw_clock const start_rollback = tw_clock_read();
1191 tw_event_rollback(cev);
1192 me->stats.s_rollback += tw_clock_read() - start_rollback;
1193
// forward+reverse must restore the pre-event state exactly.
1194 crv_check_lpstates(clp, cev, &prev, "before processing event", "after processing event and rollback");
1195 crv_clean_lpstate(&prev, clp);
1196
1197 // Forward pass (again)
1198 event_start = tw_clock_read();
1199 (*clp->type->event)(clp->cur_state, &cev->cv, tw_event_data(cev), clp);
// NOTE(review): source lines 1200-1201 missing (likely event-trace calls).
1202 if (*clp->type->commit) {
1203 (*clp->type->commit)(clp->cur_state, &cev->cv, tw_event_data(cev), clp);
1204 }
1205 total_event_process += tw_clock_read() - event_start;
1206
// the re-run (plus commit) must reproduce the first forward pass exactly.
1207 crv_check_lpstates(clp, cev, &cur, "after processing event", "after processing, rollback, processing event again and commiting");
1208 crv_clean_lpstate(&cur, clp);
1209
1210 clp->lp_stats->s_process_event += total_event_process;
1211 me->stats.s_event_process += total_event_process;
1212
1213 if (me->cev_abort){
1214 tw_error(TW_LOC, "insufficient event memory");
1215 }
1216
// Each event counts twice (two forward passes) plus one rollback.
1217 ckp->s_nevent_processed+=2;
1218 ckp->s_rb_total++;
1219 // instrumentation
// NOTE(review): source lines 1220-1221 missing.
1222 ckp->kp_stats->s_rb_total++;
1223 tw_event_free(me, cev);
1224
// NOTE(review): second half of the sampling condition (source line 1226) missing.
1225 if(g_st_rt_sampling &&
1227 {
1228 tw_clock current_rt = tw_clock_read();
1230 st_collect_model_data(me, ((double)current_rt) / g_tw_clock_rate, RT_STATS);
1231
1233 }
1234 }
1235 tw_wall_now(&me->end_time);
1236 me->stats.s_total = tw_clock_read() - me->stats.s_total;
1237
1238 printf("*** END SIMULATION ***\n\n");
1239
1240 free(cur.state);
1241 free(prev.state);
1242
1243 tw_stats(me);
1244 tw_all_lp_stats(me);
1245
1246 (*me->type.final)(me);
1247}
tw_pe * pe
Definition avl_tree.c:10
void crv_clean_lpstate(crv_lpstate_checkpoint_internal *state, tw_lp const *clp)
Definition crv-state.c:189
void crv_copy_lpstate(crv_lpstate_checkpoint_internal *into, tw_lp const *clp)
Definition crv-state.c:176
void crv_check_lpstates(tw_lp *clp, tw_event *cev, crv_lpstate_checkpoint_internal const *before_state, char const *before_msg, char const *after_msg)
Definition crv-state.c:105
size_t crv_init_checkpoints(void)
Definition crv-state.c:47
static tw_clock tw_clock_read(void)
Definition aarch64.h:8
uint64_t tw_clock
Definition aarch64.h:6
static double percent_complete
static void gvt_print(tw_stime gvt)
void st_collect_event_data(tw_event *cev, double recv_rt)
void st_inst_finalize(tw_pe *me)
@ ALL_STATS
tw_clock g_st_rt_samp_start_cycles
int g_st_model_stats
void st_collect_engine_data(tw_pe *me, int col_type)
tw_clock g_st_rt_interval
void st_collect_model_data(tw_pe *pe, double current_rt, int stats_type)
int g_st_engine_stats
int g_st_ev_trace
@ FULL_TRACE
int g_st_rt_sampling
void tw_net_read(tw_pe *)
starts service_queues() to poll network
unsigned tw_nnodes(void)
void tw_net_barrier(void)
MPI_Comm MPI_COMM_ROSS
Definition network-mpi.c:4
void tw_pq_enqueue(tw_pq *, tw_event *)
Definition splay.c:245
void tw_pq_delete_any(tw_pq *, tw_event *)
Definition splay.c:341
tw_event * tw_pq_dequeue(tw_pq *)
Definition splay.c:298
tw_stime tw_pq_minimum(tw_pq *)
Definition splay.c:398
unsigned int tw_pq_get_size(tw_pq *)
Definition splay.c:418
void io_load_events(tw_pe *me)
Definition io-mpi.c:227
#define TW_STIME_DBL(x)
Definition ross-base.h:42
#define TW_STIME_CRT(x)
Definition ross-base.h:41
#define TW_STIME_CMP(x, y)
Definition ross-base.h:43
double tw_stime
Definition ross-base.h:39
void tw_stats(tw_pe *me)
Definition tw-stats.c:126
unsigned int g_tw_gvt_threshold
Definition ross-global.c:84
static void * tw_event_data(tw_event *event)
void tw_kp_rollback_to_sig(tw_kp *kp, tw_event_sig const *to_sig)
tw_lp ** g_tw_lp
Definition ross-global.c:28
unsigned long long g_tw_clock_rate
tw_kpid g_tw_nkp
Definition ross-global.c:27
void tw_init_kps(tw_pe *me)
Definition tw-kp.c:179
static void tw_event_free(tw_pe *, tw_event *)
unsigned long long g_tw_gvt_interval_start_cycles
Definition ross-global.c:70
void tw_kp_rollback_to(tw_kp *kp, tw_stime to)
Definition tw-kp.c:59
tw_kp ** g_tw_kp
Definition ross-global.c:29
void tw_kp_rollback_event(tw_event *event)
Definition tw-kp.c:104
void tw_error(const char *file, int line, const char *fmt,...)
Definition tw-util.c:77
tw_peid g_tw_mynode
Definition ross-global.c:92
void tw_pre_run_lps(tw_pe *me)
Definition tw-lp.c:147
unsigned int g_tw_gvt_done
Definition ross-global.c:85
tw_lpid g_tw_nlp
Definition ross-global.c:24
tw_peid g_tw_masternode
Definition ross-global.c:93
void tw_wall_now(tw_wtime *t)
Definition tw-timing.c:7
void tw_init_lps(tw_pe *me)
Definition tw-lp.c:83
void tw_warning(const char *file, int line, const char *fmt,...)
Definition tw-util.c:93
unsigned int g_tw_gvt_interval
Definition ross-global.c:67
unsigned int g_tw_mblock
Definition ross-global.c:66
unsigned int g_tw_sim_started
Definition ross-global.c:36
void tw_event_rollback(tw_event *event)
Definition tw-event.c:221
void tw_clock_init(tw_pe *me)
Definition aarch64.c:28
void tw_all_lp_stats(tw_pe *me)
Definition tw-stats.c:248
unsigned long long g_tw_gvt_realtime_interval
Definition ross-global.c:69
double g_tw_lookahead
Definition ross-global.c:53
double g_tw_ts_end
Definition ross-global.c:72
#define TW_LOC
@ GVT_HOOK_STATUS_disabled
@ GVT_HOOK_STATUS_timestamp
@ GVT_HOOK_STATUS_every_n_gvt
void tw_gvt_step2(tw_pe *)
struct gvt_hook_trigger g_tw_gvt_hook_trigger
void tw_gvt_force_update(void)
void tw_gvt_force_update_realtime(void)
void(* g_tw_gvt_hook)(tw_pe *pe, bool is_queue_empty)
void tw_gvt_step1(tw_pe *)
void tw_gvt_step1_realtime(tw_pe *)
#define ROSS_MAX(a, b)
static int tw_event_sig_compare_ptr(tw_event_sig const *e_sig, tw_event_sig const *n_sig)
Definition ross-types.h:512
tw_event_sig const g_tw_max_sig
static void tw_copy_event_sig(tw_event_sig *e, tw_event_sig const *sig)
Definition ross-types.h:493
@ TW_pe_anti_msg
Anti-message.
Definition ross-types.h:227
@ TW_pe_free_q
In tw_pe.free_q.
Definition ross-types.h:232
@ TW_pe_event_q
In a tw_pe.event_q list.
Definition ross-types.h:224
@ TW_kp_pevent_q
In a tw_kp.pevent_q.
Definition ross-types.h:226
@ TW_pe_pq
In a tw_pe.pq.
Definition ross-types.h:225
static tw_event * tw_eventq_peek(tw_eventq *q)
Definition tw-eventq.h:304
static tw_event * tw_eventq_pop_list(tw_eventq *q)
Definition tw-eventq.h:436
static void tw_eventq_delete_any(tw_eventq *q, tw_event *e)
Definition tw-eventq.h:408
static void tw_eventq_unshift(tw_eventq *q, tw_event *e)
Definition tw-eventq.h:344
@ GVT_HOOK_STATUS_model_call
unsigned int s_nevent_processed
unsigned int s_rb_total
unsigned int s_nevent_abort
tw_clock s_process_event
unsigned int s_nevent_processed
unsigned int s_nevent_abort
tw_stime recv_ts
Definition ross-types.h:260
Event Stucture.
Definition ross-types.h:277
struct tw_event::@130070134144252114152124341363102114315067064025 state
tw_stime recv_ts
Actual time to be received.
Definition ross-types.h:314
unsigned int critical_path
Critical path of this event.
Definition ross-types.h:310
unsigned char owner
Owner of the next/prev pointers; see tw_event_owner.
Definition ross-types.h:300
tw_lpid send_lp
sending LP ID for data collection uses
Definition ross-types.h:317
tw_bf cv
Used by app during reverse computation.
Definition ross-types.h:306
tw_event * next
Definition ross-types.h:278
tw_lp * dest_lp
Destination LP ID.
Definition ross-types.h:312
tw_event * prev
Definition ross-types.h:279
tw_peid send_pe
Definition ross-types.h:316
tw_event * cancel_next
Next event in the cancel queue for the dest_pe.
Definition ross-types.h:287
tw_event_sig sig
Event signature, to be used by tiebreaker.
Definition ross-types.h:294
unsigned char cancel_q
Actively on a dest_lp->pe's cancel_q.
Definition ross-types.h:301
size_t size
Definition ross-types.h:176
tw_eventq pevent_q
Events processed by LPs bound to this KP.
Definition ross-types.h:396
struct st_kp_stats * kp_stats
Definition ross-types.h:407
tw_event_sig last_sig
Event signature of the current event being processed.
Definition ross-types.h:398
tw_stime last_time
Time of the current event being processed.
Definition ross-types.h:401
tw_stat s_nevent_processed
Number of events processed.
Definition ross-types.h:402
long s_rb_total
Number of total rollbacks by this LP.
Definition ross-types.h:405
LP State Structure.
Definition ross-types.h:336
tw_pe * pe
Definition ross-types.h:340
tw_kp * kp
kp – Kernel process that we belong to (must match pe).
Definition ross-types.h:345
tw_lpid gid
global LP id
Definition ross-types.h:338
tw_lptype * type
Type of this LP, including service callbacks.
Definition ross-types.h:348
struct st_lp_stats * lp_stats
Definition ross-types.h:356
unsigned int critical_path
Critical path value for this LP.
Definition ross-types.h:352
void * cur_state
Current application LP data.
Definition ross-types.h:347
unsigned int suspend_flag
Definition ross-types.h:368
event_f event
LP event handler routine.
Definition ross-types.h:100
revent_f revent
LP Reverse event handler routine.
Definition ross-types.h:101
commit_f commit
LP Commit event routine.
Definition ross-types.h:102
Holds the entire PE state.
Definition ross-types.h:416
tw_eventq free_q
Linked list of free tw_events.
Definition ross-types.h:424
tw_wtime end_time
When this PE finished its execution.
Definition ross-types.h:461
tw_petype type
Model defined PE type routines.
Definition ross-types.h:418
tw_eventq event_q
Linked list of events sent to this PE.
Definition ross-types.h:420
tw_pq * pq
Priority queue used to sort events.
Definition ross-types.h:422
tw_event * cancel_q
List of canceled events.
Definition ross-types.h:421
tw_stime GVT
Global Virtual Time.
Definition ross-types.h:451
unsigned char cev_abort
Current event being processed must be aborted.
Definition ross-types.h:440
tw_peid id
Definition ross-types.h:417
tw_event * cur_event
Current event being processed.
Definition ross-types.h:426
unsigned char gvt_status
Bits available for gvt computation.
Definition ross-types.h:441
tw_statistics stats
per PE counters
Definition ross-types.h:463
tw_event_sig GVT_sig
Global Virtual Time Signature.
Definition ross-types.h:445
tw_wtime start_time
When this PE first started execution.
Definition ross-types.h:460
pe_final_f final
PE finilization routine.
Definition ross-types.h:73
pe_init_f post_lp_init
PE initialization routine, after LP init.
Definition ross-types.h:71
pe_init_f pre_lp_init
PE initialization routine, before LP init.
Definition ross-types.h:70
tw_clock s_net_read
Definition ross-types.h:146
tw_clock s_cancel_q
Definition ross-types.h:156
tw_stat s_pe_event_ties
Definition ross-types.h:140
tw_clock s_pq
Definition ross-types.h:153
tw_clock s_total
Definition ross-types.h:144
tw_stat s_nevent_abort
Definition ross-types.h:122
tw_clock s_rollback
Definition ross-types.h:154
tw_clock s_event_abort
Definition ross-types.h:151
tw_clock s_event_process
Definition ross-types.h:152
static void tw_sched_event_q(tw_pe *me)
Definition tw-sched.c:44
static void tw_sched_batch_realtime(tw_pe *me)
Definition tw-sched.c:322
static void tw_gvt_hook_step_seq(tw_pe *me)
Definition tw-sched.c:638
void tw_scheduler_optimistic(tw_pe *me)
Definition tw-sched.c:922
#define STIME_FROM_PE(pe)
Definition tw-sched.c:35
static bool does_any_pe(bool val)
Definition tw-sched.c:515
#define CMP_GVT_HOOK_TO_NEXT_IN_QUEUE(trigger, pe)
Definition tw-sched.c:33
double g_tw_rollback_time
Definition tw-sched.c:1026
static void tw_gvt_hook_step(tw_pe *me)
Definition tw-sched.c:584
static void reset_bitfields(tw_event *revent)
Reset the event bitfield prior to entering the event handler post-reverse - reset the bitfield so tha...
Definition tw-sched.c:15
static bool is_gvt_past_hook_threshold(tw_pe *me)
Definition tw-sched.c:567
void tw_scheduler_sequential_rollback_check(tw_pe *me)
Definition tw-sched.c:1111
#define PQ_MINUMUM(pe)
Definition tw-sched.c:31
void tw_scheduler_conservative(tw_pe *me)
Definition tw-sched.c:778
static void tw_sched_cancel_q(tw_pe *me)
Definition tw-sched.c:97
#define STIME_FROM_KP(kp)
Definition tw-sched.c:36
void tw_scheduler_optimistic_realtime(tw_pe *me)
Definition tw-sched.c:972
static void tw_sched_batch(tw_pe *me)
Definition tw-sched.c:162
void tw_scheduler_optimistic_debug(tw_pe *me)
Definition tw-sched.c:1028
#define TRIGGER_ROLLBACK_TO_EVENT_TIME(kp, e)
Definition tw-sched.c:34
void tw_scheduler_rollback_and_cancel_events_pe(tw_pe *pe)
Definition tw-sched.c:529
void tw_scheduler_sequential(tw_pe *me)
Definition tw-sched.c:691
void tw_sched_init(tw_pe *me)
Definition tw-sched.c:477
#define CMP_KP_TO_EVENT_TIME(kp, e)
Definition tw-sched.c:32