The Sparta Modeling Framework
Scheduler.hpp
// <Scheduler> -*- C++ -*-

/*!
 * \file   Scheduler.hpp
 * \brief  A class that lets you schedule events now and in the future.
 */

#pragma once

#include <ctime>
#include <unistd.h>
#include <boost/timer/timer.hpp>
#include <cstdint>
#include <string>
#include <cmath>
#include <vector>
#include <chrono>
#include <mutex>
#include <array>
#include <memory>
#include <algorithm>
#include <limits>
#include <list>
#include <ostream>

// (Additional sparta includes are elided in this source view, among
//  them the Colors, ObjectAllocator, SchedulingPhase, SpartaAssert,
//  SpartaHandler, StatisticSet, TreeNode, and ValidValue headers.)
#include "sparta/log/MessageSource.hpp"
#include "sparta/statistics/ReadOnlyCounter.hpp"
#include "sparta/statistics/Counter.hpp"
#include "sparta/statistics/CounterBase.hpp"

#ifdef SYSTEMC_SUPPORT
#include "sparta/log/NotificationSource.hpp"
#endif
namespace sparta {
class GlobalTreeNode;
} // namespace sparta

//! Picoseconds per second constant
#define PS_PER_SECOND 1000000000000
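// Illustrative arithmetic (not from the original header): at
// 1 tick == 1 ps, a 1 GHz clock period spans
// PS_PER_SECOND / 1'000'000'000 = 1000 ticks, i.e. 1 ns.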

namespace sparta {
    class Scheduler;
    class Clock;
    class DAG;

    class Scheduleable;
    class StartupEvent;
    // Forward declaration to support the addition of sparta::GlobalEvent
    template<typename DataT>
    class PhasedPayloadEvent;
    class EventSet;
    class GlobalEventProxy;
}
namespace sparta
{

/*!
 * \class Scheduler
 * \brief A class that lets you schedule events now and in the future.
 *
 * (The long class documentation block is elided in this source view;
 *  the TreeNode base is reconstructed from the overridden
 *  isFinalized() method below.)
 */
class Scheduler : public TreeNode
{
public:

    //! Typedef for our unit of time
    typedef uint64_t Tick;

private:
    //! Scheduling bucket for a single tick; quanta are chained
    //! through the next pointer below
    struct TickQuantum
    {
        class ScheduleableGroup
        {
        public:
            using Scheduleables = std::vector<Scheduleable *>;

            void addScheduleable(Scheduleable* sched) {
                // Double the backing store when full
                if(SPARTA_EXPECT_FALSE(current_idx_ == size_)) {
                    scheduleables_.resize(scheduleables_.size() * 2, nullptr);
                    size_ = scheduleables_.size();
                }
                scheduleables_[current_idx_++] = sched;
            }

            size_t size() const {
                return current_idx_;
            }

            void clear() {
                current_idx_ = 0;
            }

            Scheduleables::const_reference operator[](const size_t index) const {
                sparta_assert(index < current_idx_);
                return scheduleables_[index];
            }

            Scheduleables::reference operator[](const size_t index) {
                sparta_assert(index < current_idx_);
                return scheduleables_[index];
            }

        private:
            size_t size_ = 16;
            size_t current_idx_ = 0;
            Scheduleables scheduleables_{size_, nullptr}; // start with 16 events
        };
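
        // Growth behavior of ScheduleableGroup, illustrated (the
        // values follow directly from the code above; "ev" is a
        // hypothetical Scheduleable*):
        //
        //   ScheduleableGroup grp;       // capacity 16, size() == 0
        //   for(int i = 0; i < 17; ++i)
        //       grp.addScheduleable(ev); // 17th call doubles capacity to 32
        //   grp.clear();                 // size() == 0; storage kept for reuse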

        using ScheduleableGroups = std::vector<ScheduleableGroup>;
246
253 TickQuantum(uint32_t num_firing_groups) :
254 groups(num_firing_groups)
255 { }
256
263 void addEvent(uint32_t firing_group, Scheduleable * scheduleable) {
264 sparta_assert(firing_group > 0);
265 sparta_assert(firing_group < groups.size());
266 groups[firing_group].addScheduleable(scheduleable);
267 first_group_idx = std::min(first_group_idx, firing_group);
268 }
269
270 void addEventIfNotScheduled(uint32_t firing_group, Scheduleable * scheduleable) {
271 sparta_assert(firing_group > 0);
272 sparta_assert(firing_group < groups.size());
273 auto & grp = groups[firing_group];
274 const auto grp_size = grp.size();
275 for(uint32_t idx = 0; idx < grp_size; ++idx) {
276 if(grp[idx] == scheduleable) {
277 return;
278 }
279 }
280 grp.addScheduleable(scheduleable);
281 first_group_idx = std::min(first_group_idx, firing_group);
282 }
283
284 Tick tick = 0;
285 ScheduleableGroups groups;
286 uint32_t first_group_idx = std::numeric_limits<uint32_t>::max();
287 TickQuantum * next = nullptr;
288 };
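
    // How a quantum is used, in brief: group index 0 is reserved
    // (addEvent() asserts firing_group > 0), and first_group_idx
    // tracks the lowest occupied group so firing can skip straight
    // to it. A sketch with hypothetical names:
    //
    //   TickQuantum q(3);                   // groups[0..2]
    //   q.addEvent(1, ev_a);                // first_group_idx becomes 1
    //   q.addEventIfNotScheduled(1, ev_a);  // no-op: ev_a already present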

    //! The quantum for the tick currently being worked on
    TickQuantum * current_tick_quantum_ = nullptr;

    //! Allocator for TickQuantum objects
    ObjectAllocator<TickQuantum> tick_quantum_allocator_;

    //! Has the watchdog timer expired?
    bool watchdogExpired_() const
    {
        bool watchdog_expired = false;
        if (wdt_period_ticks_ > 0) {
            sparta_assert(current_tick_ >= prev_wdt_tick_);
            uint64_t num_ticks_since_wdt = current_tick_ - prev_wdt_tick_;
            if (num_ticks_since_wdt >= wdt_period_ticks_) {
                watchdog_expired = true;
            }
        }
        return watchdog_expired;
    }
309
310public:
311
313 constexpr Tick calcIndexTime(const Tick rel_time) const {
314 return current_tick_ + rel_time;
315 }
316
318 template <typename DurationT>
319 DurationT getRunCpuTime() const
320 {
321 return std::chrono::duration_cast<DurationT>(std::chrono::nanoseconds(timer_.elapsed().user));
322 }
323
325 template <typename DurationT>
326 DurationT getRunWallTime() const
327 {
328 return std::chrono::duration_cast<DurationT>(std::chrono::nanoseconds(timer_.elapsed().wall));
329 }
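
    // Usage sketch for the duration getters above (hypothetical
    // Scheduler instance "sched"); any std::chrono duration works as
    // DurationT since the boost timer values go through duration_cast:
    //
    //   auto cpu_ms  = sched.getRunCpuTime<std::chrono::milliseconds>();
    //   auto wall_us = sched.getRunWallTime<std::chrono::microseconds>();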

    //! Constant for infinite tick count
    static const Tick INDEFINITE;

    //! Name of the Scheduler's TreeNode
    static constexpr char NODE_NAME[] = "scheduler";

    //! Constructor
    Scheduler();

    //! Constructor with name
    Scheduler(const std::string& name) :
        Scheduler(name, nullptr)
    {
        // Delegated Constructor
    }

    //! Construct with a name and a specific global search scope
    //! (global parent)
    Scheduler(const std::string& name, GlobalTreeNode* search_scope);

    //! Destructor
    ~Scheduler();

    //! Reset the Scheduler
    void reset();

    //! Clears all events in the scheduler without executing any of them
    void clearEvents();

    //! Clears the events in the scheduler and sets the current tick
    //! to \a t
    void restartAt(Tick t);

    //! Finalize the scheduler and allow running
    void finalize();

    //! Get the internal DAG
    DAG * getDAG() const {
        return dag_.get();
    }

    //! Register a clock with this Scheduler
    void registerClock(sparta::Clock *clk);

    //! Deregister a clock from this Scheduler
    void deregisterClock(sparta::Clock *clk);

    //! Tell the scheduler to stop running
    void stopRunning() {
        running_ = false;
    }

    /*!
     * \brief A method used for debugging the scheduler; prints the
     *        scheduler's schedule of events
     */
    template<class StreamType>
    void printNextCycleEventTree(StreamType & os, uint32_t curr_grp = 0,
                                 uint32_t curr_event = 0, uint32_t future=0) const;

    /*!
     * \brief Schedule a single event.  This method should also be
     *        thread safe.
     */
    void scheduleEvent(Scheduleable * scheduleable,
                       Tick rel_time,
                       uint32_t dag_group=0,
                       bool continuing=true,
                       bool add_if_not_scheduled=false);
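
    // Usage sketch (hypothetical names): schedule "ev" 5 ticks from
    // now with the defaults, then once more asking the Scheduler to
    // skip the add if it is already scheduled at that time:
    //
    //   sched->scheduleEvent(ev, 5);
    //   sched->scheduleEvent(ev, 5, 0, true, /*add_if_not_scheduled=*/true);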

    //! Asynchronously schedule an event (see the async event list below)
    void scheduleAsyncEvent(Scheduleable * sched, Tick delay);

    //! Is the given Scheduleable item anywhere (in time now ->
    //! future) on the Scheduler?
    bool isScheduled(const Scheduleable * scheduleable) const;

    //! Is the given Scheduleable item already scheduled at the given
    //! relative time?
    bool isScheduled(const Scheduleable * scheduleable, Tick rel_time) const;

    //! Cancel the given Scheduleable if on the Scheduler
    void cancelEvent(const Scheduleable * scheduleable);

    //! Cancel the given Scheduleable if on the Scheduler at the
    //! given time
    void cancelEvent(const Scheduleable * scheduleable, Tick rel_time);

    //! Cancel the given Scheduleable that was scheduled asynchronously
    void cancelAsyncEvent(Scheduleable *scheduleable);

    /*!
     * \brief Enter the running state and run the scheduler until
     *        running is stopped (e.g. through a stop event) or the
     *        given number of ticks has elapsed
     */
    void run(Tick num_ticks=INDEFINITE,
             const bool exacting_run = false,
             const bool measure_run_time = true);
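
    // Usage sketch (hypothetical instance "sched"): bound a run to
    // 1000 ticks, or run until no non-continuing events remain:
    //
    //   sched.run(1000);
    //   sched.run(); // num_ticks defaults to INDEFINITE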

    //! Returns true if there are no more pending non-continuing events
    bool isFinished() const {
        return is_finished_;
    }

    //! Returns the next tick an event is pending
    Tick nextEventTick() const {
        if(current_tick_quantum_ == nullptr) {
            return INDEFINITE;
        }
        return (current_tick_quantum_->tick);
    }

    //! Is the scheduler finalized?
    bool isFinalized() const noexcept override
    {
        return dag_finalized_;
    }

    //! Query if the scheduler is running
    bool isRunning() const noexcept
    {
        return running_;
    }

    //! The current tick the Scheduler is working on or just finished
    Tick getCurrentTick() const noexcept
    {
        return current_tick_;
    }

    //! The total elapsed ticks
    Tick getElapsedTicks() const noexcept
    {
        return elapsed_ticks_;
    }

    //! The current simulated time, in picoseconds
    Tick getSimulatedPicoSeconds() const noexcept
    {
        return current_tick_;
    }

    //! Reset the watchdog timer
    void kickTheDog() noexcept {
        prev_wdt_tick_ = getCurrentTick();
    }

    //! Enable the watchdog timer
    void enableWatchDog(uint64_t watchdog_timeout_ps) {
        // TODO: Handle the case when different callers want different
        // enable/disable behavior
        if (watchdog_timeout_ps == 0) {
            sparta_assert(wdt_period_ticks_ == 0);
        }

        // Only increase the period; don't allow it to decrease
        if (wdt_period_ticks_ < watchdog_timeout_ps) {
            // Scheduler ticks are currently picoseconds, so this is 1:1
            wdt_period_ticks_ = watchdog_timeout_ps;
        }
    }
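
    // Watchdog usage sketch (hypothetical values): arm a 1 us
    // watchdog, then kick it from forward-progress points. Per
    // watchdogExpired_() above, the watchdog trips once the current
    // tick runs 1'000'000 ticks (ps) past the last kick:
    //
    //   sched.enableWatchDog(1'000'000); // 1,000,000 ps == 1 us
    //   ...
    //   sched.kickTheDog();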

    //! Returns the number of events fired on this Scheduler
    Tick getNumFired() const noexcept
    {
        return events_fired_;
    }

    //! Returns the Tick quantum where the next continuing event resides
    Tick getNextContinuingEventTime() const noexcept {
        return latest_continuing_event_;
    }

    //! The event the Scheduler is currently firing, or nullptr if none
    const Scheduleable * getCurrentFiringEvent() const
    {
        if(current_tick_quantum_) {
            if(current_tick_quantum_->groups.size() > current_group_firing_) {
                if(current_tick_quantum_->groups[current_group_firing_].size() > current_event_firing_) {
                    return current_tick_quantum_->groups[current_group_firing_][current_event_firing_];
                }
            }
        }
        return nullptr;
    }

    //! The index of the event currently being fired
    uint32_t getCurrentFiringEventIdx() const {
        return current_event_firing_;
    }

    //! The current scheduling phase
    SchedulingPhase getCurrentSchedulingPhase() const {
        return current_scheduling_phase_;
    }

    //! Returns the frequency (in ticks per simulated second) of this
    //! Scheduler
    Tick getFrequency() const {
        return PS_PER_SECOND; // 1 tick == 1 ps
    }

    //! Returns a counter holding the current tick count of this scheduler
    ReadOnlyCounter & getCurrentTicksROCounter() {
        return ticks_roctr_;
    }

    //! Returns a counter holding the current picosecond count of
    //! this scheduler
    ReadOnlyCounter & getCurrentPicosecondsROCounter() {
        return picoseconds_roctr_;
    }

    //! Returns a StatisticDef holding the second count of this scheduler
    StatisticDef & getSecondsStatisticDef() {
        return seconds_stat_;
    }

    //! Returns a StatisticDef holding the millisecond count of this scheduler
    StatisticDef & getCurrentMillisecondsStatisticDef() {
        return milliseconds_stat_;
    }

    //! Returns a StatisticDef holding the microsecond count of this scheduler
    StatisticDef & getCurrentMicrosecondsStatisticDef() {
        return microseconds_stat_;
    }

    //! Returns a StatisticDef holding the nanosecond count of this scheduler
    StatisticDef & getCurrentNanosecondsStatisticDef() {
        return nanoseconds_stat_;
    }

private:

    // The startup event adds itself to internal structures
    friend class StartupEvent;

    //! Schedule a startup event; called via sparta::StartupEvent
    void scheduleStartupHandler_(const SpartaHandler & event_del) {
        startup_events_.emplace_back(event_del);
    }
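
    // Note: scheduleStartupHandler_ is reached through the
    // StartupEvent friend class above rather than called directly;
    // the queued handlers fire when the Scheduler first starts
    // running.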

    //! Report a precedence issue between scheduleables
    void throwPrecedenceIssue_(const Scheduleable * scheduleable, const uint32_t firing_group) const;

    //! Get a printable label for the given scheduleable
    const char * getScheduleableLabel_(const Scheduleable * sched) const;

    //! Determine the TickQuantum for the given relative time
    TickQuantum* determineTickQuantum_(Tick rel_time);

    //! The scheduler's DAG
    std::unique_ptr<DAG> dag_;

    //! The number of DAG groups
    uint32_t dag_group_count_ = 0;

    //! The number of firing groups
    uint32_t firing_group_count_ = 0;

    //! The "zero" firing group index
    uint32_t group_zero_ = 0;

    //! Has the DAG been finalized?
    bool dag_finalized_ = false;

    //! Is this the first tick?
    bool first_tick_ = true;

    //! The current tick
    Tick current_tick_ = 0; // init tick 0

    //! The total elapsed ticks
    Tick elapsed_ticks_ = 0;

    //! The tick at which the watchdog was last kicked
    Tick prev_wdt_tick_ = 0;

    //! The watchdog period in ticks; 0 means the watchdog is disabled
    Tick wdt_period_ticks_ = 0;

    //! Is the scheduler running?
    bool running_ = false;

    //! Event used to stop the scheduler
    std::unique_ptr<Scheduleable> stop_event_;

    //! Event substituted for cancelled events
    std::unique_ptr<Scheduleable> cancelled_event_;
    void cancelCallback_() {}

    //! True when no more non-continuing events are pending
    bool is_finished_ = false;

    //! Handlers registered via StartupEvent
    std::vector<SpartaHandler> startup_events_;

    //! Clocks registered with this Scheduler
    std::vector<sparta::Clock*> registered_clocks_;

    //! Index of the group currently being fired
    uint32_t current_group_firing_ = 0;

    //! Index of the event currently being fired
    uint32_t current_event_firing_ = 0;

    //! The current scheduling phase
    SchedulingPhase current_scheduling_phase_ = SchedulingPhase::Trigger;

    //! Debug logger
    log::MessageSource debug_;

    //! Call trace logger
    log::MessageSource call_trace_logger_;

    //! Stream used to accumulate call traces
    std::ostringstream call_trace_stream_;

    //! Tick of the latest continuing event
    Tick latest_continuing_event_ = 0;

    //! This Scheduler's StatisticSet
    StatisticSet sset_;

    //! Internal clock backing the scheduler's counters
    std::unique_ptr<sparta::Clock> scheduler_internal_clk_;

    //! Counter of elapsed ticks
    ReadOnlyCounter ticks_roctr_;

    //! Counter that derives picoseconds from elapsed ticks and frequency
    class PicoSecondCounter : public ReadOnlyCounter {
        Scheduler& sched_;
    public:
        PicoSecondCounter(Scheduler& sched,
                          sparta::Clock * clk,
                          StatisticSet* parent);

        counter_type get() const override {
            return static_cast<counter_type>(static_cast<double>(sched_.getElapsedTicks()) *
                                             (PS_PER_SECOND / static_cast<double>(sched_.getFrequency())));
        }
    } picoseconds_roctr_;

    //! StatisticDef expressing elapsed seconds
    StatisticDef seconds_stat_;

    //! StatisticDef expressing elapsed milliseconds
    StatisticDef milliseconds_stat_;

    //! StatisticDef expressing elapsed microseconds
    StatisticDef microseconds_stat_;

    //! StatisticDef expressing elapsed nanoseconds
    StatisticDef nanoseconds_stat_;

    //! StatisticDef expressing user run time
    StatisticDef user_runtime_stat_;

    //! StatisticDef expressing system run time
    StatisticDef system_runtime_stat_;

    //! StatisticDef expressing wall clock run time
    StatisticDef wall_runtime_stat_;

    //! Timer used to measure run time
    boost::timer::cpu_timer timer_;

    //! Number of events fired, with a counter exposing it
    uint64_t events_fired_ = 0;
    ReadOnlyCounter events_fired_cnt_;

    //! User, system, and wall run times, with counters exposing them
    uint64_t user_time_ = 0;
    ReadOnlyCounter user_time_cnt_;

    uint64_t system_time_ = 0;
    ReadOnlyCounter system_time_cnt_;

    uint64_t wall_time_ = 0;
    ReadOnlyCounter wall_time_cnt_;

public:

    /*!
     * \brief Get the raw pointer of the "global" PhasedPayloadEvent
     *        inside sparta::Scheduler for the given scheduling phase
     */
    template<SchedulingPhase sched_phase_T = SchedulingPhase::Update>
    PhasedPayloadEvent<GlobalEventProxy> * getGlobalPhasedPayloadEventPtr() {
        static_assert(static_cast<uint32_t>(sched_phase_T) < NUM_SCHEDULING_PHASES,
                      "Invalid Scheduling Phase is provided!");

        return gbl_events_[static_cast<uint32_t>(sched_phase_T)].get();
    }
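
    // Usage sketch (hypothetical): fetch the global payload event for
    // the PortUpdate phase; the template argument must be one of the
    // NUM_SCHEDULING_PHASES values or the static_assert fires:
    //
    //   auto * gep = sched.getGlobalPhasedPayloadEventPtr<SchedulingPhase::PortUpdate>();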

private:

    //! Event set holding the global events
    std::unique_ptr<EventSet> es_uptr_;

    //! The "global" PhasedPayloadEvents, one per scheduling phase
    std::array<std::unique_ptr<PhasedPayloadEvent<GlobalEventProxy>>,
               NUM_SCHEDULING_PHASES> gbl_events_;

    //! Fire a global event
    void fireGlobalEvent_(const GlobalEventProxy &);

    //! Bookkeeping for asynchronously scheduled events
    struct AsyncEventInfo {
        AsyncEventInfo(Scheduleable *sched, Scheduler::Tick tick)
            : sched(sched), tick(tick) { }

        AsyncEventInfo(Scheduleable *sched)
            : AsyncEventInfo(sched, 0) { }

        //! Predicate form: matches entries with the same scheduleable
        bool operator() (const AsyncEventInfo &info) const
        {
            return info.sched == sched;
        }

        Scheduleable *sched = nullptr;
        Scheduler::Tick tick = 0;
    };
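
    // The operator() above lets an AsyncEventInfo act as its own
    // search predicate; a sketch of scanning the list below with the
    // standard library (hypothetical variable names):
    //
    //   auto it = std::find_if(async_event_list_.begin(),
    //                          async_event_list_.end(),
    //                          AsyncEventInfo(sched));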

    //! Hint that the async event list is empty (checked without the lock)
    volatile bool async_event_list_empty_hint_ = true;

    //! Events scheduled from other threads
    std::list<AsyncEventInfo> async_event_list_;

    //! Protects async_event_list_
    std::mutex async_event_list_mutex_;

#ifdef SYSTEMC_SUPPORT
    //! Notification fired when an item is scheduled
    sparta::NotificationSource<Tick> item_scheduled_;
#endif
};

template<class StreamType>
inline void Scheduler::printNextCycleEventTree(StreamType& os,
                                               uint32_t curr_grp,
                                               uint32_t curr_event,
                                               uint32_t future) const
{
    if(current_tick_quantum_ == nullptr) {
        os << "sparta::Scheduler is empty" << std::endl;
        return;
    }

    // Tick (uint64_t) is used here to avoid truncating the tick count
    const Tick scheduler_map_idx = current_tick_ + future;
    os << "Scheduler's event tree for tick: " << scheduler_map_idx << std::endl;
    if(current_tick_quantum_->tick > scheduler_map_idx) {
        os << "\tNo events for time: '"
           << scheduler_map_idx << "' next event @"
           << current_tick_quantum_->tick << std::endl;
    }
    const TickQuantum::ScheduleableGroups & group_array = current_tick_quantum_->groups;
    for(uint32_t i = curr_grp; i < group_array.size(); ++i)
    {
        std::stringstream output;
        if((i + 1) == group_array.size()) {
            output << "\tGroup[zero]: ";
        }
        else {
            output << "\tGroup[" << i + 1 << "]: ";
        }
        const TickQuantum::ScheduleableGroup & scheduleables = group_array[i];

        output << SPARTA_CURRENT_COLOR_GREEN;
        for(uint32_t x = 0; x < scheduleables.size(); ++x)
        {
            if(x) {
                output << ", ";
            }
            if((curr_grp == i) && (curr_event == x)) {
                output << SPARTA_CURRENT_COLOR_BRIGHT_GREEN << getScheduleableLabel_(scheduleables[x]);
            }
            else {
                output << SPARTA_CURRENT_COLOR_GREEN << getScheduleableLabel_(scheduleables[x]);
            }
        }

        os << output.str() << SPARTA_CURRENT_COLOR_NORMAL << std::endl;
    }
}
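
// Usage sketch (hypothetical instance "sched"): dump the current
// tick's event tree to stdout; any ostream-like type works for
// StreamType:
//
//   sched.printNextCycleEventTree(std::cout);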

} // namespace sparta