13#include <boost/timer/timer.hpp>
32#include "sparta/log/MessageSource.hpp"
34#include "sparta/statistics/ReadOnlyCounter.hpp"
35#include "sparta/statistics/Counter.hpp"
39#include "sparta/statistics/CounterBase.hpp"
43#include "sparta/log/NotificationSource.hpp"
52#define PS_PER_SECOND 1000000000000
62 template<
typename DataT>
63 class PhasedPayloadEvent;
65 class GlobalEventProxy;
// -- ScheduleableGroup (fragment): a flat vector of Scheduleable pointers that
// grows by doubling. NOTE(review): this capture is missing interior lines
// (original numbering skips); comments below describe only what is visible.
211 using Scheduleables = std::vector<Scheduleable *>;
// addScheduleable fragment: when full, double the capacity (new slack filled
// with nullptr), cache the new size, then store at current_idx_ and advance.
215 scheduleables_.resize(scheduleables_.size() * 2,
nullptr);
216 size_ = scheduleables_.size();
218 scheduleables_[current_idx_++] = sched;
// size(): body not visible here -- presumably returns the live entry count
// (current_idx_) rather than the vector capacity; confirm in full source.
221 size_t size()
const {
// Unchecked element access, const and non-const overloads.
229 Scheduleables::const_reference operator[](
const size_t index)
const {
231 return scheduleables_[index];
234 Scheduleables::reference operator[](
const size_t index) {
236 return scheduleables_[index];
// Next free slot in scheduleables_.
241 size_t current_idx_ = 0;
// Backing storage, pre-sized and nullptr-filled (size_ declaration is on a
// line missing from this capture).
242 Scheduleables scheduleables_{size_,
nullptr};
245 using ScheduleableGroups = std::vector<ScheduleableGroup>;
// -- TickQuantum (fragment): all events scheduled for a single tick, bucketed
// by firing group. NOTE(review): interior lines are missing from this capture.
253 TickQuantum(uint32_t num_firing_groups) :
254 groups(num_firing_groups)
// Append unconditionally to the requested firing group, and remember the
// lowest populated group so firing can start there.
263 void addEvent(uint32_t firing_group,
Scheduleable * scheduleable) {
266 groups[firing_group].addScheduleable(scheduleable);
267 first_group_idx = std::min(first_group_idx, firing_group);
// Deduplicating variant: linear scan of the target group first. The action on
// a match sits in lines not visible here (presumably an early return -- confirm).
270 void addEventIfNotScheduled(uint32_t firing_group,
Scheduleable * scheduleable) {
273 auto & grp = groups[firing_group];
274 const auto grp_size = grp.size();
275 for(uint32_t idx = 0; idx < grp_size; ++idx) {
276 if(grp[idx] == scheduleable) {
280 grp.addScheduleable(scheduleable);
281 first_group_idx = std::min(first_group_idx, firing_group);
285 ScheduleableGroups groups;
// Sentinel: max() means no events have been queued for this tick yet.
286 uint32_t first_group_idx = std::numeric_limits<uint32_t>::max();
// Intrusive link to the next populated tick quantum.
287 TickQuantum * next =
nullptr;
291 TickQuantum * current_tick_quantum_ =
nullptr;
294 ObjectAllocator<TickQuantum> tick_quantum_allocator_;
297 bool watchdogExpired_()
const
299 bool watchdoc_expired =
false;
300 if (wdt_period_ticks_ > 0) {
302 uint64_t num_ticks_since_wdt = current_tick_ - prev_wdt_tick_;
303 if (num_ticks_since_wdt >= wdt_period_ticks_) {
304 watchdoc_expired =
true;
307 return watchdoc_expired;
314 return current_tick_ + rel_time;
318 template <
typename DurationT>
321 return std::chrono::duration_cast<DurationT>(std::chrono::nanoseconds(timer_.elapsed().user));
325 template <
typename DurationT>
328 return std::chrono::duration_cast<DurationT>(std::chrono::nanoseconds(timer_.elapsed().wall));
470 template<
class StreamType>
472 uint32_t curr_event = 0, uint32_t future=0)
const;
496 uint32_t dag_group=0,
497 bool continuing=
true,
498 bool add_if_not_scheduled=
false);
658 const bool exacting_run =
false,
659 const bool measure_run_time =
true);
678 if(current_tick_quantum_ ==
nullptr) {
681 return (current_tick_quantum_->tick);
698 return dag_finalized_;
720 return current_tick_;
740 return elapsed_ticks_;
748 return current_tick_;
771 if (watchdog_timeout_ps == 0) {
776 if (wdt_period_ticks_ < watchdog_timeout_ps) {
778 wdt_period_ticks_ = watchdog_timeout_ps;
787 return events_fired_;
801 return latest_continuing_event_;
810 if(current_tick_quantum_) {
811 if(current_tick_quantum_->groups.size() > current_group_firing_) {
812 if(current_tick_quantum_->groups[current_group_firing_].size() > current_event_firing_) {
813 return current_tick_quantum_->groups[current_group_firing_][current_event_firing_];
822 return current_event_firing_;
827 return current_scheduling_phase_;
860 return picoseconds_roctr_;
868 return seconds_stat_;
876 return milliseconds_stat_;
884 return microseconds_stat_;
892 return nanoseconds_stat_;
// Queue a handler to be invoked when the Scheduler starts (used by
// sparta::StartupEvent). Handlers accumulate in startup_events_; the drain
// point is not visible in this capture -- presumably at the start of run().
913 void scheduleStartupHandler_(
const SpartaHandler & event_del) {
914 startup_events_.emplace_back(event_del);
919 void throwPrecedenceIssue_(
const Scheduleable * scheduleable,
const uint32_t firing_group)
const;
920 const char * getScheduleableLabel_(
const Scheduleable * sched)
const;
928 TickQuantum* determineTickQuantum_(
Tick rel_time);
931 std::unique_ptr<DAG> dag_;
934 uint32_t dag_group_count_ = 0;
937 uint32_t firing_group_count_ = 0;
940 uint32_t group_zero_ = 0;
944 bool dag_finalized_ =
false;
947 bool first_tick_ =
true;
950 Tick current_tick_ = 0;
953 Tick elapsed_ticks_ = 0;
956 Tick prev_wdt_tick_ = 0;
960 Tick wdt_period_ticks_ = 0;
963 bool running_ =
false;
966 std::unique_ptr<Scheduleable> stop_event_;
969 std::unique_ptr<Scheduleable> cancelled_event_;
970 void cancelCallback_() {}
974 bool is_finished_ =
false;
977 std::vector<SpartaHandler> startup_events_;
981 std::vector<sparta::Clock*> registered_clocks_;
984 uint32_t current_group_firing_ = 0;
987 uint32_t current_event_firing_ = 0;
993 log::MessageSource debug_;
996 log::MessageSource call_trace_logger_;
999 std::ostringstream call_trace_stream_;
1002 Tick latest_continuing_event_ = 0;
1008 std::unique_ptr<sparta::Clock> scheduler_internal_clk_;
1011 ReadOnlyCounter ticks_roctr_;
1013 class PicoSecondCounter :
public ReadOnlyCounter {
1018 StatisticSet* parent);
1021 return static_cast<counter_type>(
static_cast<double>(sched_.getElapsedTicks()) *
1022 (
PS_PER_SECOND /
static_cast<double>(sched_.getFrequency())));
1025 } picoseconds_roctr_;
1028 StatisticDef seconds_stat_;
1031 StatisticDef milliseconds_stat_;
1034 StatisticDef microseconds_stat_;
1037 StatisticDef nanoseconds_stat_;
1040 StatisticDef user_runtime_stat_;
1043 StatisticDef system_runtime_stat_;
1046 StatisticDef wall_runtime_stat_;
1049 boost::timer::cpu_timer timer_;
1053 uint64_t events_fired_ = 0;
1054 ReadOnlyCounter events_fired_cnt_;
1057 uint64_t user_time_ = 0;
1058 ReadOnlyCounter user_time_cnt_;
1060 uint64_t system_time_ = 0;
1061 ReadOnlyCounter system_time_cnt_;
1063 uint64_t wall_time_ = 0;
1064 ReadOnlyCounter wall_time_cnt_;
1073 template<SchedulingPhase sched_phase_T = SchedulingPhase::Update>
1076 "Invalid Scheduling Phase is provided!");
1078 return gbl_events_[
static_cast<uint32_t
>(sched_phase_T)].get();
1083 std::unique_ptr<EventSet> es_uptr_;
1086 std::array<std::unique_ptr<PhasedPayloadEvent<GlobalEventProxy>>,
// -- Cross-thread event scheduling support (fragment). AsyncEventInfo pairs a
// Scheduleable with a target tick; operator() lets it serve as a find/remove
// predicate matching on the Scheduleable pointer.
1092 struct AsyncEventInfo {
// Two-argument constructor; its signature line is missing from this capture.
1094 : sched(sched), tick(tick) { }
// Convenience ctor: schedule "now" (tick 0 relative).
1096 AsyncEventInfo(Scheduleable *sched)
1097 : AsyncEventInfo(sched, 0) { }
1099 bool operator() (
const AsyncEventInfo &info)
1101 return info.sched == sched;
1104 Scheduleable *sched =
nullptr;
// Fast-path emptiness hint read outside the mutex. NOTE(review): volatile is
// not a synchronization primitive -- presumably tolerated as a heuristic
// (false negatives re-checked under the lock); confirm in full source.
1109 volatile bool async_event_list_empty_hint_ =
true;
// Pending asynchronous events, guarded by async_event_list_mutex_.
1112 std::list<AsyncEventInfo> async_event_list_;
1115 std::mutex async_event_list_mutex_;
1120#ifdef SYSTEMC_SUPPORT
1126template<
class StreamType>
1129 uint32_t curr_event,
1130 uint32_t future)
const
1132 if(current_tick_quantum_ ==
nullptr) {
1133 os <<
"sparta::Scheduler is empty" << std::endl;
1137 uint32_t scheduler_map_idx = current_tick_ + future;
1138 os <<
"Scheduler's event tree for tick: " << scheduler_map_idx << std::endl;
1139 if(current_tick_quantum_->tick > scheduler_map_idx) {
1140 os <<
"\tNo events for time: '"
1141 << scheduler_map_idx <<
"' next event @"
1142 << current_tick_quantum_->tick << std::endl;
1144 const TickQuantum::ScheduleableGroups & group_array = current_tick_quantum_->groups;
1145 for(uint32_t i = curr_grp; i < group_array.size(); ++i)
1147 std::stringstream output;
1148 if((i + 1) == group_array.size()) {
1149 output <<
"\tGroup[zero]: ";
1152 output <<
"\tGroup[" << i + 1 <<
"]: ";
1156 output << SPARTA_CURRENT_COLOR_GREEN;
1157 for(uint32_t x = 0; x < scheduleables.size(); ++x)
1162 if((curr_grp == i) && (curr_event == x)) {
1163 output << SPARTA_CURRENT_COLOR_BRIGHT_GREEN << getScheduleableLabel_(scheduleables[x]);
1166 output << SPARTA_CURRENT_COLOR_GREEN << getScheduleableLabel_(scheduleables[x]);
#define SPARTA_CURRENT_COLOR_NORMAL
Macros for accessing the colors through the default scheme.
File that defines the ObjectAllocator class.
TreeNode refinement representing the root (or "top") of a device tree.
#define PS_PER_SECOND
Picoseconds per second constant.
File that defines the phases used in simulation.
Set of macros for Sparta assertions. Caught by the framework.
#define sparta_assert(...)
Simple variadic assertion that will throw a sparta_exception if the condition fails.
#define SPARTA_EXPECT_FALSE(x)
A macro for hinting to the compiler a particular condition should be considered most likely false.
File that contains the macro used to generate the class callbacks.
Contains a statistic definition (some useful information which can be computed)
File that defines the StatisticSet class.
File that defines a ValidValue.
A representation of simulated time.
uint64_t counter_type
Counter value type.
A helper class of GlobalEvent.
TreeNode which represents some "global" namespace of the device tree, containing only RootTreeNodes,...
A TreeNode that generates a specific type of notification which propagates up a tree of TreeNodes (us...
Class to schedule a Scheduleable in the future with a payload, but the class itself is not typed on t...
Represents a non-writable and non-observable counter with a very similar interface to sparta::Counter...
TreeNode which represents the root ("top") of a device tree.
A class that defines the basic scheduling interface to the Scheduler. Not intended to be used by mode...
A class that lets you schedule events now and in the future.
bool isScheduled(const Scheduleable *scheduleable) const
Is the given Scheduleable item anywhere (in time now -> future) on the Scheduler?
bool isScheduled(const Scheduleable *scheduleable, Tick rel_time) const
Is the given Scheduleable item already scheduled?
bool isFinished() const
Returns true if there are no more pending non-continuing events.
ReadOnlyCounter & getCurrentPicosecondsROCounter()
Returns a counter holding the current picosecond count of this scheduler.
DAG * getDAG() const
Get the internal DAG.
void cancelEvent(const Scheduleable *scheduleable)
Cancel the given Scheduleable if on the Scheduler.
void scheduleAsyncEvent(Scheduleable *sched, Scheduler::Tick delay)
Asynchronously schedule an event.
PhasedPayloadEvent< GlobalEventProxy > * getGlobalPhasedPayloadEventPtr()
Get the raw pointer of "global" PhasedPayloadEvent inside sparta::Scheduler.
Tick getCurrentTick() const noexcept
The current tick the Scheduler is working on or just finished.
StatisticDef & getCurrentNanosecondsStatisticDef()
Returns a StatisticDef holding the nanosecond count of this scheduler.
StatisticDef & getSecondsStatisticDef()
Returns a StatisticDef holding the seconds count of this scheduler.
StatisticDef & getCurrentMillisecondsStatisticDef()
Returns a StatisticDef holding the millisecond count of this scheduler.
void scheduleEvent(Scheduleable *scheduleable, Tick rel_time, uint32_t dag_group=0, bool continuing=true, bool add_if_not_scheduled=false)
Schedule a single event. This method should also be thread safe.
Tick getNextContinuingEventTime() const noexcept
Returns the Tick quantum where the next continuing event resides.
bool isRunning() const noexcept
Query if the scheduler is running.
void enableWatchDog(uint64_t watchdog_timeout_ps)
Enable the watchdog timer.
void restartAt(Tick t)
Clears the events in the scheduler, sets the current tick to tick and the elapsed ticks to either tic...
Tick getFrequency() const
Returns the frequency (in ticks per simulated second) of this Scheduler.
void kickTheDog() noexcept
Reset the watchdog timer.
void cancelEvent(const Scheduleable *scheduleable, Tick rel_time)
Cancel the given Scheduleable if on the Scheduler at the given time.
SchedulingPhase getCurrentSchedulingPhase() const
Tick nextEventTick() const
Returns the next tick an event is pending.
Scheduler(const std::string &name)
Constructor with name.
DurationT getRunWallTime() const
Get the wall clock run time.
uint64_t Tick
Typedef for our unit of time.
void stopRunning()
Tell the scheduler to stop running.
Scheduler(const std::string &name, GlobalTreeNode *search_scope)
Construct with a name and a specific global search scope (global parent)
void cancelAsyncEvent(Scheduleable *scheduleable)
Cancel the given Scheduleable.
uint32_t getCurrentFiringEventIdx() const
DurationT getRunCpuTime() const
Return the number of nanoseconds the scheduler has been in run.
static constexpr char NODE_NAME[]
Name of the Scheduler' TreeNode.
void clearEvents()
Clears all events in the scheduler without executing any of them.
void run(Tick num_ticks=INDEFINITE, const bool exacting_run=false, const bool measure_run_time=true)
Enter running state and runs the scheduler until running is stopped (e.g. through a stop event) or th...
void registerClock(sparta::Clock *clk)
void finalize()
Finalize the scheduler and allow running.
const Scheduleable * getCurrentFiringEvent() const
Tick getElapsedTicks() const noexcept
The total elapsed ticks.
bool isFinalized() const noexcept override
Is the scheduler finalized.
StatisticDef & getCurrentMicrosecondsStatisticDef()
Returns a StatisticDef holding the microsecond count of this scheduler.
Tick getNumFired() const noexcept
ReadOnlyCounter & getCurrentTicksROCounter()
Returns a counter holding the current tick count of this scheduler.
void printNextCycleEventTree(StreamType &os, uint32_t curr_grp=0, uint32_t curr_event=0, uint32_t future=0) const
A method used for debugging the scheduler. Prints the scheduler's schedule of events.
static const Tick INDEFINITE
Constant for infinite tick count.
constexpr Tick calcIndexTime(const Tick rel_time) const
Const expression to calculate tick value for indexing.
void deregisterClock(sparta::Clock *clk)
Tick getSimulatedPicoSeconds() const noexcept
StartupEvent is a simple class for scheduling a starting event on the Scheduler. It does not support ...
Contains a statistic definition (some useful information which can be computed)
Macros for handling exponential backoff.
const uint32_t NUM_SCHEDULING_PHASES
The number of phases.
SchedulingPhase
The SchedulingPhases used for events (Tick, Update, PortUpdate, etc)