Dispatch::Dispatch(sparta::TreeNode * node, const DispatchParameterSet * p) :
    sparta::Unit(node),
    dispatch_queue_("dispatch_queue", p->dispatch_queue_depth,
                    node->getClock(), getStatisticSet()),
    num_to_dispatch_(p->num_to_dispatch)
{
    weighted_unit_distribution_context_.assignContextWeights(p->context_weights);

    dispatch_queue_.enableCollection(node);

    stall_counters_[current_stall_].startCounting();

    in_dispatch_queue_write_.
        registerConsumerHandler(CREATE_SPARTA_HANDLER_WITH_DATA(Dispatch, dispatchQueueAppended_, InstGroup));
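The listing breaks off before the rest of the constructor. Based on the handlers defined later in this file and on the CREATE_SPARTA_HANDLER macros and StartupEvent referenced at the end of this page, the remainder most likely follows the pattern below; treat it as a sketch, not the verbatim source (the in_fpu_credits_ port name and the trailing comment stand in for ports whose names are not shown in the listing):

    // Sketch, not verbatim: remaining consumer-handler registrations.
    in_reorder_credits_.
        registerConsumerHandler(CREATE_SPARTA_HANDLER_WITH_DATA(Dispatch, robCredits_, uint32_t));
    in_fpu_credits_.      // assumed port name
        registerConsumerHandler(CREATE_SPARTA_HANDLER_WITH_DATA(Dispatch, fpuCredits_, uint32_t));
    // ...similar registrations for the ALU0/ALU1/BR/LSU credit ports and the flush port...

    // Kick off sendInitialCredits_() when simulation starts.
    sparta::StartupEvent(node, CREATE_SPARTA_HANDLER(Dispatch, sendInitialCredits_));
}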
void Dispatch::sendInitialCredits_()
{
    out_dispatch_queue_credits_.send(dispatch_queue_.capacity());
}
void Dispatch::fpuCredits_(const uint32_t& credits) {
    credits_fpu_ += credits;
    if (credits_rob_ > 0 && dispatch_queue_.size() > 0) {
        ev_dispatch_insts_.schedule(sparta::Clock::Cycle(0));
    }
    if (SPARTA_EXPECT_FALSE(info_logger_)) {
        info_logger_ << "FPU got " << credits << " credits, total: " << credits_fpu_;
    }
}
void Dispatch::alu0Credits_(const uint32_t& credits) {
    credits_alu0_ += credits;
    if (credits_rob_ > 0 && dispatch_queue_.size() > 0) {
        ev_dispatch_insts_.schedule(sparta::Clock::Cycle(0));
    }
    if (SPARTA_EXPECT_FALSE(info_logger_)) {
        info_logger_ << "ALU0 got " << credits << " credits, total: " << credits_alu0_;
    }
}
void Dispatch::alu1Credits_(const uint32_t& credits) {
    credits_alu1_ += credits;
    if (credits_rob_ > 0 && dispatch_queue_.size() > 0) {
        ev_dispatch_insts_.schedule(sparta::Clock::Cycle(0));
    }
    if (SPARTA_EXPECT_FALSE(info_logger_)) {
        info_logger_ << "ALU1 got " << credits << " credits, total: " << credits_alu1_;
    }
}
void Dispatch::brCredits_(const uint32_t& credits) {
    credits_br_ += credits;
    if (credits_rob_ > 0 && dispatch_queue_.size() > 0) {
        ev_dispatch_insts_.schedule(sparta::Clock::Cycle(0));
    }
    if (SPARTA_EXPECT_FALSE(info_logger_)) {
        info_logger_ << "BR got " << credits << " credits, total: " << credits_br_;
    }
}
void Dispatch::lsuCredits_(const uint32_t& credits) {
    credits_lsu_ += credits;
    if (credits_rob_ > 0 && dispatch_queue_.size() > 0) {
        ev_dispatch_insts_.schedule(sparta::Clock::Cycle(0));
    }
    if (SPARTA_EXPECT_FALSE(info_logger_)) {
        info_logger_ << "LSU got " << credits << " credits, total: " << credits_lsu_;
    }
}
void Dispatch::robCredits_(const uint32_t&) {
    uint32_t nc = in_reorder_credits_.pullData();
    credits_rob_ += nc;
    if (((credits_fpu_ > 0) || (credits_alu0_ > 0) || (credits_alu1_ > 0) || (credits_br_ > 0))
        && dispatch_queue_.size() > 0) {
        ev_dispatch_insts_.schedule(sparta::Clock::Cycle(0));
    }
    if (SPARTA_EXPECT_FALSE(info_logger_)) {
        info_logger_ << "ROB got " << nc << " credits, total: " << credits_rob_;
    }
}
void Dispatch::dispatchQueueAppended_(const InstGroup &) {
    for (auto & i : in_dispatch_queue_write_.pullData()) {
        dispatch_queue_.push(i);
    }

    if (((credits_fpu_ > 0) || (credits_alu0_ > 0) || (credits_alu1_ > 0) || (credits_br_ > 0) || (credits_lsu_ > 0))
        && credits_rob_ > 0) {
        ev_dispatch_insts_.schedule(sparta::Clock::Cycle(0));
    }
}
void Dispatch::handleFlush_(const FlushManager::FlushingCriteria & criteria)
{
    // Return all queued entries' credits upstream, drop the queue contents,
    // and reclaim credits for any in-flight sends that get cancelled.
    out_dispatch_queue_credits_.send(dispatch_queue_.size());
    dispatch_queue_.clear();
    credits_fpu_  += out_fpu_write_.cancel();
    credits_alu0_ += out_alu0_write_.cancel();
    credits_alu1_ += out_alu1_write_.cancel();
    credits_br_   += out_br_write_.cancel();
    credits_lsu_  += out_lsu_write_.cancel();
    out_reorder_write_.cancel();
}
void Dispatch::dispatchInstructions_()
{
    uint32_t num_dispatch = std::min(dispatch_queue_.size(), num_to_dispatch_);
    num_dispatch = std::min(credits_rob_, num_dispatch);

    // Stop the current stall counter; it is restarted below once this cycle's
    // stall reason (if any) is known.
    stall_counters_[current_stall_].stopCounting();

    if (num_dispatch == 0) {
        stall_counters_[current_stall_].startCounting();
        return;
    }

    current_stall_ = NOT_STALLED;

    InstGroup insts_dispatched;
    bool keep_dispatching = true;
    for (uint32_t i = 0; (i < num_dispatch) && keep_dispatching; ++i)
    {
        bool dispatched = false;
        ExampleInstPtr & ex_inst_ptr = dispatch_queue_.access(0);
        ExampleInst & ex_inst = *ex_inst_ptr;

        switch (ex_inst.getUnit())
        {
        case ExampleInst::TargetUnit::FPU:
            {
                if (credits_fpu_ > 0) {
                    --credits_fpu_;
                    dispatched = true;
                    out_fpu_write_.send(ex_inst_ptr);
                    ++unit_distribution_[static_cast<uint32_t>(ExampleInst::TargetUnit::FPU)];
                    ++(unit_distribution_context_.context(static_cast<uint32_t>(ExampleInst::TargetUnit::FPU)));
                    ++(weighted_unit_distribution_context_.context(static_cast<uint32_t>(ExampleInst::TargetUnit::FPU)));

                    if (SPARTA_EXPECT_FALSE(info_logger_)) {
                        info_logger_ << ex_inst_ptr << " to FPU ";
                    }
                }
                else {
                    current_stall_ = FPU_BUSY;
                    keep_dispatching = false;
                }
                break;
            }
        case ExampleInst::TargetUnit::ALU0:
            {
                if (credits_alu0_ > 0) {
                    --credits_alu0_;
                    dispatched = true;
                    out_alu0_write_.send(ex_inst_ptr, 1);
                    ++unit_distribution_[static_cast<uint32_t>(ExampleInst::TargetUnit::ALU0)];
                    ++(unit_distribution_context_.context(static_cast<uint32_t>(ExampleInst::TargetUnit::ALU0)));
                    ++(weighted_unit_distribution_context_.context(static_cast<uint32_t>(ExampleInst::TargetUnit::ALU0)));

                    if (SPARTA_EXPECT_FALSE(info_logger_)) {
                        info_logger_ << ex_inst_ptr << " to ALU0 ";
                    }
                }
                else {
                    current_stall_ = ALU0_BUSY;
                    keep_dispatching = false;
                }
                break;
            }
        case ExampleInst::TargetUnit::ALU1:
            {
                if (credits_alu1_ > 0) {
                    --credits_alu1_;
                    dispatched = true;
                    out_alu1_write_.send(ex_inst_ptr, 1);
                    ++unit_distribution_[static_cast<uint32_t>(ExampleInst::TargetUnit::ALU1)];
                    ++(unit_distribution_context_.context(static_cast<uint32_t>(ExampleInst::TargetUnit::ALU1)));
                    ++(weighted_unit_distribution_context_.context(static_cast<uint32_t>(ExampleInst::TargetUnit::ALU1)));

                    if (SPARTA_EXPECT_FALSE(info_logger_)) {
                        info_logger_ << ex_inst_ptr << " to ALU1 ";
                    }
                }
                else {
                    current_stall_ = ALU1_BUSY;
                    keep_dispatching = false;
                }
                break;
            }
        case ExampleInst::TargetUnit::BR:
            {
                if (credits_br_ > 0) {
                    --credits_br_;
                    dispatched = true;
                    out_br_write_.send(ex_inst_ptr, 1);
                    ++unit_distribution_[static_cast<uint32_t>(ExampleInst::TargetUnit::BR)];
                    ++(unit_distribution_context_.context(static_cast<uint32_t>(ExampleInst::TargetUnit::BR)));
                    ++(weighted_unit_distribution_context_.context(static_cast<uint32_t>(ExampleInst::TargetUnit::BR)));

                    if (SPARTA_EXPECT_FALSE(info_logger_)) {
                        info_logger_ << ex_inst_ptr << " to BR ";
                    }
                }
                else {
                    current_stall_ = BR_BUSY;
                    keep_dispatching = false;
                }
                break;
            }
        case ExampleInst::TargetUnit::LSU:
            {
                if (credits_lsu_ > 0) {
                    --credits_lsu_;
                    dispatched = true;
                    out_lsu_write_.send(ex_inst_ptr, 1);
                    ++unit_distribution_[static_cast<uint32_t>(ExampleInst::TargetUnit::LSU)];
                    ++(unit_distribution_context_.context(static_cast<uint32_t>(ExampleInst::TargetUnit::LSU)));
                    ++(weighted_unit_distribution_context_.context(static_cast<uint32_t>(ExampleInst::TargetUnit::LSU)));

                    if (SPARTA_EXPECT_FALSE(info_logger_)) {
                        info_logger_ << ex_inst_ptr << " to LSU ";
                    }
                }
                else {
                    current_stall_ = LSU_BUSY;
                    keep_dispatching = false;
                }
                break;
            }
        case ExampleInst::TargetUnit::ROB:
            {
                // Instructions targeting the ROB are complete at dispatch.
                ex_inst.setStatus(ExampleInst::Status::COMPLETED);
                dispatched = true;
                break;
            }
        }

        if (dispatched) {
            insts_dispatched.emplace_back(ex_inst_ptr);
            dispatch_queue_.pop();
        }
        else {
            // Nothing could be dispatched this iteration; log which units are out of credits.
            if (SPARTA_EXPECT_FALSE(info_logger_)) {
                info_logger_ << "Could not dispatch: " << ex_inst_ptr
                             << " ALU0_B(" << std::boolalpha << (credits_alu0_ == 0)
                             << ") ALU1_B(" << (credits_alu1_ == 0)
                             << ") FPU_B(" << (credits_fpu_ == 0)
                             << ") BR_B(" << (credits_br_ == 0) << ")";
            }
            break;
        }
    }

    if (!insts_dispatched.empty()) {
        out_dispatch_queue_credits_.send(insts_dispatched.size());
        out_reorder_write_.send(insts_dispatched);
        credits_rob_ -= insts_dispatched.size();
    }

    // If work remains and nothing stalled, try again next cycle; either way,
    // restart the stall counter for the current state.
    if ((credits_rob_ > 0) && (dispatch_queue_.size() > 0) && (current_stall_ == NOT_STALLED)) {
        ev_dispatch_insts_.schedule(1);
    }
    stall_counters_[current_stall_].startCounting();
}
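As a quick worked example of the cap computed at the top of dispatchInstructions_() (the numbers below are made up, not taken from the model): with 6 instructions in the dispatch queue, num_to_dispatch_ = 4 and credits_rob_ = 2, at most min(6, 4, 2) = 2 dispatch attempts are made this cycle, and each attempt still needs a credit from the instruction's target unit.

    // Standalone illustration of the num_dispatch computation (hypothetical values).
    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    int main() {
        const uint32_t queue_size = 6, num_to_dispatch = 4, credits_rob = 2;
        uint32_t num_dispatch = std::min(queue_size, num_to_dispatch);
        num_dispatch = std::min(credits_rob, num_dispatch);
        std::cout << "dispatch attempts this cycle: " << num_dispatch << "\n";  // prints 2
        return 0;
    }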
Referenced symbols:

#define sparta_assert(...) : Simple variadic assertion that will throw a sparta_exception if the condition fails.
#define SPARTA_EXPECT_FALSE(x) : A macro for hinting to the compiler that a particular condition should be considered most likely false.
#define CREATE_SPARTA_HANDLER_WITH_DATA(clname, meth, dataT)
#define CREATE_SPARTA_HANDLER(clname, meth)
File that defines the StartupEvent class.
Parameters for Dispatch model.
Dispatch(sparta::TreeNode *node, const DispatchParameterSet *p) : Constructor for Dispatch.
static const char name[] : Name of this resource. Required by sparta::UnitFactory.
const counter_type & context(const uint32_t idx) const : Return the internal counter at the given context.
DataT pullData() : Return the last data received by the port, then clear it.
void enableCollection(TreeNode *node) override : Enable pipeline collection.
uint32_t cancel() : Cancel all outstanding port sends regardless of criteria.
void send(const DataT &dat, sparta::Clock::Cycle rel_time=0) : Send data to bound receivers.
size_type size() const : Return the number of valid entries.
uint32_t capacity() const : Return the fixed size of this queue.
iterator push(const value_type &dat) : Push data to the Queue.
value_type & access(uint32_t idx) : Read and return a non-const reference to the data at the given index.
void enableCollection(TreeNode *parent) : Request that this queue begin collecting its contents for pipeline collection.
void clear() : Empty the queue.
void pop() : Pop the data at the front of the structure (oldest element). After pop, the iterator always points to the ...
void schedule(Clock::Cycle rel_cycle=0) : Schedule this PhasedSingleCycleUniqueEvent exactly zero or one cycle into the future. ...
sparta::StartupEvent : StartupEvent is a simple class for scheduling a starting event on the Scheduler. It does not support ...
sparta::TreeNode : Node in a composite tree representing a sparta Tree item.
log::MessageSource info_logger_ : Default info logger.
Macros for handling exponential backoff.
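The ports and queue documented above implement a credit handshake: at startup Dispatch advertises the queue capacity as credits (sendInitialCredits_), the upstream stage is expected to spend one credit per instruction it writes, and Dispatch returns credits as instructions drain (or all at once on a flush). A minimal, framework-free sketch of that idea in plain C++ follows; the types and names are hypothetical and are not part of the sparta API.

    #include <cstdint>
    #include <deque>
    #include <iostream>

    // Toy consumer-side queue: it hands out its capacity as credits and refunds
    // one credit per entry it retires, mirroring the role of dispatch_queue_ and
    // out_dispatch_queue_credits_ above.
    struct CreditedQueue {
        explicit CreditedQueue(uint32_t depth) : credits_to_return(depth) {}
        std::deque<int> entries;
        uint32_t credits_to_return;      // credits owed back to the producer
        void push(int item)       { entries.push_back(item); }
        void pop_and_refund()     { entries.pop_front(); ++credits_to_return; }
        uint32_t collect_credits() { uint32_t c = credits_to_return; credits_to_return = 0; return c; }
    };

    int main() {
        CreditedQueue dispatch_queue(10);                              // like dispatch_queue_depth
        uint32_t producer_credits = dispatch_queue.collect_credits();  // initial credits == capacity

        // The producer may only send while it holds credits.
        for (int i = 0; i < 12; ++i) {
            if (producer_credits == 0) break;
            dispatch_queue.push(i);
            --producer_credits;
        }

        // The consumer retires three entries and returns their credits.
        for (int i = 0; i < 3; ++i) dispatch_queue.pop_and_refund();
        producer_credits += dispatch_queue.collect_credits();

        std::cout << "queued=" << dispatch_queue.entries.size()
                  << " producer_credits=" << producer_credits << "\n"; // queued=7 producer_credits=3
        return 0;
    }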