#define REBIND_INTERVAL 10.0	/* seconds between two rebinding rounds */

struct lp_cost_id {
	double workload_factor;
	unsigned int id;
};

static struct lp_cost_id *lp_cost;	/* per-LP cost table filled by the local reductions */

static unsigned int *new_LPS_binding;	/* LP -> worker-thread map computed by the knapsack */
static timer rebinding_timer;

#ifdef HAVE_LP_REBINDING
static int binding_acquire_phase = 0;
static __thread int local_binding_acquire_phase = 0;

static int binding_phase = 0;
static __thread int local_binding_phase = 0;

static atomic_t worker_thread_reduction;
#endif
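The pairing of a plain global counter with a __thread twin is what lets every worker thread notice a phase change independently: the __thread storage class gives each thread its own private copy of the variable. A self-contained illustration of that behaviour (generic names and plain pthreads, not the module's code):

#include <stdio.h>
#include <pthread.h>

/* Each thread sees its own copy of a __thread variable, which is why the
 * module can pair one global phase counter with one per-thread counter. */
static int global_counter = 0;
static __thread int local_counter = 0;

static void *worker(void *arg)
{
	(void)arg;
	local_counter++;	/* touches only this thread's private copy */
	printf("local=%d global=%d\n", local_counter, global_counter);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	global_counter++;	/* shared by every thread */
	pthread_create(&t1, NULL, worker, NULL);
	pthread_create(&t2, NULL, worker, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}

Both workers print local=1: each incremented only its own copy. In the listing above, only the master thread advances the global counters, while each worker merely reads them and updates its own local_* copies.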
/* From LPs_block_binding(): LPs are split into contiguous blocks, one per thread */
unsigned int block_leftover;		/* remainder of n_prc / n_cores */
...
if (block_leftover > 0) {		/* some threads absorb one extra LP */
...
if (block_leftover == 0) {		/* remainder exhausted: back to the base block size */
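These fragments belong to the initial block binding: the n_prc LPs are carved into contiguous blocks, one block per worker thread, and block_leftover tracks the remainder when n_prc is not a multiple of n_cores. The toy sketch below is self-contained, only prints index ranges, and assumes the extra LPs go to the first block_leftover threads; the real function walks lps_blocks and records each LP's worker_thread instead.

#include <stdio.h>

/* Toy block distribution: lps LPs over a given number of threads.
 * Thread t receives base + 1 LPs while the leftover lasts, base afterwards. */
static void print_block_binding(unsigned int lps, unsigned int threads)
{
	unsigned int base = lps / threads;
	unsigned int block_leftover = lps - base * threads;
	unsigned int next_lp = 0;

	for (unsigned int t = 0; t < threads; t++) {
		unsigned int block = base + (t < block_leftover ? 1 : 0);
		printf("thread %u: LPs [%u, %u)\n", t, next_lp, next_lp + block);
		next_lp += block;
	}
}

int main(void)
{
	print_block_binding(10, 4);	/* blocks of 3, 3, 2, 2 */
	return 0;
}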
/* compare_lp_cost(): qsort()-style comparator, orders LPs by decreasing workload factor */
return (B->workload_factor - A->workload_factor);
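One detail worth flagging: the comparator returns the difference of two doubles from a function declared to return int, so the value is truncated toward zero and two LPs whose workload factors differ by less than 1.0 compare as equal. A sketch of a truncation-free comparator with the same ordering (the struct mirrors the lp_cost_id fields shown above; this is not the module's code):

#include <stdlib.h>

struct lp_cost_id {
	double workload_factor;
	unsigned int id;
};

/* Sort by decreasing workload_factor without truncating the difference. */
static int compare_lp_cost_safe(const void *a, const void *b)
{
	const struct lp_cost_id *A = a;
	const struct lp_cost_id *B = b;

	if (B->workload_factor > A->workload_factor)
		return 1;
	if (B->workload_factor < A->workload_factor)
		return -1;
	return 0;
}

/* Usage: qsort(lp_cost, n_prc, sizeof(struct lp_cost_id), compare_lp_cost_safe); */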
static void LP_knapsack(void)
{
	register unsigned int i, j;
	double reference_knapsack = 0;
	bool assigned;
	...
	/* Reference knapsack: total workload divided evenly across the cores */
	for (i = 0; i < n_prc; i++) {
		reference_knapsack += lp_cost[i].workload_factor;
	}
	reference_knapsack /= n_cores;
	...
	/* Give every core one LP to start from */
	bzero(assignments, sizeof(double) * n_cores);
	j = 0;
	for (i = 0; i < n_cores; i++) {
		assignments[j] += lp_cost[i].workload_factor;
		new_LPS_binding[i] = j;
		j++;
	}

	/* Greedy fill: each remaining LP goes to the first core whose load
	 * stays within the reference knapsack */
	for (; i < n_prc; i++) {
		assigned = false;
		for (j = 0; j < n_cores; j++) {
			if (assignments[j] + lp_cost[i].workload_factor <=
			    reference_knapsack) {
				assignments[j] += lp_cost[i].workload_factor;
				new_LPS_binding[i] = j;
				assigned = true;
				break;
			}
		}
		if (assigned == false)
			break;
	}

	/* Whatever did not fit is spread round-robin across the cores */
	...
	for (; i < n_prc; i++) {
		new_LPS_binding[i] = j;
		j = (j + 1) % n_cores;
	}
}
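The greedy scheme is easiest to follow on a toy instance. The self-contained sketch below uses made-up costs and simplified spill handling (anything that does not fit under the per-core reference load is placed round-robin immediately, rather than after breaking out of the loop as the listing does); with six LPs and two cores it ends with both cores at exactly the reference load of 8.0.

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

#define N_LP	6
#define N_CORE	2

int main(void)
{
	/* Made-up workload factors, already sorted in decreasing order */
	double cost[N_LP] = { 5.0, 4.0, 3.0, 2.0, 1.5, 0.5 };
	double assignments[N_CORE];
	unsigned int binding[N_LP];
	double reference = 0.0;
	unsigned int i, j;

	for (i = 0; i < N_LP; i++)
		reference += cost[i];
	reference /= N_CORE;			/* target load per core: 8.0 */

	memset(assignments, 0, sizeof(assignments));

	/* Seed each core with one LP, then greedily place the rest */
	for (i = 0; i < N_CORE; i++) {
		assignments[i] += cost[i];
		binding[i] = i;
	}
	for (; i < N_LP; i++) {
		bool assigned = false;
		for (j = 0; j < N_CORE; j++) {
			if (assignments[j] + cost[i] <= reference) {
				assignments[j] += cost[i];
				binding[i] = j;
				assigned = true;
				break;
			}
		}
		if (!assigned) {		/* simplified spill: round-robin */
			binding[i] = i % N_CORE;
			assignments[binding[i]] += cost[i];
		}
	}

	for (i = 0; i < N_LP; i++)
		printf("LP %u -> core %u\n", i, binding[i]);
	for (j = 0; j < N_CORE; j++)
		printf("core %u load %.1f\n", j, assignments[j]);
	return 0;
}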
#ifdef HAVE_LP_REBINDING
static void post_local_reduction(void)
{
	unsigned int i = 0;
	msg_t *first_evt, *last_evt;

	foreach_bound_lp(lp) {
		/* first_evt/last_evt delimit the LP's input queue */
		...
		lp_cost[lp->lid.to_int].id = i++;
		lp_cost[lp->lid.to_int].workload_factor = list_sizeof(lp->queue_in);
		lp_cost[lp->lid.to_int].workload_factor *= statistics_get_lp_data(lp, STAT_GET_EVENT_TIME_LP);
		lp_cost[lp->lid.to_int].workload_factor /= (last_evt-> ...
	}
}
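The listing is cut off right at the divisor, but the shape of the estimate is clear from what remains: each LP's workload factor is the number of queued events times the average event execution time, divided by something derived from last_evt (and presumably first_evt), i.e. a span over which those events occurred. A hedged sketch of that computation with illustrative parameter names (queued_events, avg_event_time, first_ts and last_ts are this sketch's assumptions, not the module's API):

#include <stdio.h>

/* Illustrative helper: estimated workload of one LP.
 * Assumes the divisor is the timestamp span between the first and last
 * queued event; the original listing is truncated at that point. */
static double estimate_workload_factor(double queued_events,
                                       double avg_event_time,
                                       double first_ts, double last_ts)
{
	double span = last_ts - first_ts;
	if (span <= 0.0)
		return 0.0;	/* degenerate queue: no meaningful estimate */
	return queued_events * avg_event_time / span;
}

int main(void)
{
	/* 100 queued events, 0.002 each, covering 0.5 units of time -> 0.4 */
	printf("%f\n", estimate_workload_factor(100.0, 0.002, 0.0, 0.5));
	return 0;
}

Under that reading, an LP with many queued events and expensive handlers concentrated in a short span gets a large factor, and therefore more weight in the knapsack.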
static void install_binding(void)
{
	...
}

/* One-time setup, guarded by first_lp_binding */
initialize_binding_blocks();
...
timer_start(rebinding_timer);
...
new_LPS_binding = rsalloc(sizeof(int) * n_prc);
lp_cost = rsalloc(sizeof(struct lp_cost_id) * n_prc);
#ifdef HAVE_LP_REBINDING
	/* Master thread: a new rebinding phase opens once REBIND_INTERVAL seconds elapse */
	if (... && (timer_value_seconds(rebinding_timer) >= REBIND_INTERVAL)) {
		timer_restart(rebinding_timer);
		...
	}
	...
	binding_acquire_phase++;
	/* Each worker thread reacts to a phase change exactly once */
	if (local_binding_phase < binding_phase) {
		local_binding_phase = binding_phase;
		post_local_reduction();
	}
	if (local_binding_acquire_phase < binding_acquire_phase) {
		local_binding_acquire_phase = binding_acquire_phase;
		...
	}
#ifdef HAVE_PREEMPTION
	...
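Read together, the fragments suggest a two-step protocol per rebinding round: the master opens a reduction phase when the timer expires, every thread posts its LP costs exactly once, and once the reduction is complete the master runs LP_knapsack() and opens an "acquire" phase in which every thread installs the new binding. The single-threaded walk-through below restates that flow; the stubbed functions and the handling of the reduction counter are assumptions about the elided lines, not a reconstruction of the module.

#include <stdio.h>
#include <stdbool.h>

#define N_THREADS 2

static int binding_phase = 0, binding_acquire_phase = 0;
static int local_binding_phase[N_THREADS];
static int local_binding_acquire_phase[N_THREADS];
static int worker_thread_reduction;	/* an atomic_t in the real module */

/* Stubs with simplified signatures, standing in for the module's functions */
static void post_local_reduction(int tid)	{ printf("t%d: reduce\n", tid); }
static void LP_knapsack(void)			{ printf("master: knapsack\n"); }
static void install_binding(int tid)		{ printf("t%d: install\n", tid); }

static void rebind_step(int tid, bool timer_expired)
{
	if (tid == 0) {					/* master thread */
		if (timer_expired) {
			binding_phase++;		/* open the reduction phase */
			worker_thread_reduction = N_THREADS;
		} else if (binding_acquire_phase < binding_phase &&
			   worker_thread_reduction == 0) {
			LP_knapsack();			/* all costs collected */
			binding_acquire_phase++;	/* open the install phase */
		}
	}
	if (local_binding_phase[tid] < binding_phase) {
		local_binding_phase[tid] = binding_phase;
		post_local_reduction(tid);
		worker_thread_reduction--;		/* atomic_dec() in the module */
	}
	if (local_binding_acquire_phase[tid] < binding_acquire_phase) {
		local_binding_acquire_phase[tid] = binding_acquire_phase;
		install_binding(tid);
	}
}

int main(void)
{
	/* One timer expiry, then a few ordinary scheduler iterations */
	rebind_step(0, true);  rebind_step(1, false);
	rebind_step(0, false); rebind_step(1, false);
	return 0;
}

In the module itself the counter is the atomic worker_thread_reduction declared above, presumably handled through the atomic_set()/atomic_dec()/atomic_read() macros listed among the referenced symbols below.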
Referenced symbols:

struct lp_struct **lps_blocks
Maintain LPs' simulation and execution states.
#define atomic_read(v)
Read operation on an atomic counter.
unsigned int n_cores
Total number of cores required for simulation.
void atomic_dec(atomic_t *)
Decrement operation on an atomic counter.

Related headers: core ROOT-Sim functionalities; the ROOT-Sim scheduler main module header; load sharing rules across worker threads; generic thread management facilities.
#define atomic_set(v, i)
Set operation on an atomic counter.
static void LP_knapsack(void)
Approximated knapsack assignment of LPs to worker threads, balancing the estimated per-LP workload.

bool thread_barrier(barrier_t *b)
Synchronization barrier across the worker threads.
barrier_t all_thread_barrier
Barrier for all worker threads.
#define master_thread()
This macro expands to true if the current KLT is the master thread for the local kernel.
static __thread bool first_lp_binding
A guard to know whether this is the first invocation or not.
unsigned int n_prc
Number of logical processes hosted by the current kernel instance.
__thread unsigned int n_prc_per_thread
This is used to keep track of how many LPs were bound to the current KLT.
static int compare_lp_cost(const void *a, const void *b)
qsort()-style comparator ordering LP costs by decreasing workload factor.

__thread unsigned int local_tid
Thread-local identifier of the current worker thread.
unsigned int worker_thread
ID of the worker thread towards which the LP is bound.
#define unlikely(exp)
Optimize the branch as likely not taken.
static void LPs_block_binding(void)
Performs the initial block-wise binding of LPs to the worker threads.
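The atomic counter routines referenced above (atomic_set(), atomic_dec(), atomic_read()) are presumably what worker_thread_reduction is managed with: a natural reading of the listing is that the master arms the counter with the number of worker threads and waits for it to reach zero as each thread posts its costs. A minimal count-down sketch using C11 atomics instead of the module's atomic_t wrapper (whose internals are not shown here):

#include <stdio.h>
#include <stdatomic.h>

/* Count-down reduction: the master arms the counter, each worker decrements
 * it once after publishing its contribution, and the master proceeds when it
 * reads zero. */
static atomic_int worker_thread_reduction;

static void master_arm(int n_threads)
{
	atomic_store(&worker_thread_reduction, n_threads);	/* ~ atomic_set() */
}

static void worker_done(void)
{
	atomic_fetch_sub(&worker_thread_reduction, 1);		/* ~ atomic_dec() */
}

static int reduction_complete(void)
{
	return atomic_load(&worker_thread_reduction) == 0;	/* ~ atomic_read() */
}

int main(void)
{
	master_arm(2);
	worker_done();
	printf("complete: %d\n", reduction_complete());	/* 0 */
	worker_done();
	printf("complete: %d\n", reduction_complete());	/* 1 */
	return 0;
}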