79 void *ptr = NULL, *ckpt = NULL;
81 size_t size, chunk_size, bitmap_size;
85 timer checkpoint_timer;
86 timer_start(checkpoint_timer);
94 rootsim_error(
true,
"(%d) Unable to acquire memory for checkpointing the current state (memory exhausted?)", lp->
lid.
to_int);
104 for (i = 0; i < lp->
mm->m_state->num_areas; i++) {
106 m_area = &lp->
mm->m_state->areas[i];
113 if (
unlikely(m_area->alloc_chunks == 0)) {
115 m_area->dirty_chunks = 0;
116 m_area->state_changed = 0;
118 if (
likely(m_area->use_bitmap != NULL)) {
119 memset(m_area->dirty_bitmap, 0, bitmap_size);
128 memcpy(ptr, m_area->use_bitmap, bitmap_size);
129 ptr = (
void *)((
char *)ptr + bitmap_size);
131 chunk_size = UNTAGGED_CHUNK_SIZE(m_area);
135 if (CHECK_LOG_MODE_BIT(m_area)) {
138 memcpy(ptr, m_area->area, m_area->num_chunks * chunk_size);
139 ptr = (
void *)((
char *)ptr + m_area->num_chunks * chunk_size);
143 #define copy_from_area(x) ({\ 144 memcpy(ptr, (void*)((char*)m_area->area + ((x) * chunk_size)), chunk_size);\ 145 ptr = (void*)((char*)ptr + chunk_size);}) 150 #undef copy_from_area 154 m_area->dirty_chunks = 0;
155 m_area->state_changed = 0;
156 bzero((
void *)m_area->dirty_bitmap, bitmap_size);
161 if (
unlikely((
char *)ckpt + size != ptr))
162 rootsim_error(
true,
"Actual (full) ckpt size is wrong by %d bytes!\nlid = %d ckpt = %p size = %#x (%d), ptr = %p, ckpt + size = %p\n",
163 (
char *)ckpt + size - (
char *)ptr, lp->
lid.
to_int,
164 ckpt, size, size, ptr, (
char *)ckpt + size);
166 lp->
mm->m_state->dirty_areas = 0;
167 lp->
mm->m_state->dirty_bitmap_size = 0;
168 lp->
mm->m_state->total_inc_size = 0;
170 statistics_post_data(lp, STAT_CKPT_TIME, (
double)timer_value_micro(checkpoint_timer));
171 statistics_post_data(lp, STAT_CKPT_MEM, (
double)size);
197 statistics_post_data(lp, STAT_CKPT, 1.0);
228 int i, original_num_areas, restored_areas;
229 size_t chunk_size, bitmap_size;
233 timer recovery_timer;
234 timer_start(recovery_timer);
237 original_num_areas = lp->
mm->m_state->num_areas;
238 new_area = lp->
mm->m_state->areas;
244 lp->
mm->m_state->areas = new_area;
247 for (i = 0; i < lp->
mm->m_state->num_areas; i++) {
249 m_area = &lp->
mm->m_state->areas[i];
253 if (restored_areas == lp->
mm->m_state->busy_areas || m_area->idx != ((
malloc_area *) ptr)->idx) {
255 m_area->dirty_chunks = 0;
256 m_area->state_changed = 0;
257 m_area->alloc_chunks = 0;
258 m_area->next_chunk = 0;
259 RESET_LOG_MODE_BIT(m_area);
260 RESET_AREA_LOCK_BIT(m_area);
262 if (
likely(m_area->use_bitmap != NULL)) {
263 memset(m_area->use_bitmap, 0, bitmap_size);
264 memset(m_area->dirty_bitmap, 0, bitmap_size);
266 m_area->last_access = lp->
mm->m_state->timestamp;
277 memcpy(m_area->use_bitmap, ptr, bitmap_size);
278 ptr = (
void *)((
char *)ptr + bitmap_size);
281 bzero(m_area->dirty_bitmap, bitmap_size);
282 m_area->dirty_chunks = 0;
283 m_area->state_changed = 0;
285 chunk_size = UNTAGGED_CHUNK_SIZE(m_area);
288 if (CHECK_LOG_MODE_BIT(m_area)) {
290 memcpy(m_area->area, ptr, m_area->num_chunks * chunk_size);
291 ptr = (
void *)((
char *)ptr + m_area->num_chunks * chunk_size);
298 #define copy_to_area(x) ({\ 299 memcpy((void*)((char*)m_area->area + ((x) * chunk_size)), ptr, chunk_size);\ 300 ptr = (void*)((char*)ptr + chunk_size);}) 310 if (original_num_areas > lp->
mm->m_state->num_areas) {
312 for (i = lp->
mm->m_state->num_areas; i < original_num_areas; i++) {
314 m_area = &lp->
mm->m_state->areas[i];
315 m_area->alloc_chunks = 0;
316 m_area->dirty_chunks = 0;
317 m_area->state_changed = 0;
318 m_area->next_chunk = 0;
319 m_area->last_access = lp->
mm->m_state->timestamp;
320 lp->
mm->m_state->areas[m_area->prev].next = m_area->idx;
322 RESET_LOG_MODE_BIT(m_area);
323 RESET_AREA_LOCK_BIT(m_area);
325 if (
likely(m_area->use_bitmap != NULL)) {
328 memset(m_area->use_bitmap, 0, bitmap_size);
329 memset(m_area->dirty_bitmap, 0, bitmap_size);
332 lp->
mm->m_state->num_areas = original_num_areas;
335 lp->
mm->m_state->timestamp = -1;
337 lp->
mm->m_state->dirty_areas = 0;
338 lp->
mm->m_state->dirty_bitmap_size = 0;
339 lp->
mm->m_state->total_inc_size = 0;
341 statistics_post_data(lp, STAT_RECOVERY_TIME, (
double)timer_value_micro(recovery_timer));
360 statistics_post_data(lp, STAT_RECOVERY, 1.0);
376 if (
likely(ckpt != NULL)) {
#define likely(exp)
Optimize the branch as likely taken.
Structure for LP's state.
void log_delete(void *ckpt)
Core ROOT-Sim functionalities.
#define bitmap_required_size(requested_bits)
Computes the required size of a bitmap with requested_bits entries.
unsigned int to_int
The LID numerical value.
The ROOT-Sim scheduler main module header.
struct memory_map * mm
Memory map of the LP.
void log_restore(struct lp_struct *lp, state_t *state_queue_node)
bool is_incremental
Tells if it is an incremental log or a full one (when used for logging).
void * log_state(struct lp_struct *lp)
Memory Manager main header.
void * log
A pointer to the actual log.
Definition of the memory map.
LID_t lid
Local ID of the LP.
size_t get_log_size(malloc_state *logged_state)
#define bitmap_foreach_set(bitmap, bitmap_size, func)
This executes a user-supplied function for each set bit in bitmap.
void * log_full(struct lp_struct *lp)
void restore_full(struct lp_struct *lp, void *ckpt)
#define unlikely(exp)
Optimize the branch as likely not taken.
This structure lets DyMeLoR handle one malloc area (for serving given-size memory requests) ...