79 void *ptr = NULL, *ckpt = NULL;
81 size_t size, chunk_size, bitmap_size;
86 timer checkpoint_timer;
87 timer_start(checkpoint_timer);
89 m_state = lp->
mm->m_state;
96 rootsim_error(
true,
"(%d) Unable to acquire memory for checkpointing the current state (memory exhausted?)", lp->
lid.
to_int);
106 for (i = 0; i < m_state->num_areas; i++) {
108 m_area = &m_state->areas[i];
115 if (
unlikely(m_area->alloc_chunks == 0)) {
117 m_area->dirty_chunks = 0;
118 m_area->state_changed = 0;
120 if (
likely(m_area->use_bitmap != NULL)) {
121 memset(m_area->dirty_bitmap, 0, bitmap_size);
130 memcpy(ptr, m_area->use_bitmap, bitmap_size);
131 ptr = (
void *)((
char *)ptr + bitmap_size);
133 chunk_size = UNTAGGED_CHUNK_SIZE(m_area);
137 if (CHECK_LOG_MODE_BIT(m_area)) {
140 memcpy(ptr, m_area->area, m_area->num_chunks * chunk_size);
141 ptr = (
void *)((
char *)ptr + m_area->num_chunks * chunk_size);
145 #define copy_from_area(x) ({\ 146 memcpy(ptr, (void*)((char*)m_area->area + ((x) * chunk_size)), chunk_size);\ 147 ptr = (void*)((char*)ptr + chunk_size);}) 152 #undef copy_from_area 156 m_area->dirty_chunks = 0;
157 m_area->state_changed = 0;
158 bzero(m_area->dirty_bitmap, bitmap_size);
163 if (
unlikely((
char *)ckpt + size != ptr))
164 rootsim_error(
true,
"Actual (full) ckpt size is wrong by %d bytes!\nlid = %d ckpt = %p size = %#x (%d), ptr = %p, ckpt + size = %p\n",
165 (
char *)ckpt + size - (
char *)ptr, lp->
lid.
to_int,
166 ckpt, size, size, ptr, (
char *)ckpt + size);
170 statistics_post_data(lp, STAT_CKPT_TIME, (
double)timer_value_micro(checkpoint_timer));
171 statistics_post_data(lp, STAT_CKPT_MEM, (
double)size);
197 statistics_post_data(lp, STAT_CKPT, 1.0);
227 char *ptr, *target_ptr;
228 int i, original_num_areas;
229 size_t chunk_size, bitmap_size;
234 timer recovery_timer;
235 timer_start(recovery_timer);
237 m_state = lp->
mm->m_state;
238 target_ptr = ptr + ((
malloc_state *) ptr)->total_log_size;
239 original_num_areas = m_state->num_areas;
242 m_area = m_state->areas;
244 m_state->areas = m_area;
248 for (i = 0; i < m_state->num_areas; i++) {
250 m_area = &m_state->areas[i];
254 if (ptr >= target_ptr || m_area->idx != ((
malloc_area *) ptr)->idx) {
256 m_area->dirty_chunks = 0;
257 m_area->state_changed = 0;
258 m_area->alloc_chunks = 0;
259 m_area->next_chunk = 0;
260 RESET_LOG_MODE_BIT(m_area);
261 RESET_AREA_LOCK_BIT(m_area);
263 if (
likely(m_area->use_bitmap != NULL)) {
264 memset(m_area->use_bitmap, 0, bitmap_size);
265 memset(m_area->dirty_bitmap, 0, bitmap_size);
267 m_area->last_access = m_state->timestamp;
276 memcpy(m_area->use_bitmap, ptr, bitmap_size);
280 bzero(m_area->dirty_bitmap, bitmap_size);
281 m_area->dirty_chunks = 0;
282 m_area->state_changed = 0;
284 chunk_size = UNTAGGED_CHUNK_SIZE(m_area);
287 if (CHECK_LOG_MODE_BIT(m_area)) {
289 memcpy(m_area->area, ptr, m_area->num_chunks * chunk_size);
290 ptr += m_area->num_chunks * chunk_size;
297 #define copy_to_area(x) ({\ 298 memcpy((void*)((char*)m_area->area + ((x) * chunk_size)), ptr, chunk_size);\ 309 if (original_num_areas > m_state->num_areas) {
311 for (i = m_state->num_areas; i < original_num_areas; i++) {
313 m_area = &m_state->areas[i];
314 m_area->alloc_chunks = 0;
315 m_area->dirty_chunks = 0;
316 m_area->state_changed = 0;
317 m_area->next_chunk = 0;
318 m_area->last_access = m_state->timestamp;
319 m_state->areas[m_area->prev].next = m_area->idx;
321 RESET_LOG_MODE_BIT(m_area);
322 RESET_AREA_LOCK_BIT(m_area);
324 if (
likely(m_area->use_bitmap != NULL)) {
327 memset(m_area->use_bitmap, 0, bitmap_size);
328 memset(m_area->dirty_bitmap, 0, bitmap_size);
331 m_state->num_areas = original_num_areas;
334 m_state->timestamp = -1;
338 statistics_post_data(lp, STAT_RECOVERY_TIME, (
double)timer_value_micro(recovery_timer));
357 statistics_post_data(lp, STAT_RECOVERY, 1.0);
373 if (
likely(ckpt != NULL)) {
#define likely(exp)
Optimize the branch as likely taken.
Structure for LP's state.
void log_delete(void *ckpt)
Core ROOT-Sim functionalities.
#define bitmap_required_size(requested_bits)
Computes the required size of a bitmap with requested_bits entries.
Dynamic Memory Logger and Restorer (DyMeLoR)
unsigned int to_int
The LID numerical value.
The ROOT-Sim scheduler main module header.
struct memory_map * mm
Memory map of the LP.
void log_restore(struct lp_struct *lp, state_t *state_queue_node)
bool is_incremental
Tells if it is an incremental log or a full one (when used for logging).
void * log_state(struct lp_struct *lp)
void * log
A pointer to the actual log.
Definition of the memory map.
LID_t lid
Local ID of the LP.
size_t get_log_size(malloc_state *logged_state)
#define bitmap_foreach_set(bitmap, bitmap_size, func)
This executes a user-supplied function for each set bit in the bitmap.
void * log_full(struct lp_struct *lp)
void restore_full(struct lp_struct *lp, void *ckpt)
#define unlikely(exp)
Optimize the branch as likely not taken.
This structure lets DyMeLoR handle one malloc area (for serving given-size memory requests) ...