Commit d9c396a0 authored by iker_martin

Added names for setting data in MaM. Valgrind related bugfixes. Bugfix when sending size_t variables.
parent bd962e07
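
The central API change replaces the bare 0/1 flags previously passed to the malleability data calls with the named constants added further down in this diff. A minimal before/after sketch; the include path and the wrapper function are hypothetical, while the constants and the malleability_add_data signature are taken from this commit:

#include <mpi.h>
#include "malleabilityManager.h" /* hypothetical include path */

static void register_state(int *grp, char *buf, size_t qty) {
    /* Replicated on every process, constant across redistributions.
       Before this commit: malleability_add_data(grp, 1, MPI_INT, 1, 1); */
    malleability_add_data(grp, 1, MPI_INT, MAM_DATA_REPLICATED, MAM_DATA_CONSTANT);

    /* Distributed across processes, re-sent after every redistribution.
       Before this commit the trailing flags were the bare literals 0, 0. */
    malleability_add_data(buf, qty, MPI_CHAR, MAM_DATA_DISTRIBUTED, MAM_DATA_VARIABLE);
}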
......@@ -65,6 +65,7 @@ int main(int argc, char *argv[]) {
if(im_child) {
update_targets();
} else {
init_application();
init_originals();
......@@ -176,6 +177,7 @@ int work() {
MAM_Checkpoint(&state, wait_completed, user_redistribution, NULL);
}
//if(state == MAM_COMPLETED) {}
if(config_file->n_groups == group->grp + 1) { res=1; }
return res;
}
......@@ -471,6 +473,9 @@ void free_application_data() {
free(group->async_array);
group->async_array = NULL;
}
/* MPI_Barrier(MPI_COMM_WORLD); fflush(stdout);
printf("TEST %d\n", group->myId);
MPI_Barrier(MPI_COMM_WORLD); fflush(stdout);*/
MAM_Finalize();
free_zombie_process();
}
......@@ -524,18 +529,18 @@ void init_originals() {
size_t i;
if(config_file->n_groups > 1) {
malleability_add_data(&(group->grp), 1, MPI_INT, 1, 1);
malleability_add_data(&run_id, 1, MPI_INT, 1, 1);
malleability_add_data(&(group->iter_start), 1, MPI_INT, 1, 0);
malleability_add_data(&(group->grp), 1, MPI_INT, MAM_DATA_REPLICATED, MAM_DATA_CONSTANT);
malleability_add_data(&run_id, 1, MPI_INT, MAM_DATA_REPLICATED, MAM_DATA_CONSTANT);
malleability_add_data(&(group->iter_start), 1, MPI_INT, MAM_DATA_REPLICATED, MAM_DATA_VARIABLE);
if(config_file->sdr) {
for(i=0; i<group->sync_data_groups; i++) {
malleability_add_data(group->sync_array[i], group->sync_qty[i], MPI_CHAR, 0, 0);
malleability_add_data(group->sync_array[i], group->sync_qty[i], MPI_CHAR, MAM_DATA_DISTRIBUTED, MAM_DATA_VARIABLE);
}
}
if(config_file->adr) {
for(i=0; i<group->async_data_groups; i++) {
malleability_add_data(group->async_array[i], group->async_qty[i], MPI_CHAR, 0, 1);
malleability_add_data(group->async_array[i], group->async_qty[i], MPI_CHAR, MAM_DATA_DISTRIBUTED, MAM_DATA_CONSTANT);
}
}
}
......@@ -545,7 +550,7 @@ void init_targets() {
size_t i, entries;
void *value = NULL;
malleability_get_data(&value, 0, 1, 1);
malleability_get_data(&value, 0, MAM_DATA_REPLICATED, MAM_DATA_CONSTANT);
group->grp = *((int *)value);
group->grp = group->grp + 1;
......@@ -554,7 +559,7 @@ void init_targets() {
init_results_data(results, config_file->n_resizes, config_file->n_stages, config_file->groups[group->grp].iters);
results_comm(results, ROOT, config_file->n_resizes, new_comm);
malleability_get_data(&value, 1, 1, 1);
malleability_get_data(&value, 1, MAM_DATA_REPLICATED, MAM_DATA_CONSTANT);
run_id = *((int *)value);
if(config_file->adr) {
......@@ -562,7 +567,7 @@ void init_targets() {
group->async_qty = (int *) malloc(entries * sizeof(int));
group->async_array = (char **) malloc(entries * sizeof(char *));
for(i=0; i<entries; i++) {
malleability_get_data(&value, i, 0, 1);
malleability_get_data(&value, i, MAM_DATA_DISTRIBUTED, MAM_DATA_CONSTANT);
group->async_array[i] = (char *)value;
group->async_qty[i] = DR_MAX_SIZE;
}
......@@ -575,7 +580,7 @@ void update_targets() { //FIXME Should not be needed after redist -- Declarar an
size_t i, entries;
void *value = NULL;
malleability_get_data(&value, 0, 1, 0);
malleability_get_data(&value, 0, MAM_DATA_REPLICATED, MAM_DATA_VARIABLE);
group->iter_start = *((int *)value);
if(config_file->sdr) {
......@@ -583,7 +588,7 @@ void update_targets() { //FIXME Should not be needed after redist -- Declarar an
group->sync_qty = (int *) malloc(entries * sizeof(int));
group->sync_array = (char **) malloc(entries * sizeof(char *));
for(i=0; i<entries; i++) {
malleability_get_data(&value, i, 0, 0);
malleability_get_data(&value, i, MAM_DATA_DISTRIBUTED, MAM_DATA_VARIABLE);
group->sync_array[i] = (char *)value;
group->sync_qty[i] = DR_MAX_SIZE;
}
......
......@@ -319,6 +319,7 @@ void recv_config_file(int root, MPI_Comm intercomm, configuration **config_file_
config_file->n_resizes = config_file->n_groups-1;
malloc_config_stages(config_file); // Initialise stage vectors to NULL
malloc_config_resizes(config_file); // Initialise group values
MPI_Bcast(config_file->stages, config_file->n_stages, config_file->iter_stage_type, root, intercomm);
MPI_Bcast(config_file->groups, config_file->n_groups, config_file->group_type, root, intercomm);
......@@ -332,7 +333,6 @@ void recv_config_file(int root, MPI_Comm intercomm, configuration **config_file_
*config_file_out = config_file;
}
/*
* Derived datatype to send 7 specific elements
* of the configuration structure in a single communication.
......@@ -341,10 +341,11 @@ void def_struct_config_file(configuration *config_file) {
int i, counts = 7;
int blocklengths[7] = {1, 1, 1, 1, 1, 1, 1};
MPI_Aint displs[counts], dir;
MPI_Datatype types[counts];
MPI_Datatype types[counts], type_size_t;
MPI_Type_match_size(MPI_TYPECLASS_INTEGER, sizeof(size_t), &type_size_t);
// Fill the types vector
types[0] = types[1] = types[2] = types[3] = MPI_UNSIGNED_LONG;
types[0] = types[1] = types[2] = types[3] = type_size_t;
types[4] = types[5] = types[6] = MPI_INT;
// Fill the displs vector
......@@ -373,12 +374,13 @@ void def_struct_groups(configuration *config_file) {
int i, counts = 8;
int blocklengths[8] = {1, 1, 1, 1, 1, 1, 1, 1};
MPI_Aint displs[counts], dir;
MPI_Datatype aux, types[counts];
MPI_Datatype types[counts], type_size_t, aux;
group_config_t *groups = config_file->groups;
MPI_Type_match_size(MPI_TYPECLASS_INTEGER, sizeof(size_t), &type_size_t);
// Fill the types vector
types[0] = types[1] = types[2] = types[4] = types[5] = MPI_INT;
types[3] = types[6] = MPI_UNSIGNED_LONG;
types[3] = types[6] = type_size_t;
types[7] = MPI_FLOAT;
// Fill the displs vector
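
Both struct-definition hunks above carry the size_t bugfix from the commit message: these fields were broadcast as MPI_UNSIGNED_LONG, which matches size_t only on LP64 platforms, so the code now asks MPI for an integer type whose size equals sizeof(size_t). A self-contained sketch of the technique (the function name is illustrative):

#include <mpi.h>
#include <stddef.h>

/* Return the predefined MPI datatype whose size matches size_t on this
   platform. On LLP64 systems unsigned long is 4 bytes while size_t is 8,
   so hard-coding MPI_UNSIGNED_LONG would truncate the transferred values. */
static MPI_Datatype size_t_mpi_type(void) {
    MPI_Datatype type_size_t;
    MPI_Type_match_size(MPI_TYPECLASS_INTEGER, (int) sizeof(size_t), &type_size_t);
    return type_size_t; /* predefined type: must not be MPI_Type_free'd */
}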
......@@ -424,7 +426,7 @@ void def_struct_groups_strategies(configuration *config_file) {
MPI_Get_address(config_file->groups, &dir);
for(i = 0; i < counts; i+=2) {
group = &config_file->groups[i/2];
group = &(config_file->groups[i/2]);
MPI_Get_address(group->ss, &displs[i]);
MPI_Get_address(group->rs, &displs[i+1]);
......
......@@ -304,8 +304,8 @@ double init_comm_ptop_pt(group_data group, configuration *config_file, iter_stag
free(stage->full_array);
stage->real_bytes = (stage->bytes && !stage->t_capped) ? stage->bytes : config_file->granularity;
stage->array = malloc(stage->real_bytes * sizeof(char));
stage->full_array = malloc(stage->real_bytes * sizeof(char));
stage->array = calloc(stage->real_bytes, sizeof(char));
stage->full_array = calloc(stage->real_bytes, sizeof(char));
if(compute && !stage->bytes && !stage->t_capped) {
time = init_emulation_comm_time(group, config_file, stage, comm);
......@@ -326,8 +326,8 @@ double init_comm_iptop_pt(group_data group, configuration *config_file, iter_sta
free(stage->reqs);
stage->real_bytes = (stage->bytes && !stage->t_capped) ? stage->bytes : config_file->granularity;
stage->array = malloc(stage->real_bytes * sizeof(char));
stage->full_array = malloc(stage->real_bytes * sizeof(char));
stage->array = calloc(stage->real_bytes, sizeof(char));
stage->full_array = calloc(stage->real_bytes, sizeof(char));
if(compute && !stage->bytes) { // t_capped is not considered in this case
stage->req_count = 2 * stage->operations; //FIXME Magic number
......@@ -354,7 +354,7 @@ double init_comm_bcast_pt(group_data group, configuration *config_file, iter_sta
free(stage->array);
stage->real_bytes = (stage->bytes && !stage->t_capped) ? stage->bytes : config_file->granularity;
stage->array = malloc(stage->real_bytes * sizeof(char)); //FIXME Valgrind reports uninitialised
stage->array = calloc(stage->real_bytes, sizeof(char)); //FIXME Valgrind reports uninitialised
if(compute && !stage->bytes && !stage->t_capped) {
time = init_emulation_comm_time(group, config_file, stage, comm);
......@@ -383,8 +383,8 @@ double init_comm_allgatherv_pt(group_data group, configuration *config_file, ite
get_block_dist(stage->real_bytes, group.myId, group.numP, &dist_data);
stage->my_bytes = dist_data.tamBl;
stage->array = malloc(stage->my_bytes * sizeof(char));
stage->full_array = malloc(stage->real_bytes * sizeof(char));
stage->array = calloc(stage->my_bytes, sizeof(char));
stage->full_array = calloc(stage->real_bytes, sizeof(char));
if(compute && !stage->bytes && !stage->t_capped) {
time = init_emulation_comm_time(group, config_file, stage, comm);
......@@ -404,9 +404,9 @@ double init_comm_reduce_pt(group_data group, configuration *config_file, iter_st
free(stage->full_array);
stage->real_bytes = (stage->bytes && !stage->t_capped) ? stage->bytes : config_file->granularity;
stage->array = malloc(stage->real_bytes * sizeof(char));
stage->array = calloc(stage->real_bytes, sizeof(char));
// The full array for the reduce needs the same size
stage->full_array = malloc(stage->real_bytes * sizeof(char));
stage->full_array = calloc(stage->real_bytes, sizeof(char));
if(compute && !stage->bytes && !stage->t_capped) {
time = init_emulation_comm_time(group, config_file, stage, comm);
......
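
The malloc-to-calloc swaps in these stage initialisers are the Valgrind bugfixes from the commit message: the emulated communication stages send these buffers without writing to them first, and transmitting malloc'd (indeterminate) bytes makes Valgrind report uses of uninitialised memory, whereas calloc returns zero-filled storage. A minimal sketch of the pattern (function name and size are illustrative):

#include <stdlib.h>

static char *comm_buffer(size_t n) {
    /* malloc(n) would leave the contents indeterminate; sending that
       buffer triggers "uninitialised byte(s)" warnings under Valgrind.
       calloc zero-initialises, so every byte is defined before first use. */
    return calloc(n, sizeof(char));
}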
......@@ -122,9 +122,9 @@ void sync_point2point(void *send, void *recv, MPI_Datatype datatype, struct Coun
init = s_counts.idI;
end = s_counts.idE;
if(mall_conf->spawn_method == MALL_SPAWN_MERGE && (s_counts.idI == mall->myId || s_counts.idE == mall->myId + 1)) {
offset = s_counts.displs[mall->myId] + datasize;
offset2 = r_counts.displs[mall->myId] + datasize;
memcpy(send+offset, recv+offset2, s_counts.counts[mall->myId]);
offset = s_counts.displs[mall->myId] * datasize;
offset2 = r_counts.displs[mall->myId] * datasize;
memcpy(recv+offset2, send+offset, s_counts.counts[mall->myId]);
if(s_counts.idI == mall->myId) init = s_counts.idI+1;
else end = s_counts.idE-1;
......@@ -156,6 +156,7 @@ void sync_point2point(void *send, void *recv, MPI_Datatype datatype, struct Coun
if(total_sends > 0) {
MPI_Waitall(total_sends, sends, MPI_STATUSES_IGNORE);
free(sends);
}
}
......
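
The sync_point2point hunk fixes two bugs in the local self-copy taken when a rank appears in both the senders' and the receivers' ranges: the element displacement must be multiplied by the datatype size to yield a byte offset (it was being added to it), and the copy must flow from the send buffer into the receive buffer (the arguments were swapped). A hedged sketch with illustrative names:

#include <string.h>

/* displs[] holds element displacements; datasize is the size of one
   element in bytes, so the byte offset is displs[myId] * datasize. */
static void self_copy(char *send, char *recv, const int *displs,
                      const int *counts, int myId, size_t datasize) {
    size_t s_off = displs[myId] * datasize; /* was: displs[myId] + datasize */
    size_t r_off = displs[myId] * datasize;
    memcpy(recv + r_off, send + s_off, counts[myId] * datasize); /* send -> recv */
}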
......@@ -193,7 +193,8 @@ void MAM_Check_configuration() {
if(MAM_I_contains_strat(mall_conf->spawn_strategies, MAM_STRAT_SPAWN_INTERCOMM)) {
MAM_I_remove_strat(&mall_conf->spawn_strategies, MAM_MASK_SPAWN_INTERCOMM);
}
if(MAM_I_contains_strat(mall_conf->red_strategies, MAM_STRAT_RED_WAIT_SOURCES)) {
if(!MAM_I_contains_strat(mall_conf->red_strategies, MAM_STRAT_RED_WAIT_TARGETS) &&
!MAM_I_contains_strat(mall_conf->red_strategies, MAM_STRAT_RED_PTHREAD)) {
MAM_I_set_red_strat(MAM_STRAT_RED_WAIT_TARGETS, &mall_conf->red_strategies);
}
}
......
......@@ -12,17 +12,22 @@ void MAM_Def_main_datatype() {
MPI_Aint displs[counts];
MPI_Datatype types[counts];
for(i=0; i<counts; i++) {
for(i=0; i<5; i++) {
blocklengths[i] = 1;
types[i] = MPI_UNSIGNED;
}
for(i=5; i<counts; i++) {
blocklengths[i] = 1;
types[i] = MPI_INT;
}
// Obtain base address
MPI_Get_address(&(mall_conf->spawn_method), &displs[0]);
MPI_Get_address(&(mall_conf->spawn_strategies), &displs[1]);
MPI_Get_address(&(mall_conf->spawn_dist), &displs[2]);
MPI_Get_address(&(mall_conf->red_method), &displs[3]);
MPI_Get_address(&(mall_conf->red_strategies), &displs[4]);
MPI_Get_address(&(mall->root_parents), &displs[5]);
MPI_Get_address(&(mall->num_parents), &displs[6]); //TODO Add only when Intercomm strat active?
MPI_Get_address(&(mall->num_cpus), &displs[7]);
......@@ -48,7 +53,7 @@ void MAM_Comm_main_structures(int rootBcast) {
MPI_Bcast(MPI_BOTTOM, 1, mall->struct_type, rootBcast, mall->intercomm);
if(mall->nodelist == NULL) {
mall->nodelist = malloc((mall->nodelist_len+1) * sizeof(char));
mall->nodelist = calloc(mall->nodelist_len+1, sizeof(char));
mall->nodelist[mall->nodelist_len] = '\0';
}
MPI_Bcast(mall->nodelist, mall->nodelist_len, MPI_CHAR, rootBcast, mall->intercomm);
......
......@@ -102,6 +102,7 @@ int MAM_Init(int root, MPI_Comm *comm, char *name_exec, void (*user_function)(vo
mall->name_exec = name_exec;
mall->nodelist = NULL;
mall->nodelist_len = 0;
rep_s_data->entries = 0;
rep_a_data->entries = 0;
......
......@@ -23,6 +23,11 @@ void MAM_Resume_redistribution(int *mam_state);
int MAM_Get_Reconf_Info(mam_user_reconf_t *reconf_info);
//void MAM_Data_add(void *data, size_t *index, size_t total_qty, MPI_Datatype type, int is_replicated, int is_constant);
//void MAM_Data_modify(void *data, size_t index, size_t total_qty, MPI_Datatype type, int is_replicated, int is_constant);
//void MAM_Data_get_entries(int is_replicated, int is_constant, size_t *entries);
//void MAM_Data_get_pointer(size_t index, int is_replicated, int is_constant, void **data, size_t *total_qty, size_t *local_qty);
void malleability_add_data(void *data, size_t total_qty, MPI_Datatype type, int is_replicated, int is_constant);
void malleability_modify_data(void *data, size_t index, size_t total_qty, MPI_Datatype type, int is_replicated, int is_constant);
void malleability_get_entries(size_t *entries, int is_replicated, int is_constant);
......
......@@ -43,4 +43,9 @@ enum mam_key_values{MAM_SPAWN_METHOD=0, MAM_SPAWN_STRATEGIES, MAM_PHYSICAL_DISTR
#define MALLEABILITY_CHILDREN 1
#define MALLEABILITY_NOT_CHILDREN 0
#define MAM_DATA_DISTRIBUTED 0
#define MAM_DATA_REPLICATED 1
#define MAM_DATA_VARIABLE 0
#define MAM_DATA_CONSTANT 1
#endif
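
Retrieval must use the same flag pair given at registration, as the init_targets and update_targets hunks above show: constant data is fetched once in init_targets, while variable data is re-fetched in update_targets after each redistribution. A short sketch (the index 0 and the int target are illustrative):

void *value = NULL;
/* Fetch entry 0 of the replicated, constant data registered earlier. */
malleability_get_data(&value, 0, MAM_DATA_REPLICATED, MAM_DATA_CONSTANT);
int grp = *((int *) value);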
......@@ -247,10 +247,11 @@ void def_malleability_entries(malleability_data_t *data_struct_rep, malleability
int counts = 2;
int blocklengths[counts];
MPI_Aint displs[counts];
MPI_Datatype types[counts];
MPI_Datatype types[counts], type_size_t;
MPI_Type_match_size(MPI_TYPECLASS_INTEGER, sizeof(size_t), &type_size_t);
blocklengths[0] = blocklengths[1] = 1;
types[0] = types[1] = MPI_UNSIGNED_LONG;
types[0] = types[1] = type_size_t;
// Obtain base address
MPI_Get_address(&(data_struct_rep->entries), &displs[0]);
......@@ -271,9 +272,10 @@ void def_malleability_qty_type(malleability_data_t *data_struct_rep, malleabilit
int counts = 6;
int blocklengths[counts];
MPI_Aint displs[counts];
MPI_Datatype types[counts];
MPI_Datatype types[counts], type_size_t;
MPI_Type_match_size(MPI_TYPECLASS_INTEGER, sizeof(size_t), &type_size_t);
types[0] = types[1] = types[3] = types[4] = MPI_UNSIGNED_LONG;
types[0] = types[1] = types[3] = types[4] = type_size_t;
types[2] = types[5] = MPI_INT;
blocklengths[0] = blocklengths[1] = blocklengths[2] = data_struct_rep->entries;
blocklengths[3] = blocklengths[4] = blocklengths[5] = data_struct_dist->entries;
......