Commit 2f81e29c authored by Iker Martín Álvarez

Merge branch 'results_feature' into 'dev'

Refactor of the results and configuration code

See merge request martini/malleability_benchmark!2
parents cff5fc10 83293e65
......@@ -20,12 +20,12 @@ static int handler(void* user, const char* section, const char* name,
int ret_value=1;
configuration* pconfig = (configuration*)user;
if(pconfig->actual_resize >= pconfig->n_resizes && pconfig->actual_stage >= pconfig->n_stages) {
if(pconfig->actual_group >= pconfig->n_groups && pconfig->actual_stage >= pconfig->n_stages) {
return 1; // There is no more work to perform
}
char *resize_name = malloc(10 * sizeof(char));
snprintf(resize_name, 10, "resize%zu", pconfig->actual_resize);
snprintf(resize_name, 10, "resize%zu", pconfig->actual_group);
char *stage_name = malloc(10 * sizeof(char));
snprintf(stage_name, 10, "stage%zu", pconfig->actual_stage);
......@@ -33,7 +33,8 @@ static int handler(void* user, const char* section, const char* name,
#define MATCH(s, n) strcmp(section, s) == 0 && strcmp(name, n) == 0
#define LAST(iter, total) iter < total
if (MATCH("general", "Total_Resizes")) {
pconfig->n_resizes = strtoul(value, NULL, 10) + 1;
pconfig->n_resizes = strtoul(value, NULL, 10);
pconfig->n_groups = pconfig->n_resizes+1;
user_functions->resizes_f(pconfig);
} else if (MATCH("general", "Total_Stages")) {
pconfig->n_stages = strtoul(value, NULL, 10);
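
The handler above follows the callback convention of the inih parser (same `handler(void*, const char*, const char*, const char*)` signature). A minimal, self-contained sketch of the bookkeeping this hunk introduces, namely that a file declaring R resizes describes R+1 process groups; the struct and file name below are illustrative, not part of the benchmark:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "ini.h"   /* inih parser, which this handler appears to be written for */

typedef struct { size_t n_resizes, n_groups; } mini_config;

static int mini_handler(void *user, const char *section, const char *name,
                        const char *value) {
    mini_config *cfg = (mini_config *)user;
    if (strcmp(section, "general") == 0 && strcmp(name, "Total_Resizes") == 0) {
        cfg->n_resizes = strtoul(value, NULL, 10);
        cfg->n_groups  = cfg->n_resizes + 1;   /* group 0 plus one group per resize */
    }
    return 1;   /* non-zero tells the parser to continue */
}

int main(void) {
    mini_config cfg = {0, 1};
    if (ini_parse("test.ini", mini_handler, &cfg) < 0) return 1;
    printf("%zu resizes -> %zu groups\n", cfg.n_resizes, cfg.n_groups);
    return 0;
}
```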
......@@ -59,26 +60,25 @@ static int handler(void* user, const char* section, const char* name,
pconfig->actual_stage = pconfig->actual_stage+1; // Ultimo elemento del grupo
// Resize stage
} else if (MATCH(resize_name, "Iters") && LAST(pconfig->actual_resize, pconfig->n_resizes)) {
//if(pconfig->actual_resize < pconfig->n_resizes)
pconfig->groups[pconfig->actual_resize].iters = atoi(value);
} else if (MATCH(resize_name, "Procs") && LAST(pconfig->actual_resize, pconfig->n_resizes)) {
pconfig->groups[pconfig->actual_resize].procs = atoi(value);
} else if (MATCH(resize_name, "FactorS") && LAST(pconfig->actual_resize, pconfig->n_resizes)) {
pconfig->groups[pconfig->actual_resize].factor =(float) atof(value);
} else if (MATCH(resize_name, "Dist") && LAST(pconfig->actual_resize, pconfig->n_resizes)) {
} else if (MATCH(resize_name, "Iters") && LAST(pconfig->actual_group, pconfig->n_groups)) {
pconfig->groups[pconfig->actual_group].iters = atoi(value);
} else if (MATCH(resize_name, "Procs") && LAST(pconfig->actual_group, pconfig->n_groups)) {
pconfig->groups[pconfig->actual_group].procs = atoi(value);
} else if (MATCH(resize_name, "FactorS") && LAST(pconfig->actual_group, pconfig->n_groups)) {
pconfig->groups[pconfig->actual_group].factor =(float) atof(value);
} else if (MATCH(resize_name, "Dist") && LAST(pconfig->actual_group, pconfig->n_groups)) {
int aux_value = MALL_DIST_COMPACT;
if (strcmp(value, "spread") == 0) {
aux_value = MALL_DIST_SPREAD;
}
pconfig->groups[pconfig->actual_resize].phy_dist = aux_value;
} else if (MATCH(resize_name, "Asynch_Redistribution_Type") && LAST(pconfig->actual_resize, pconfig->n_resizes)) {
pconfig->groups[pconfig->actual_resize].at = atoi(value);
} else if (MATCH(resize_name, "Spawn_Method") && LAST(pconfig->actual_resize, pconfig->n_resizes)) {
pconfig->groups[pconfig->actual_resize].sm = atoi(value);
} else if (MATCH(resize_name, "Spawn_Strategy") && LAST(pconfig->actual_resize, pconfig->n_resizes)) {
pconfig->groups[pconfig->actual_resize].ss = atoi(value);
pconfig->actual_resize = pconfig->actual_resize+1; // Ultimo elemento del grupo
pconfig->groups[pconfig->actual_group].phy_dist = aux_value;
} else if (MATCH(resize_name, "Asynch_Redistribution_Type") && LAST(pconfig->actual_group, pconfig->n_groups)) {
pconfig->groups[pconfig->actual_group].at = atoi(value);
} else if (MATCH(resize_name, "Spawn_Method") && LAST(pconfig->actual_group, pconfig->n_groups)) {
pconfig->groups[pconfig->actual_group].sm = atoi(value);
} else if (MATCH(resize_name, "Spawn_Strategy") && LAST(pconfig->actual_group, pconfig->n_groups)) {
pconfig->groups[pconfig->actual_group].ss = atoi(value);
pconfig->actual_group = pconfig->actual_group+1; // Ultimo elemento de la estructura
// Unkown case
} else {
......@@ -105,9 +105,10 @@ configuration *read_ini_file(char *file_name, ext_functions_t init_functions) {
printf("Error when reserving configuration structure\n");
return NULL;
}
config->n_resizes = 1;
config->n_resizes = 0;
config->n_groups = 1;
config->n_stages = 1;
config->actual_resize=0;
config->actual_group=0;
config->actual_stage=0;
user_functions = &init_functions;
......
......@@ -48,18 +48,17 @@ void def_results_type(results_data *results, int resizes, MPI_Datatype *results_
// Rellenar vector types
types[0] = types[1] = types[2] = types[3] = types[4] = types[5] = MPI_DOUBLE;
//blocklengths[3] = blocklengths[4] = resizes;
blocklengths[3] = blocklengths[4] = 1;
blocklengths[2] = blocklengths[3] = blocklengths[4] = blocklengths[5] = resizes;
// Rellenar vector displs
MPI_Get_address(results, &dir);
MPI_Get_address(&(results->sync_start), &displs[0]);
MPI_Get_address(&(results->async_start), &displs[1]);
MPI_Get_address(&(results->exec_start), &displs[2]);
MPI_Get_address(&(results->wasted_time), &displs[3]);
MPI_Get_address(&(results->spawn_real_time[0]), &displs[4]);
MPI_Get_address(&(results->spawn_time[0]), &displs[5]); //TODO Revisar si se puede simplificar //FIXME Si hay mas de un spawn error?
MPI_Get_address(&(results->exec_start), &displs[0]);
MPI_Get_address(&(results->wasted_time), &displs[1]);
MPI_Get_address(results->sync_time, &displs[2]);
MPI_Get_address(results->async_time, &displs[3]);
MPI_Get_address(results->spawn_real_time, &displs[4]);
MPI_Get_address(results->spawn_time, &displs[5]);
for(i=0;i<counts;i++) displs[i] -= dir;
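
The displacement bookkeeping above is the usual MPI derived-datatype recipe: take each member's address with MPI_Get_address, subtract the structure's base address, and describe array members with a blocklength equal to their element count. A minimal sketch of that recipe for a simplified struct (field names are illustrative, not the real results_data):

```c
#include <mpi.h>

typedef struct {
    double exec_start;
    double wasted_time;
    double spawn_time[4];   /* fixed-size here for simplicity; the real code stores
                               heap arrays and takes the address of their first element */
} mini_results;

static void def_mini_results_type(mini_results *r, MPI_Datatype *new_type) {
    int i, counts = 3;
    int blocklengths[3] = {1, 1, 4};
    MPI_Aint displs[3], base;
    MPI_Datatype types[3] = {MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE};

    MPI_Get_address(r, &base);
    MPI_Get_address(&(r->exec_start),  &displs[0]);
    MPI_Get_address(&(r->wasted_time), &displs[1]);
    MPI_Get_address(r->spawn_time,     &displs[2]);
    for (i = 0; i < counts; i++) displs[i] -= base;   /* displacements relative to r */

    MPI_Type_create_struct(counts, blocklengths, displs, types, new_type);
    MPI_Type_commit(new_type);
}
```

A single `MPI_Bcast(r, 1, type, root, comm)` then moves all three members at once; because the displacements are relative to the address passed as the buffer, pointing some of them at heap memory (as the code above does) is legal as long as the same base object is used in the communication call.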
......@@ -79,14 +78,14 @@ void def_results_type(results_data *results, int resizes, MPI_Datatype *results_
*/
void set_results_post_reconfig(results_data *results, int grp, int sdr, int adr) {
if(sdr) { // Si no hay datos sincronos, el tiempo es 0
results->sync_time[grp] = results->sync_end - results->sync_start;
results->sync_time[grp-1] = results->sync_end - results->sync_time[grp-1];
} else {
results->sync_time[grp] = 0;
results->sync_time[grp-1] = 0;
}
if(adr) { // Si no hay datos asincronos, el tiempo es 0
results->async_time[grp] = results->async_end - results->async_start;
results->async_time[grp-1] = results->async_end - results->async_time[grp-1];
} else {
results->async_time[grp] = 0;
results->async_time[grp-1] = 0;
}
}
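
Read together with the start_redistribution and end_redistribution hunks further down, this shows the new timing idiom: the separate sync_start/async_start timestamps are no longer used here; the start time is parked in sync_time[grp] / async_time[grp] and later overwritten with the elapsed time (hence the grp-1 index once the group counter has advanced). A runnable sketch of the idiom on a single slot:

```c
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    double sync_time[2] = {0.0, 0.0};
    int grp = 0;

    sync_time[grp] = MPI_Wtime();     /* slot temporarily holds the start timestamp */
    /* ... the synchronous redistribution would run here ... */
    grp = grp + 1;                    /* the application now executes as the next group */

    double sync_end = MPI_Wtime();
    sync_time[grp - 1] = sync_end - sync_time[grp - 1];   /* slot now holds elapsed seconds */

    printf("T_SR[0] = %lf\n", sync_time[0]);
    MPI_Finalize();
    return 0;
}
```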
......@@ -222,22 +221,22 @@ void print_global_results(results_data results, size_t resizes) {
size_t i;
printf("T_spawn: ");
for(i=0; i < resizes - 1; i++) {
for(i=0; i < resizes; i++) {
printf("%lf ", results.spawn_time[i]);
}
printf("\nT_spawn_real: ");
for(i=0; i< resizes - 1; i++) {
for(i=0; i< resizes; i++) {
printf("%lf ", results.spawn_real_time[i]);
}
printf("\nT_SR: ");
for(i=0; i < resizes - 1; i++) {
for(i=0; i < resizes; i++) {
printf("%lf ", results.sync_time[i]);
}
printf("\nT_AR: ");
for(i=0; i < resizes - 1; i++) {
for(i=0; i < resizes; i++) {
printf("%lf ", results.async_time[i]);
}
......@@ -303,15 +302,36 @@ void realloc_results_iters(results_data *results, size_t stages, size_t needed)
void free_results_data(results_data *results, size_t stages) {
size_t i;
if(results != NULL) {
free(results->spawn_time);
free(results->spawn_real_time);
free(results->sync_time);
free(results->async_time);
if(results->spawn_time != NULL) {
free(results->spawn_time);
results->spawn_time = NULL;
}
if(results->spawn_real_time != NULL) {
free(results->spawn_real_time);
results->spawn_real_time = NULL;
}
if(results->sync_time != NULL) {
free(results->sync_time);
results->sync_time = NULL;
}
if(results->async_time != NULL) {
free(results->async_time);
results->async_time = NULL;
}
free(results->iters_time);
if(results->iters_time != NULL) {
free(results->iters_time);
results->iters_time = NULL;
}
for(i=0; i<stages; i++) {
free(results->stage_times[i]);
if(results->stage_times[i] != NULL) {
free(results->stage_times[i]);
results->stage_times[i] = NULL;
}
}
if(results->stage_times != NULL) {
free(results->stage_times);
results->stage_times = NULL;
}
free(results->stage_times);
}
}
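
Since free(NULL) is already a no-op in C, the value of the guards above lies mainly in resetting each pointer, which makes a second call to free_results_data on the same structure harmless. The same pattern could be expressed with a small helper; a sketch (the FREE_AND_NULL name is hypothetical, not something this commit adds):

```c
#include <stdlib.h>

/* Free a heap member and reset it so a repeated cleanup call is safe. */
#define FREE_AND_NULL(p)        \
    do {                        \
        if ((p) != NULL) {      \
            free(p);            \
            (p) = NULL;         \
        }                       \
    } while (0)

/* Usage, mirroring the function above:
 *   FREE_AND_NULL(results->spawn_time);
 *   FREE_AND_NULL(results->spawn_real_time);
 *   FREE_AND_NULL(results->sync_time);
 *   FREE_AND_NULL(results->async_time);
 *   FREE_AND_NULL(results->iters_time);
 */
```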
......@@ -91,7 +91,6 @@ int main(int argc, char *argv[]) {
malleability_get_data(&value, 2, 1, 1);
group->iter_start = *((int *)value);
set_results_post_reconfig(results, group->grp, config_file->sdr, config_file->adr); //TODO Cambio al añadir nueva redistribucion
group->grp = group->grp + 1;
}
......@@ -107,10 +106,11 @@ int main(int argc, char *argv[]) {
group->grp = group->grp + 1;
set_benchmark_grp(group->grp);
if(group->grp != 0) {
obtain_op_times(0); //Obtener los nuevos valores de tiempo para el computo
obtain_op_times(1); //Obtener los nuevos valores de tiempo para el computo
set_results_post_reconfig(results, group->grp, config_file->sdr, config_file->adr);
}
if(config_file->n_resizes != group->grp + 1) { //TODO Llevar a otra funcion
if(config_file->n_groups != group->grp + 1) { //TODO Llevar a otra funcion
set_malleability_configuration(config_file->groups[group->grp+1].sm, config_file->groups[group->grp+1].ss,
config_file->groups[group->grp+1].phy_dist, config_file->groups[group->grp+1].at, -1);
set_children_number(config_file->groups[group->grp+1].procs); // TODO TO BE DEPRECATED
......@@ -129,7 +129,7 @@ int main(int argc, char *argv[]) {
}
print_local_results();
reset_results_index(results);
} while(config_file->n_resizes > group->grp + 1 && config_file->groups[group->grp+1].sm == MALL_SPAWN_MERGE);
} while(config_file->n_groups > group->grp + 1 && config_file->groups[group->grp+1].sm == MALL_SPAWN_MERGE);
//
// TERMINA LA EJECUCION ----------------------------------------------------------
......@@ -144,7 +144,7 @@ int main(int argc, char *argv[]) {
if(group->myId == ROOT && config_file->groups[group->grp].sm == MALL_SPAWN_MERGE) {
MPI_Abort(MPI_COMM_WORLD, -100);
}
free_application_data(); //FIXME Error al liberar memoria de SDR/ADR
free_application_data();
MPI_Finalize();
return 0;
......@@ -176,7 +176,7 @@ int work() {
iterate(state);
}
if(config_file->n_resizes != group->grp + 1)
if(config_file->n_groups != group->grp + 1)
state = malleability_checkpoint();
iter = 0;
......@@ -190,7 +190,7 @@ int work() {
}
if(config_file->n_resizes - 1 == group->grp) res=1;
if(config_file->n_groups == group->grp + 1) res=1;
if(state == MALL_ZOMBIE) res=state;
return res;
}
......@@ -351,7 +351,7 @@ int print_final_results() {
if(group->myId == ROOT) {
if(group->grp == config_file->n_resizes -1) {
if(config_file->n_groups == group->grp+1) {
file_name = NULL;
file_name = malloc(20 * sizeof(char));
if(file_name == NULL) return -1; // No ha sido posible alojar la memoria
......@@ -449,11 +449,11 @@ void free_application_data() {
free_malleability();
if(group->grp == 0) { //FIXME Revisar porque cuando es diferente a 0 no funciona
free_results_data(results, config_file->n_stages);
free(results);
}
free_results_data(results, config_file->n_stages);
free(results);
free_config(config_file);
free(group);
}
......
......@@ -54,11 +54,12 @@ typedef struct
typedef struct
{
size_t n_resizes, n_stages;
size_t actual_resize, actual_stage;
size_t n_groups, n_resizes, n_stages; // n_groups==n_resizes+1
size_t actual_group, actual_stage;
int rigid_times;
int granularity, sdr, adr;
MPI_Datatype config_type, group_type, iter_stage_type;
iter_stage_t *stages;
group_config_t *groups;
} configuration;
......
......@@ -10,9 +10,9 @@
void malloc_config_resizes(configuration *user_config);
void malloc_config_stages(configuration *user_config);
void def_struct_config_file(configuration *config_file, MPI_Datatype *config_type);
void def_struct_groups(group_config_t *groups, size_t n_resizes, MPI_Datatype *config_type);
void def_struct_iter_stage(iter_stage_t *stages, size_t n_stages, MPI_Datatype *config_type);
void def_struct_config_file(configuration *config_file);
void def_struct_groups(configuration *config_file);
void def_struct_iter_stage(configuration *config_file);
/*
* Inicializa una estructura de configuracion
......@@ -35,7 +35,8 @@ void init_config(char *file_name, configuration **user_config) {
configuration *config = NULL;
config = malloc(sizeof(configuration));
config->n_resizes=1;
config->n_resizes=0;
config->n_groups=1;
malloc_config_resizes(config);
config->n_stages=1;
malloc_config_stages(config);
......@@ -46,6 +47,7 @@ void init_config(char *file_name, configuration **user_config) {
}
*user_config=config;
}
def_struct_config_file(*user_config);
}
/*
......@@ -62,8 +64,8 @@ void init_config(char *file_name, configuration **user_config) {
void malloc_config_resizes(configuration *user_config) {
size_t i;
if(user_config != NULL) {
user_config->groups = malloc(sizeof(group_config_t) * user_config->n_resizes);
for(i=0; i<user_config->n_resizes; i++) {
user_config->groups = malloc(sizeof(group_config_t) * user_config->n_groups);
for(i=0; i<user_config->n_groups; i++) {
user_config->groups[i].iters = 0;
user_config->groups[i].procs = 1;
user_config->groups[i].sm = 0;
......@@ -72,6 +74,7 @@ void malloc_config_resizes(configuration *user_config) {
user_config->groups[i].at = 0;
user_config->groups[i].factor = 1;
}
def_struct_groups(user_config);
}
}
......@@ -93,9 +96,16 @@ void malloc_config_stages(configuration *user_config) {
user_config->stages[i].full_array = NULL;
user_config->stages[i].double_array = NULL;
user_config->stages[i].counts.counts = NULL;
user_config->stages[i].bytes = 0;
user_config->stages[i].my_bytes = 0;
user_config->stages[i].real_bytes = 0;
user_config->stages[i].operations = 0;
user_config->stages[i].pt = 0;
user_config->stages[i].t_op = 0;
user_config->stages[i].t_stage = 0;
user_config->stages[i].t_capped = 0;
}
def_struct_iter_stage(user_config);
}
}
......@@ -106,7 +116,6 @@ void malloc_config_stages(configuration *user_config) {
void free_config(configuration *user_config) {
size_t i;
if(user_config != NULL) {
for(i=0; i < user_config->n_stages; i++) {
if(user_config->stages[i].array != NULL) {
......@@ -124,8 +133,20 @@ void free_config(configuration *user_config) {
if(user_config->stages[i].counts.counts != NULL) {
freeCounts(&(user_config->stages[i].counts));
}
}
//Liberar tipos derivados
if(user_config->config_type != MPI_DATATYPE_NULL) {
MPI_Type_free(&(user_config->config_type));
user_config->config_type = MPI_DATATYPE_NULL;
}
if(user_config->group_type != MPI_DATATYPE_NULL) {
MPI_Type_free(&(user_config->group_type));
user_config->group_type = MPI_DATATYPE_NULL;
}
if(user_config->iter_stage_type != MPI_DATATYPE_NULL) {
MPI_Type_free(&(user_config->iter_stage_type));
user_config->iter_stage_type = MPI_DATATYPE_NULL;
}
free(user_config->groups);
free(user_config->stages);
......@@ -147,7 +168,7 @@ void print_config(configuration *user_config) {
printf("Stage %zu: PT=%d, T_stage=%lf, bytes=%d, T_capped=%d\n",
i, user_config->stages[i].pt, user_config->stages[i].t_stage, user_config->stages[i].real_bytes, user_config->stages[i].t_capped);
}
for(i=0; i<user_config->n_resizes; i++) {
for(i=0; i<user_config->n_groups; i++) {
printf("Group %zu: Iters=%d, Procs=%d, Factors=%f, Dist=%d, AT=%d, SM=%d, SS=%d\n",
i, user_config->groups[i].iters, user_config->groups[i].procs, user_config->groups[i].factor,
user_config->groups[i].phy_dist, user_config->groups[i].at, user_config->groups[i].sm,
......@@ -169,7 +190,7 @@ void print_config_group(configuration *user_config, size_t grp) {
if(grp > 0) {
parents = user_config->groups[grp-1].procs;
}
if(grp < user_config->n_resizes - 1) {
if(grp < user_config->n_groups - 1) {
sons = user_config->groups[grp+1].procs;
}
......@@ -202,25 +223,9 @@ void print_config_group(configuration *user_config, size_t grp) {
* configuracion al otro grupo.
*/
void send_config_file(configuration *config_file, int root, MPI_Comm intercomm) {
MPI_Datatype config_type, group_type, iter_stage_type;
// Obtener un tipo derivado para enviar todos los
// datos escalares con una sola comunicacion
def_struct_config_file(config_file, &config_type);
// Obtener un tipo derivado para enviar las estructuras de fases de iteracion
// con una sola comunicacion
def_struct_groups(&(config_file->groups[0]), config_file->n_resizes, &group_type);
def_struct_iter_stage(&(config_file->stages[0]), config_file->n_stages, &iter_stage_type);
MPI_Bcast(config_file, 1, config_type, root, intercomm);
MPI_Bcast(config_file->groups, config_file->n_resizes, group_type, root, intercomm);
MPI_Bcast(config_file->stages, config_file->n_stages, iter_stage_type, root, intercomm);
//Liberar tipos derivados
MPI_Type_free(&config_type);
MPI_Type_free(&group_type);
MPI_Type_free(&iter_stage_type);
MPI_Bcast(config_file, 1, config_file->config_type, root, intercomm);
MPI_Bcast(config_file->groups, config_file->n_groups, config_file->group_type, root, intercomm);
MPI_Bcast(config_file->stages, config_file->n_stages, config_file->iter_stage_type, root, intercomm);
}
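
The net effect of this hunk, together with the init_config and free_config changes above, is a create-once / reuse / free-once lifecycle: each derived datatype is committed when the configuration is built, cached inside the configuration struct, reused for every broadcast, and released in free_config. A minimal sketch of that lifecycle with a stand-in contiguous type (names are illustrative):

```c
#include <mpi.h>

typedef struct {
    double header[4];
    MPI_Datatype header_type;   /* cached, committed derived type */
} mini_conf;

static void mini_def_type(mini_conf *c) {                  /* create once, at init time */
    MPI_Type_contiguous(4, MPI_DOUBLE, &(c->header_type));
    MPI_Type_commit(&(c->header_type));
}

static void mini_bcast(mini_conf *c, int root, MPI_Comm comm) {
    MPI_Bcast(c->header, 1, c->header_type, root, comm);   /* reuse on every send */
}

static void mini_free(mini_conf *c) {                      /* release once, at teardown */
    if (c->header_type != MPI_DATATYPE_NULL) {
        MPI_Type_free(&(c->header_type));
        c->header_type = MPI_DATATYPE_NULL;
    }
}
```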
......@@ -237,41 +242,28 @@ void send_config_file(configuration *config_file, int root, MPI_Comm intercomm)
* la funcion "free_config".
*/
void recv_config_file(int root, MPI_Comm intercomm, configuration **config_file_out) {
MPI_Datatype config_type, group_type, iter_stage_type;
configuration *config_file = malloc(sizeof(configuration) * 1);
configuration *config_file = malloc(sizeof(configuration));
def_struct_config_file(config_file);
// Obtener un tipo derivado para recibir todos los
// datos escalares con una sola comunicacion
def_struct_config_file(config_file, &config_type);
MPI_Bcast(config_file, 1, config_type, root, intercomm);
MPI_Bcast(config_file, 1, config_file->config_type, root, intercomm);
//Inicializado de estructuras internas
config_file->groups = malloc(sizeof(group_config_t) * config_file->n_resizes);
config_file->stages = malloc(sizeof(iter_stage_t) * config_file->n_stages);
config_file->n_resizes = config_file->n_groups-1;
malloc_config_resizes(config_file); // Inicializar valores de grupos
malloc_config_stages(config_file); // Inicializar a NULL vectores stage
// Obtener un tipo derivado para enviar los tres vectores
// de enteros con una sola comunicacion
def_struct_groups(&(config_file->groups[0]), config_file->n_resizes, &group_type);
def_struct_iter_stage(&(config_file->stages[0]), config_file->n_stages, &iter_stage_type);
MPI_Bcast(config_file->groups, config_file->n_resizes, group_type, root, intercomm);
MPI_Bcast(config_file->stages, config_file->n_stages, iter_stage_type, root, intercomm);
//Liberar tipos derivados
MPI_Type_free(&config_type);
MPI_Type_free(&group_type);
MPI_Type_free(&iter_stage_type);
MPI_Bcast(config_file->groups, config_file->n_groups, config_file->group_type, root, intercomm);
MPI_Bcast(config_file->stages, config_file->n_stages, config_file->iter_stage_type, root, intercomm);
*config_file_out = config_file;
}
/*
* Tipo derivado para enviar 11 elementos especificos
* Tipo derivado para enviar 6 elementos especificos
* de la estructura de configuracion con una sola comunicacion.
*/
void def_struct_config_file(configuration *config_file, MPI_Datatype *config_type) {
void def_struct_config_file(configuration *config_file) {
int i, counts = 6;
int blocklengths[6] = {1, 1, 1, 1, 1, 1};
MPI_Aint displs[counts], dir;
......@@ -284,7 +276,7 @@ void def_struct_config_file(configuration *config_file, MPI_Datatype *config_typ
// Rellenar vector displs
MPI_Get_address(config_file, &dir);
MPI_Get_address(&(config_file->n_resizes), &displs[0]);
MPI_Get_address(&(config_file->n_groups), &displs[0]);
MPI_Get_address(&(config_file->n_stages), &displs[1]);
MPI_Get_address(&(config_file->granularity), &displs[2]);
MPI_Get_address(&(config_file->sdr), &displs[3]);
......@@ -293,8 +285,8 @@ void def_struct_config_file(configuration *config_file, MPI_Datatype *config_typ
for(i=0;i<counts;i++) displs[i] -= dir;
MPI_Type_create_struct(counts, blocklengths, displs, types, config_type);
MPI_Type_commit(config_type);
MPI_Type_create_struct(counts, blocklengths, displs, types, &(config_file->config_type));
MPI_Type_commit(&(config_file->config_type));
}
/*
......@@ -302,15 +294,16 @@ void def_struct_config_file(configuration *config_file, MPI_Datatype *config_typ
* de la estructuras de la configuracion de cada grupo
* en una sola comunicacion.
*/
void def_struct_groups(group_config_t *groups, size_t n_resizes, MPI_Datatype *config_type) {
void def_struct_groups(configuration *config_file) {
int i, counts = 7;
int blocklengths[7] = {1, 1, 1, 1, 1, 1, 1};
MPI_Aint displs[counts], dir;
MPI_Datatype aux, types[counts];
group_config_t *groups = config_file->groups;
// Rellenar vector types
types[0] = types[1] = types[2] = types[3] = types[4] = types[5] = MPI_INT;
types[6] = MPI_DOUBLE;
types[6] = MPI_FLOAT;
// Rellenar vector displs
MPI_Get_address(groups, &dir);
......@@ -325,47 +318,52 @@ void def_struct_groups(group_config_t *groups, size_t n_resizes, MPI_Datatype *c
for(i=0;i<counts;i++) displs[i] -= dir;
if (n_resizes == 1) {
MPI_Type_create_struct(counts, blocklengths, displs, types, config_type);
if (config_file->n_groups == 1) {
MPI_Type_create_struct(counts, blocklengths, displs, types, &(config_file->group_type));
MPI_Type_commit(&(config_file->group_type));
} else { // Si hay mas de una fase(estructura), el "extent" se modifica.
MPI_Type_create_struct(counts, blocklengths, displs, types, &aux);
// Tipo derivado para enviar N elementos de la estructura
MPI_Type_create_resized(aux, 0, sizeof(group_config_t), config_type);
MPI_Type_create_resized(aux, 0, sizeof(group_config_t), &(config_file->group_type));
MPI_Type_commit(&(config_file->group_type));
// MPI_Type_free(&aux); //FIXME It should be freed
}
MPI_Type_commit(config_type);
}
/*
* Tipo derivado para enviar elementos especificos
* de la estructuras de fases de iteracion en una sola comunicacion.
*/
void def_struct_iter_stage(iter_stage_t *stages, size_t n_stages, MPI_Datatype *config_type) {
void def_struct_iter_stage(configuration *config_file) {
int i, counts = 5;
int blocklengths[5] = {1, 1, 1, 1, 1};
MPI_Aint displs[counts], dir;
MPI_Datatype aux, types[counts];
iter_stage_t *stages = config_file->stages;
// Rellenar vector types
types[0] = types[3] = types[4] = MPI_INT;
types[1] = types[2] = MPI_DOUBLE;
types[0] = types[1] = types[2] = MPI_INT;
types[3] = types[4] = MPI_DOUBLE;
// Rellenar vector displs
MPI_Get_address(stages, &dir);
MPI_Get_address(&(stages->pt), &displs[0]);
MPI_Get_address(&(stages->t_stage), &displs[1]);
MPI_Get_address(&(stages->t_op), &displs[2]);
MPI_Get_address(&(stages->bytes), &displs[3]);
MPI_Get_address(&(stages->t_capped), &displs[4]);
MPI_Get_address(&(stages->bytes), &displs[1]);
MPI_Get_address(&(stages->t_capped), &displs[2]);
MPI_Get_address(&(stages->t_stage), &displs[3]);
MPI_Get_address(&(stages->t_op), &displs[4]);
for(i=0;i<counts;i++) displs[i] -= dir;
if (n_stages == 1) {
MPI_Type_create_struct(counts, blocklengths, displs, types, config_type);
if (config_file->n_stages == 1) {
MPI_Type_create_struct(counts, blocklengths, displs, types, &(config_file->iter_stage_type));
MPI_Type_commit(&(config_file->iter_stage_type));
} else { // Si hay mas de una fase(estructura), el "extent" se modifica.
MPI_Type_create_struct(counts, blocklengths, displs, types, &aux);
// Tipo derivado para enviar N elementos de la estructura
MPI_Type_create_resized(aux, 0, sizeof(iter_stage_t), config_type);
MPI_Type_create_resized(aux, 0, sizeof(iter_stage_t), &(config_file->iter_stage_type));
MPI_Type_commit(&(config_file->iter_stage_type));
// MPI_Type_free(&aux); //FIXME It should be freed
}
MPI_Type_commit(config_type);
}
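
When more than one struct is sent, the branch above resizes the type so its extent equals sizeof(iter_stage_t); otherwise element i of the array would not be read at offset i * sizeof(iter_stage_t), because members that are not transmitted (pointers such as array or double_array) are left out of the type. A minimal sketch of that resize step follows; it also frees the intermediate type, which is what the FIXME refers to and is safe because freeing a datatype does not invalidate types built from it (struct fields here are illustrative):

```c
#include <mpi.h>
#include <stddef.h>

typedef struct {
    int    pt;
    double t_stage;
    char  *scratch;     /* not transmitted: this is why the extent must be forced */
} mini_stage;

static void def_mini_stage_type(mini_stage *stages, MPI_Datatype *new_type) {
    int i, counts = 2;
    int blocklengths[2] = {1, 1};
    MPI_Aint displs[2], base;
    MPI_Datatype aux, types[2] = {MPI_INT, MPI_DOUBLE};

    MPI_Get_address(stages, &base);
    MPI_Get_address(&(stages->pt),      &displs[0]);
    MPI_Get_address(&(stages->t_stage), &displs[1]);
    for (i = 0; i < counts; i++) displs[i] -= base;

    MPI_Type_create_struct(counts, blocklengths, displs, types, &aux);
    /* Force the extent so element i of an array of mini_stage starts at
     * i * sizeof(mini_stage). */
    MPI_Type_create_resized(aux, 0, sizeof(mini_stage), new_type);
    MPI_Type_commit(new_type);
    MPI_Type_free(&aux);    /* safe: new_type keeps its own reference */
}
```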
......@@ -239,7 +239,7 @@ double init_comm_bcast_pt(group_data group, configuration *config_file, iter_sta
free(stage->array);
stage->real_bytes = (stage->bytes && !stage->t_capped) ? stage->bytes : config_file->granularity;
stage->array = malloc(stage->real_bytes * sizeof(char));
stage->array = malloc(stage->real_bytes * sizeof(char)); //FIXME Valgrind indica unitialised
if(compute && !stage->bytes && !stage->t_capped) {
time = init_emulation_comm_time(group, config_file, stage, comm);
......
......@@ -110,6 +110,8 @@ int init_malleability(int myId, int numP, int root, MPI_Comm comm, char *name_ex
state = MALL_NOT_STARTED;
zombies_service_init();
// Si son el primer grupo de procesos, obtienen los datos de los padres
MPI_Comm_get_parent(&(mall->intercomm));
if(mall->intercomm != MPI_COMM_NULL ) {
......@@ -125,7 +127,6 @@ int init_malleability(int myId, int numP, int root, MPI_Comm comm, char *name_ex
//TODO Get name of each process and create real nodelist
}
zombies_service_init();
return MALLEABILITY_NOT_CHILDREN;
}
......@@ -134,7 +135,7 @@ int init_malleability(int myId, int numP, int root, MPI_Comm comm, char *name_ex
* de maleabilidad y asegura que los zombies
* despierten si los hubiese.
*/
void free_malleability() {
void free_malleability() {
free_malleability_data_struct(rep_s_data);
free_malleability_data_struct(rep_a_data);
free_malleability_data_struct(dist_s_data);
......@@ -494,8 +495,8 @@ void Children_init() {
mall_conf->results = (results_data *) malloc(sizeof(results_data));
init_results_data(mall_conf->results, mall_conf->config_file->n_resizes, mall_conf->config_file->n_stages, RESULTS_INIT_DATA_QTY);
comm_data_info(rep_a_data, dist_a_data, MALLEABILITY_CHILDREN, mall->myId, root_parents, mall->intercomm);
if(dist_a_data->entries || rep_a_data->entries) { // Recibir datos asincronos
comm_data_info(rep_a_data, dist_a_data, MALLEABILITY_CHILDREN, mall->myId, root_parents, mall->intercomm);
if(mall_conf->comm_type == MAL_USE_NORMAL || mall_conf->comm_type == MAL_USE_IBARRIER || mall_conf->comm_type == MAL_USE_POINT) {
recv_data(numP_parents, dist_a_data, 1);
......@@ -597,9 +598,9 @@ int start_redistribution() {
send_config_file(mall_conf->config_file, rootBcast, mall->intercomm);
comm_node_data(rootBcast, MALLEABILITY_NOT_CHILDREN);
comm_data_info(rep_a_data, dist_a_data, MALLEABILITY_NOT_CHILDREN, mall->myId, mall->root, mall->intercomm);
if(dist_a_data->entries || rep_a_data->entries) { // Enviar datos asincronos
mall_conf->results->async_start = MPI_Wtime();
comm_data_info(rep_a_data, dist_a_data, MALLEABILITY_NOT_CHILDREN, mall->myId, mall->root, mall->intercomm);
mall_conf->results->async_time[mall_conf->grp] = MPI_Wtime();
if(mall_conf->comm_type == MAL_USE_THREAD) {
return thread_creation();
} else {
......@@ -688,6 +689,7 @@ int end_redistribution() {
comm_data_info(rep_s_data, dist_s_data, MALLEABILITY_NOT_CHILDREN, mall->myId, mall->root, mall->intercomm);
if(dist_s_data->entries || rep_s_data->entries) { // Enviar datos sincronos
mall_conf->results->sync_time[mall_conf->grp] = MPI_Wtime();
send_data(mall->numC, dist_s_data, MALLEABILITY_USE_SYNCHRONOUS);
// TODO Crear funcion especifica y anyadir para Asinc
......
......@@ -95,9 +95,9 @@ void comm_data_info(malleability_data_t *data_struct_rep, malleability_data_t *d
def_malleability_entries(data_struct_dist, data_struct_rep, &entries_type);
MPI_Bcast(MPI_BOTTOM, 1, entries_type, rootBcast, intercomm);
if(is_children_group) {
if(data_struct_rep->entries != 0) init_malleability_data_struct(data_struct_rep, data_struct_rep->entries);
if(data_struct_dist->entries != 0) init_malleability_data_struct(data_struct_dist, data_struct_dist->entries);
if(is_children_group && ( data_struct_rep->entries != 0 || data_struct_dist->entries != 0 )) {
init_malleability_data_struct(data_struct_rep, data_struct_rep->entries);
init_malleability_data_struct(data_struct_dist, data_struct_dist->entries);
}
def_malleability_qty_type(data_struct_dist, data_struct_rep, &struct_type);
......@@ -182,10 +182,18 @@ void free_malleability_data_struct(malleability_data_t *data_struct) {
//free(data_struct->requests[i]); //TODO Plantear como crearlos
}
free(data_struct->qty);
free(data_struct->types);
free(data_struct->requests);
free(data_struct->arrays);
if(data_struct->qty != NULL) {
free(data_struct->qty);
}
if(data_struct->types != NULL) {
free(data_struct->types);
}
if(data_struct->requests != NULL) {
free(data_struct->requests);
}
if(data_struct->arrays != NULL) {
free(data_struct->arrays);
}
}
}
......