Commit 4b37d0dd authored by iker_martin

Code refactor to simplify variable names

parent 8542201a
......@@ -12,7 +12,7 @@ void malloc_config_resizes(configuration *user_config, int resizes);
void init_config_stages(configuration *user_config, int stages);
void def_struct_config_file(configuration *config_file, MPI_Datatype *config_type);
void def_struct_config_file_array(configuration *config_file, MPI_Datatype *config_type);
void def_struct_iter_stage(iter_stage_t *iter_stage, int stages, MPI_Datatype *config_type);
void def_struct_iter_stage(iter_stage_t *stages, int n_stages, MPI_Datatype *config_type);
/*
* Function used to read the configuration file
......@@ -26,64 +26,62 @@ static int handler(void* user, const char* section, const char* name,
configuration* pconfig = (configuration*)user;
char *resize_name = malloc(10 * sizeof(char));
int act_resize = pconfig->actual_resize;
snprintf(resize_name, 10, "resize%d", act_resize);
snprintf(resize_name, 10, "resize%d", pconfig->actual_resize);
char *iter_name = malloc(10 * sizeof(char));
int act_iter = pconfig->actual_iter;
snprintf(iter_name, 10, "stage%d", act_iter);
char *stage_name = malloc(10 * sizeof(char));
snprintf(stage_name, 10, "stage%d", pconfig->actual_stage);
#define MATCH(s, n) strcmp(section, s) == 0 && strcmp(name, n) == 0
if (MATCH("general", "resizes")) {
pconfig->resizes = atoi(value) + 1;
malloc_config_resizes(pconfig, pconfig->resizes);
} else if (MATCH("general", "iter_stages")) {
pconfig->iter_stages = atoi(value);
pconfig->iter_stage = malloc(sizeof(iter_stage_t) * pconfig->iter_stages);
init_config_stages(pconfig, pconfig->iter_stages);
} else if (MATCH("general", "matrix_tam")) { //TODO Refactor cambiar nombre
pconfig->matrix_tam = atoi(value);
if (MATCH("general", "R")) {
pconfig->n_resizes = atoi(value) + 1;
malloc_config_resizes(pconfig, pconfig->n_resizes);
} else if (MATCH("general", "S")) {
pconfig->n_stages = atoi(value);
pconfig->stages = malloc(sizeof(iter_stage_t) * pconfig->n_stages);
init_config_stages(pconfig, pconfig->n_stages);
} else if (MATCH("general", "Granularity")) {
pconfig->granularity = atoi(value);
} else if (MATCH("general", "SDR")) {
pconfig->sdr = atoi(value);
} else if (MATCH("general", "ADR")) {
pconfig->adr = atoi(value);
} else if (MATCH("general", "AIB")) { //TODO Refactor cambiar nombre
pconfig->aib = atoi(value);
} else if (MATCH("general", "CST")) {
pconfig->cst = atoi(value);
} else if (MATCH("general", "CSS")) {
pconfig->css = atoi(value);
} else if (MATCH("general", "AT")) {
pconfig->at = atoi(value);
} else if (MATCH("general", "SM")) {
pconfig->sm = atoi(value);
} else if (MATCH("general", "SS")) {
pconfig->ss = atoi(value);
// Iter stage
} else if (MATCH(iter_name, "PT")) {
if(pconfig->actual_iter < pconfig->iter_stages)
pconfig->iter_stage[act_iter].pt = atoi(value);
} else if (MATCH(iter_name, "bytes")) {
if(pconfig->actual_iter < pconfig->iter_stages)
pconfig->iter_stage[act_iter].bytes = atoi(value);
} else if (MATCH(iter_name, "t_stage")) {
if(pconfig->actual_iter < pconfig->iter_stages) {
pconfig->iter_stage[act_iter].t_stage = atof(value);
pconfig->actual_iter = pconfig->actual_iter+1; // Last element of the group
} else if (MATCH(stage_name, "PT")) {
if(pconfig->actual_stage < pconfig->n_stages)
pconfig->stages[pconfig->actual_stage].pt = atoi(value);
} else if (MATCH(stage_name, "bytes")) {
if(pconfig->actual_stage < pconfig->n_stages)
pconfig->stages[pconfig->actual_stage].bytes = atoi(value);
} else if (MATCH(stage_name, "t_stage")) {
if(pconfig->actual_stage < pconfig->n_stages) {
pconfig->stages[pconfig->actual_stage].t_stage = atof(value);
pconfig->actual_stage = pconfig->actual_stage+1; // Last element of the group
}
// Resize stage
} else if (MATCH(resize_name, "iters")) {
if(pconfig->actual_resize < pconfig->resizes)
pconfig->iters[act_resize] = atoi(value);
} else if (MATCH(resize_name, "procs")) {
if(pconfig->actual_resize < pconfig->resizes)
pconfig->procs[act_resize] = atoi(value);
} else if (MATCH(resize_name, "factor")) {
if(pconfig->actual_resize < pconfig->resizes)
pconfig->factors[act_resize] = atof(value);
} else if (MATCH(resize_name, "physical_dist")) {
if(pconfig->actual_resize < pconfig->resizes) {
} else if (MATCH(resize_name, "Iters")) {
if(pconfig->actual_resize < pconfig->n_resizes)
pconfig->iters[pconfig->actual_resize] = atoi(value);
} else if (MATCH(resize_name, "Procs")) {
if(pconfig->actual_resize < pconfig->n_resizes)
pconfig->procs[pconfig->actual_resize] = atoi(value);
} else if (MATCH(resize_name, "FactorS")) {
if(pconfig->actual_resize < pconfig->n_resizes)
pconfig->factors[pconfig->actual_resize] = atof(value);
} else if (MATCH(resize_name, "Dist")) {
if(pconfig->actual_resize < pconfig->n_resizes) {
char *aux = strdup(value);
if (strcmp(aux, "node") == 0) {
pconfig->phy_dist[act_resize] = COMM_PHY_NODES;
if (strcmp(aux, "spread") == 0) {
pconfig->phy_dist[pconfig->actual_resize] = COMM_PHY_SPREAD;
} else {
pconfig->phy_dist[act_resize] = COMM_PHY_CPU;
pconfig->phy_dist[pconfig->actual_resize] = COMM_PHY_COMPACT;
}
free(aux);
pconfig->actual_resize = pconfig->actual_resize+1; // Last element of the group
......@@ -94,7 +92,7 @@ static int handler(void* user, const char* section, const char* name,
}
free(resize_name);
free(iter_name);
free(stage_name);
return 1;
}
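Note (editor): the handler above follows the callback convention of the bundled inih parser (IOcodes/ini.c): ini_parse() calls it once per key/value pair and the MATCH macro selects the section and key. A minimal, self-contained sketch of the same pattern, assuming the inih header is reachable as "ini.h"; the demo struct and the choice of keys are illustrative, not the project's full configuration:

```c
/* Minimal sketch of the inih callback pattern used by read_ini.c.
 * The struct, the handled keys and the "ini.h" include path are assumptions. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "ini.h"

typedef struct { int n_resizes, granularity; } demo_config;

static int demo_handler(void *user, const char *section, const char *name,
                        const char *value) {
    demo_config *cfg = (demo_config *)user;
    #define MATCH(s, n) (strcmp(section, s) == 0 && strcmp(name, n) == 0)
    if (MATCH("general", "R")) {
        cfg->n_resizes = atoi(value) + 1;   /* R resizes imply R+1 process groups */
    } else if (MATCH("general", "Granularity")) {
        cfg->granularity = atoi(value);
    }
    return 1;                               /* nonzero: pair handled (or ignored) */
}

int main(int argc, char *argv[]) {
    demo_config cfg = {0, 0};
    if (argc < 2 || ini_parse(argv[1], demo_handler, &cfg) < 0) {
        fprintf(stderr, "Can't load configuration file\n");
        return 1;
    }
    printf("n_resizes=%d granularity=%d\n", cfg.n_resizes, cfg.granularity);
    return 0;
}
```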
......@@ -114,7 +112,7 @@ configuration *read_ini_file(char *file_name) {
return NULL;
}
config->actual_resize=0;
config->actual_iter=0;
config->actual_stage=0;
if(ini_parse(file_name, handler, config) < 0) { // Get the configuration
printf("Can't load '%s'\n", file_name);
......@@ -155,12 +153,12 @@ void malloc_config_resizes(configuration *user_config, int resizes) {
void init_config_stages(configuration *user_config, int stages) {
int i;
if(user_config != NULL) {
for(i=0; i<user_config->iter_stages; i++) {
user_config->iter_stage[i].array = NULL;
user_config->iter_stage[i].full_array = NULL;
user_config->iter_stage[i].double_array = NULL;
user_config->iter_stage[i].counts.counts = NULL;
user_config->iter_stage[i].real_bytes = 0;
for(i=0; i<user_config->n_stages; i++) {
user_config->stages[i].array = NULL;
user_config->stages[i].full_array = NULL;
user_config->stages[i].double_array = NULL;
user_config->stages[i].counts.counts = NULL;
user_config->stages[i].real_bytes = 0;
}
}
}
......@@ -176,27 +174,27 @@ void free_config(configuration *user_config) {
free(user_config->factors);
free(user_config->phy_dist);
for(i=0; i < user_config->iter_stages; i++) {
for(i=0; i < user_config->n_stages; i++) {
if(user_config->iter_stage[i].array != NULL) {
free(user_config->iter_stage[i].array);
user_config->iter_stage[i].array = NULL;
if(user_config->stages[i].array != NULL) {
free(user_config->stages[i].array);
user_config->stages[i].array = NULL;
}
if(user_config->iter_stage[i].full_array != NULL) {
free(user_config->iter_stage[i].full_array);
user_config->iter_stage[i].full_array = NULL;
if(user_config->stages[i].full_array != NULL) {
free(user_config->stages[i].full_array);
user_config->stages[i].full_array = NULL;
}
if(user_config->iter_stage[i].double_array != NULL) {
free(user_config->iter_stage[i].double_array);
user_config->iter_stage[i].double_array = NULL;
if(user_config->stages[i].double_array != NULL) {
free(user_config->stages[i].double_array);
user_config->stages[i].double_array = NULL;
}
if(user_config->iter_stage[i].counts.counts != NULL) {
freeCounts(&(user_config->iter_stage[i].counts));
if(user_config->stages[i].counts.counts != NULL) {
freeCounts(&(user_config->stages[i].counts));
}
}
//free(user_config->iter_stage); //FIXME Memory error related to the malleability folder
//free(user_config->stages); //FIXME Memory error related to the malleability folder
free(user_config);
}
}
......@@ -208,14 +206,15 @@ void free_config(configuration *user_config) {
void print_config(configuration *user_config, int grp) {
if(user_config != NULL) {
int i;
printf("Config loaded: resizes=%d, stages=%d, matrix=%d, sdr=%d, adr=%d, aib=%d, css=%d, cst=%d || grp=%d\n",
user_config->resizes, user_config->iter_stages, user_config->matrix_tam, user_config->sdr, user_config->adr, user_config->aib, user_config->css, user_config->cst, grp);
for(i=0; i<user_config->iter_stages; i++) {
printf("Stage %d: PT=%d, T_stage=%lf, bytes=%d\n",
i, user_config->iter_stage[i].pt, user_config->iter_stage[i].t_stage, user_config->iter_stage[i].real_bytes);
printf("Config loaded: resizes=%d, stages=%d, granularity=%d, sdr=%d, adr=%d, at=%d, sm=%d, ss=%d, latency=%lf, bw=%lf || grp=%d\n",
user_config->n_resizes, user_config->n_stages, user_config->granularity, user_config->sdr, user_config->adr,
user_config->at, user_config->sm, user_config->ss, user_config->latency_m, user_config->bw_m, grp);
for(i=0; i<user_config->n_stages; i++) {
printf("Stage %d: PT=%d, T_stage=%lf, bytes=%d, Intercept=%lf, Slope=%lf\n",
i, user_config->stages[i].pt, user_config->stages[i].t_stage, user_config->stages[i].real_bytes, user_config->stages[i].intercept, user_config->stages[i].slope);
}
for(i=0; i<user_config->resizes; i++) {
printf("Resize %d: Iters=%d, Procs=%d, Factors=%f, Phy=%d\n",
for(i=0; i<user_config->n_resizes; i++) {
printf("Resize %d: Iters=%d, Procs=%d, Factors=%f, Dist=%d\n",
i, user_config->iters[i], user_config->procs[i], user_config->factors[i], user_config->phy_dist[i]);
}
}
......@@ -234,15 +233,15 @@ void print_config_group(configuration *user_config, int grp) {
if(grp > 0) {
parents = user_config->procs[grp-1];
}
if(grp < user_config->resizes - 1) {
if(grp < user_config->n_resizes - 1) {
sons = user_config->procs[grp+1];
}
printf("Config: matrix=%d, sdr=%d, adr=%d, aib=%d, css=%d, cst=%d, latency=%lf, bw=%lf\n",
user_config->matrix_tam, user_config->sdr, user_config->adr, user_config->aib, user_config->css, user_config->cst, user_config->latency_m, user_config->bw_m);
for(i=0; i<user_config->iter_stages; i++) {
printf("Stage %d: PT=%d, T_stage=%lf, bytes=%d\n",
i, user_config->iter_stage[i].pt, user_config->iter_stage[i].t_stage, user_config->iter_stage[i].real_bytes);
printf("Config: granularity=%d, sdr=%d, adr=%d, at=%d, sm=%d, ss=%d, latency=%lf, bw=%lf\n",
user_config->granularity, user_config->sdr, user_config->adr, user_config->at, user_config->sm, user_config->ss, user_config->latency_m, user_config->bw_m);
for(i=0; i<user_config->n_stages; i++) {
printf("Stage %d: PT=%d, T_stage=%lf, bytes=%d, Intercept=%lf, Slope=%lf\n",
i, user_config->stages[i].pt, user_config->stages[i].t_stage, user_config->stages[i].real_bytes, user_config->stages[i].intercept, user_config->stages[i].slope);
}
printf("Config Group: iters=%d, factor=%f, phy=%d, procs=%d, parents=%d, sons=%d\n",
user_config->iters[grp], user_config->factors[grp], user_config->phy_dist[grp], user_config->procs[grp], parents, sons);
......@@ -278,12 +277,12 @@ void send_config_file(configuration *config_file, int root, MPI_Comm intercomm)
// Build a derived datatype to send the iteration-stage structures
// with a single communication
def_struct_iter_stage(&(config_file->iter_stage[0]), config_file->iter_stages, &iter_stage_type);
def_struct_iter_stage(&(config_file->stages[0]), config_file->n_stages, &iter_stage_type);
MPI_Bcast(config_file, 1, config_type, root, intercomm);
MPI_Bcast(config_file, 1, config_type_array, root, intercomm);
MPI_Bcast(config_file->factors, config_file->resizes, MPI_FLOAT, root, intercomm);
MPI_Bcast(config_file->iter_stage, config_file->iter_stages, iter_stage_type, root, intercomm);
MPI_Bcast(config_file->factors, config_file->n_resizes, MPI_FLOAT, root, intercomm);
MPI_Bcast(config_file->stages, config_file->n_stages, iter_stage_type, root, intercomm);
//Free the derived datatypes
MPI_Type_free(&config_type);
......@@ -315,24 +314,24 @@ void recv_config_file(int root, MPI_Comm intercomm, configuration **config_file_
MPI_Bcast(config_file, 1, config_type, root, intercomm);
//Initialize internal structures
malloc_config_resizes(config_file, config_file->resizes); // Allocate memory for the vectors
config_file->iter_stage = malloc(sizeof(iter_stage_t) * config_file->iter_stages);
malloc_config_resizes(config_file, config_file->n_resizes); // Allocate memory for the vectors
config_file->stages = malloc(sizeof(iter_stage_t) * config_file->n_stages);
// Build a derived datatype to send the three integer vectors
// with a single communication
def_struct_config_file_array(config_file, &config_type_array);
def_struct_iter_stage(&(config_file->iter_stage[0]), config_file->iter_stages, &iter_stage_type);
def_struct_iter_stage(&(config_file->stages[0]), config_file->n_stages, &iter_stage_type);
MPI_Bcast(config_file, 1, config_type_array, root, intercomm);
MPI_Bcast(config_file->factors, config_file->resizes, MPI_FLOAT, root, intercomm);
MPI_Bcast(config_file->iter_stage, config_file->iter_stages, iter_stage_type, root, intercomm);
MPI_Bcast(config_file->factors, config_file->n_resizes, MPI_FLOAT, root, intercomm);
MPI_Bcast(config_file->stages, config_file->n_stages, iter_stage_type, root, intercomm);
//Free the derived datatypes
MPI_Type_free(&config_type);
MPI_Type_free(&config_type_array);
MPI_Type_free(&iter_stage_type);
init_config_stages(config_file, config_file->iter_stages); // Initialize vectors to NULL
init_config_stages(config_file, config_file->n_stages); // Initialize vectors to NULL
*config_file_out = config_file;
}
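Note (editor): recv_config_file mirrors send_config_file: the fixed-size part of the structure is broadcast first, the receiver then allocates the variable-length vectors from the counts it just received, and only afterwards are the arrays and stage structures broadcast with their own derived datatypes. A minimal sketch of that order of operations, with a hypothetical one-field header rather than the project's configuration type:

```c
/* Sketch: broadcast a fixed-size header, allocate from the received count,
 * then broadcast the variable-length payload. Names are illustrative. */
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

typedef struct { int n_items; } header_t;

int main(int argc, char *argv[]) {
    int rank;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    header_t hdr = {0};
    if (rank == 0) hdr.n_items = 4;                          /* root knows the sizes */
    MPI_Bcast(&hdr.n_items, 1, MPI_INT, 0, MPI_COMM_WORLD);  /* 1) fixed-size header */

    float *payload = malloc(sizeof(float) * hdr.n_items);    /* 2) allocate everywhere */
    if (rank == 0)
        for (int i = 0; i < hdr.n_items; i++) payload[i] = 0.5f * i;

    MPI_Bcast(payload, hdr.n_items, MPI_FLOAT, 0, MPI_COMM_WORLD); /* 3) payload */

    printf("Rank %d: last value %f\n", rank, payload[hdr.n_items - 1]);
    free(payload);
    MPI_Finalize();
    return 0;
}
```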
......@@ -353,15 +352,15 @@ void def_struct_config_file(configuration *config_file, MPI_Datatype *config_typ
// Fill the displs vector
MPI_Get_address(config_file, &dir);
MPI_Get_address(&(config_file->resizes), &displs[0]);
MPI_Get_address(&(config_file->iter_stages), &displs[1]);
MPI_Get_address(&(config_file->actual_resize), &displs[2]);
MPI_Get_address(&(config_file->matrix_tam), &displs[3]);
MPI_Get_address(&(config_file->n_resizes), &displs[0]);
MPI_Get_address(&(config_file->n_stages), &displs[1]);
MPI_Get_address(&(config_file->actual_resize), &displs[2]); // TODO Refactor Is it necessary to send it?
MPI_Get_address(&(config_file->granularity), &displs[3]);
MPI_Get_address(&(config_file->sdr), &displs[4]);
MPI_Get_address(&(config_file->adr), &displs[5]);
MPI_Get_address(&(config_file->aib), &displs[6]);
MPI_Get_address(&(config_file->css), &displs[7]);
MPI_Get_address(&(config_file->cst), &displs[8]);
MPI_Get_address(&(config_file->at), &displs[6]);
MPI_Get_address(&(config_file->ss), &displs[7]);
MPI_Get_address(&(config_file->sm), &displs[8]);
MPI_Get_address(&(config_file->latency_m), &displs[9]);
MPI_Get_address(&(config_file->bw_m), &displs[10]);
......@@ -385,7 +384,7 @@ void def_struct_config_file_array(configuration *config_file, MPI_Datatype *conf
types[0] = types[1] = types[2] = MPI_INT;
// Set blocklengths to the appropriate value
blocklengths[0] = blocklengths[1] = blocklengths[2] = config_file->resizes;
blocklengths[0] = blocklengths[1] = blocklengths[2] = config_file->n_resizes;
//Fill the displs vector
MPI_Get_address(config_file, &dir);
......@@ -408,7 +407,7 @@ void def_struct_config_file_array(configuration *config_file, MPI_Datatype *conf
* Derived datatype to send specific elements
* of the iteration-stage structures in a single communication.
*/
void def_struct_iter_stage(iter_stage_t *iter_stage, int stages, MPI_Datatype *config_type) {
void def_struct_iter_stage(iter_stage_t *stages, int n_stages, MPI_Datatype *config_type) {
int i, counts = 4;
int blocklengths[4] = {1, 1, 1, 1};
MPI_Aint displs[counts], dir;
......@@ -420,21 +419,21 @@ void def_struct_iter_stage(iter_stage_t *iter_stage, int stages, MPI_Datatype *c
types[2] = MPI_DOUBLE;
// Fill the displs vector
MPI_Get_address(iter_stage, &dir);
MPI_Get_address(stages, &dir);
MPI_Get_address(&(iter_stage->pt), &displs[0]);
MPI_Get_address(&(iter_stage->t_stage), &displs[1]);
MPI_Get_address(&(iter_stage->t_op), &displs[2]);
MPI_Get_address(&(iter_stage->bytes), &displs[3]);
MPI_Get_address(&(stages->pt), &displs[0]);
MPI_Get_address(&(stages->t_stage), &displs[1]);
MPI_Get_address(&(stages->t_op), &displs[2]);
MPI_Get_address(&(stages->bytes), &displs[3]);
for(i=0;i<counts;i++) displs[i] -= dir;
if (stages == 1) {
if (n_stages == 1) {
MPI_Type_create_struct(counts, blocklengths, displs, types, config_type);
} else { // If there is more than one stage (structure), the "extent" is modified.
MPI_Type_create_struct(counts, blocklengths, displs, types, &aux);
// Derived datatype to send N elements of the structure
MPI_Type_create_resized(aux, 0, 1*sizeof(iter_stage_t), config_type);
MPI_Type_create_resized(aux, 0, sizeof(iter_stage_t), config_type);
}
MPI_Type_commit(config_type);
}
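Note (editor): the comment above is the key to this function: MPI_Type_create_struct only describes the selected members, so when N structures are sent back-to-back the type's extent must be forced to sizeof(iter_stage_t) for MPI to step from one array element to the next correctly (trailing padding and the members that are not transferred would otherwise shift every element after the first). A standalone sketch of the same idiom, with a hypothetical struct standing in for iter_stage_t:

```c
/* Sketch: derived type over part of a struct, resized to the struct's full
 * size so an array of them can be broadcast. Struct and fields are illustrative. */
#include <stdio.h>
#include <stddef.h>
#include <mpi.h>

typedef struct { int pt; double t_stage; char other[24]; } demo_stage_t;

int main(int argc, char *argv[]) {
    int rank;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    demo_stage_t stages[3];
    if (rank == 0)
        for (int i = 0; i < 3; i++) { stages[i].pt = i; stages[i].t_stage = 0.1 * i; }

    int blocklengths[2] = {1, 1};
    MPI_Aint displs[2] = { offsetof(demo_stage_t, pt), offsetof(demo_stage_t, t_stage) };
    MPI_Datatype types[2] = { MPI_INT, MPI_DOUBLE }, aux, stage_type;

    MPI_Type_create_struct(2, blocklengths, displs, types, &aux);
    /* Force the extent to the full struct size so element i+1 starts exactly
     * sizeof(demo_stage_t) bytes after element i. */
    MPI_Type_create_resized(aux, 0, sizeof(demo_stage_t), &stage_type);
    MPI_Type_commit(&stage_type);

    MPI_Bcast(stages, 3, stage_type, 0, MPI_COMM_WORLD);
    printf("Rank %d: stages[2].pt=%d t_stage=%lf\n", rank, stages[2].pt, stages[2].t_stage);

    MPI_Type_free(&stage_type);
    MPI_Type_free(&aux);
    MPI_Finalize();
    return 0;
}
```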
......@@ -31,17 +31,17 @@ typedef struct
typedef struct
{
int resizes, iter_stages;
int actual_resize, actual_iter;
int matrix_tam, sdr, adr;
int css, cst;
int aib;
int n_resizes, n_stages;
int actual_resize, actual_stage;
int granularity, sdr, adr;
int sm, ss;
int at;
double latency_m, bw_m;
int *iters, *procs, *phy_dist;
float *factors;
iter_stage_t *iter_stage;
iter_stage_t *stages;
} configuration;
......
......@@ -116,8 +116,8 @@ int main(int argc, char *argv[]) {
MPI_Comm_size(comm, &(group->numP));
MPI_Comm_rank(comm, &(group->myId));
if(config_file->resizes != group->grp + 1) {
set_malleability_configuration(config_file->cst, config_file->css, config_file->phy_dist[group->grp+1], -1, config_file->aib, -1);
if(config_file->n_resizes != group->grp + 1) {
set_malleability_configuration(config_file->sm, config_file->ss, config_file->phy_dist[group->grp+1], -1, config_file->at, -1);
set_children_number(config_file->procs[group->grp+1]); // TODO TO BE DEPRECATED
if(group->grp == 0) {
......@@ -136,7 +136,7 @@ int main(int argc, char *argv[]) {
print_local_results();
reset_results_index(results);
} while((config_file->resizes > group->grp + 1) && (config_file->cst == COMM_SPAWN_MERGE || config_file->cst == COMM_SPAWN_MERGE_PTHREAD));
} while((config_file->n_resizes > group->grp + 1) && (config_file->sm == COMM_SPAWN_MERGE || config_file->sm == COMM_SPAWN_MERGE_PTHREAD));
//
// EXECUTION ENDS ----------------------------------------------------------
......@@ -153,7 +153,7 @@ int main(int argc, char *argv[]) {
MPI_Comm_free(&comm);
}
if(group->myId == ROOT && (config_file->cst == COMM_SPAWN_MERGE || config_file->cst == COMM_SPAWN_MERGE_PTHREAD)) {
if(group->myId == ROOT && (config_file->sm == COMM_SPAWN_MERGE || config_file->sm == COMM_SPAWN_MERGE_PTHREAD)) {
MPI_Abort(MPI_COMM_WORLD, -100);
}
free_application_data();
......@@ -183,21 +183,20 @@ int work() {
double *matrix = NULL;
maxiter = config_file->iters[group->grp];
//initMatrix(&matrix, config_file->matrix_tam);
state = MAL_NOT_STARTED;
res = 0;
for(iter=group->iter_start; iter < maxiter; iter++) {
iterate(matrix, config_file->matrix_tam, state, iter);
iterate(matrix, config_file->granularity, state, iter);
}
if(config_file->resizes != group->grp + 1)
if(config_file->n_resizes != group->grp + 1)
state = malleability_checkpoint();
iter = 0;
while(state == MAL_DIST_PENDING || state == MAL_SPAWN_PENDING || state == MAL_SPAWN_SINGLE_PENDING) {
if(iter < config_file->iters[group->grp+1]) {
iterate(matrix, config_file->matrix_tam, state, iter);
iterate(matrix, config_file->granularity, state, iter);
iter++;
group->iter_start = iter;
}
......@@ -205,7 +204,7 @@ int work() {
}
if(config_file->resizes - 1 == group->grp) res=1;
if(config_file->n_resizes - 1 == group->grp) res=1;
if(state == MAL_ZOMBIE) res=state;
return res;
}
......@@ -229,8 +228,8 @@ double iterate(double *matrix, int n, int async_comm, int iter) {
start_time = MPI_Wtime();
for(i=0; i < config_file->iter_stages; i++) {
aux+= process_stage(*config_file, config_file->iter_stage[i], *group, comm);
for(i=0; i < config_file->n_stages; i++) {
aux+= process_stage(*config_file, config_file->stages[i], *group, comm);
}
actual_time = MPI_Wtime(); // Record times
......@@ -311,7 +310,7 @@ int print_final_results() {
if(group->myId == ROOT) {
if(group->grp == config_file->resizes -1) {
if(group->grp == config_file->n_resizes -1) {
file_name = NULL;
file_name = malloc(20 * sizeof(char));
if(file_name == NULL) return -1; // Could not allocate the memory
......@@ -321,7 +320,7 @@ int print_final_results() {
ptr_out = dup(1);
create_out_file(file_name, &ptr_global, 1);
print_config(config_file, group->grp);
print_global_results(*results, config_file->resizes);
print_global_results(*results, config_file->n_resizes);
fflush(stdout);
free(file_name);
......@@ -365,7 +364,7 @@ void init_application() {
config_file = read_ini_file(group->argv[1]);
results = malloc(sizeof(results_data));
init_results_data(results, config_file->resizes, config_file->iters[group->grp]);
init_results_data(results, config_file->n_resizes, config_file->iters[group->grp]);
if(config_file->sdr) {
malloc_comm_array(&(group->sync_array), config_file->sdr , group->myId, group->numP);
}
......@@ -374,6 +373,7 @@ void init_application() {
}
int message_tam = 100000000;
message_tam = 10240000;
config_file->latency_m = latency(group->myId, group->numP, comm);
config_file->bw_m = bandwidth(group->myId, group->numP, comm, config_file->latency_m, message_tam);
obtain_op_times(1);
......@@ -393,7 +393,7 @@ void init_application() {
void obtain_op_times(int compute) {
int i;
double time = 0;
for(i=0; i<config_file->iter_stages; i++) {
for(i=0; i<config_file->n_stages; i++) {
time+=init_stage(config_file, i, *group, comm, compute);
}
if(!compute) results->wasted_time += time;
......
......@@ -41,7 +41,7 @@ double init_stage(configuration *config_file, int stage_i, group_data group, MPI
double result = 0;
int qty = 20000;
iter_stage_t *stage = &(config_file->iter_stage[stage_i]);
iter_stage_t *stage = &(config_file->stages[stage_i]);
stage->operations = qty;
switch(stage->pt) {
......@@ -83,12 +83,12 @@ double process_stage(configuration config_file, iter_stage_t stage, group_data g
//Computation
case COMP_PI:
for(i=0; i < stage.operations; i++) {
result += computePiSerial(config_file.matrix_tam);
result += computePiSerial(config_file.granularity);
}
break;
case COMP_MATRIX:
for(i=0; i < stage.operations; i++) {
result += computeMatrix(stage.double_array, config_file.matrix_tam); //FIXME Does not give repeatable times
result += computeMatrix(stage.double_array, config_file.granularity); //FIXME Does not give repeatable times
}
break;
//Communications
......@@ -128,29 +128,29 @@ double latency(int myId, int numP, MPI_Comm comm) {
aux = '0';
elapsed_time = 0;
if(myId+1 != numP || (myId+1 == numP && numP % 2 == 0)) {
//if(myId+1 != numP || (myId+1 == numP && numP % 2 == 0)) {
MPI_Barrier(comm);
start_time = MPI_Wtime();
if(myId % 2 == 0){
//if(myId % 2 == 0){
if(myId == 0) {
for(i=0; i<loop_count; i++){
MPI_Ssend(&aux, 0, MPI_CHAR, myId+1, 99, comm);
MPI_Ssend(&aux, 0, MPI_CHAR, numP-1, 99, comm);
}
MPI_Recv(&aux, 0, MPI_CHAR, myId+1, 99, comm, MPI_STATUS_IGNORE);
} else {
MPI_Recv(&aux, 0, MPI_CHAR, numP-1, 99, comm, MPI_STATUS_IGNORE);
} else if(myId+1 == numP) {
for(i=0; i<loop_count; i++){
MPI_Recv(&aux, 0, MPI_CHAR, myId-1, 99, comm, MPI_STATUS_IGNORE);
MPI_Recv(&aux, 0, MPI_CHAR, 0, 99, comm, MPI_STATUS_IGNORE);
}
MPI_Ssend(&aux, 0, MPI_CHAR, myId-1, 99, comm);
MPI_Ssend(&aux, 0, MPI_CHAR, 0, 99, comm);
}
MPI_Barrier(comm);
stop_time = MPI_Wtime();
elapsed_time = (stop_time - start_time) / loop_count;
}
max_time = (stop_time - start_time) / loop_count;
//}
if(myId %2 != 0) {
elapsed_time=0;
}
MPI_Allreduce(&elapsed_time, &max_time, 1, MPI_DOUBLE, MPI_MAX, comm);
//MPI_Allreduce(&elapsed_time, &max_time, 1, MPI_DOUBLE, MPI_MAX, comm);
MPI_Bcast(&max_time, 1, MPI_DOUBLE, ROOT, comm);
return max_time;
}
......@@ -171,31 +171,33 @@ double bandwidth(int myId, int numP, MPI_Comm comm, double latency, int n) {
elapsed_time = 0;
time = 0;
if(myId+1 != numP || (myId+1 == numP && numP % 2 == 0)) {
// if(myId+1 != numP || (myId+1 == numP && numP % 2 == 0)) {
MPI_Barrier(comm);
start_time = MPI_Wtime();
if(myId % 2 == 0){
//if(myId % 2 == 0){
if(myId == 0) {
for(i=0; i<loop_count; i++){
MPI_Ssend(aux, n, MPI_CHAR, myId+1, 99, comm);
MPI_Ssend(aux, n, MPI_CHAR, numP-1, 99, comm);
}
MPI_Recv(aux, 0, MPI_CHAR, myId+1, 99, comm, MPI_STATUS_IGNORE);
} else {
MPI_Recv(aux, 0, MPI_CHAR, numP-1, 99, comm, MPI_STATUS_IGNORE);
} else if(myId+1 == numP) {
for(i=0; i<loop_count; i++){
MPI_Recv(aux, n, MPI_CHAR, myId-1, 99, comm, MPI_STATUS_IGNORE);
MPI_Recv(aux, n, MPI_CHAR, 0, 99, comm, MPI_STATUS_IGNORE);
}
MPI_Ssend(aux, 0, MPI_CHAR, myId-1, 99, comm);
MPI_Ssend(aux, 0, MPI_CHAR, 0, 99, comm);
}
MPI_Barrier(comm);
stop_time = MPI_Wtime();
elapsed_time = (stop_time - start_time) / loop_count;
}
//}
if(myId %2 == 0) {
time = elapsed_time - latency;
}
MPI_Allreduce(&time, &max_time, 1, MPI_DOUBLE, MPI_MAX, comm);
//TODO Change to Bcast if it is only done by Root
bw = ((double)n_bytes) / max_time;
free(aux);
return bw;
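Note (editor): the two routines above time a ping-pong between the first and last ranks, derive latency from averaged zero-byte sends and bandwidth from n-byte sends with the measured latency subtracted, and then share the result with the other ranks. A compact sketch of that measurement under the same idea; the loop count, message size and the decision to run only between rank 0 and the last rank are illustrative assumptions:

```c
/* Sketch: measure point-to-point latency and bandwidth between rank 0 and
 * the last rank, then broadcast the results. Parameters are illustrative. */
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

int main(int argc, char *argv[]) {
    const int loop_count = 100, n = 1024 * 1024;   /* 1 MiB test message */
    int rank, size;
    double start, lat = 0.0, bw = 0.0;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    if (size < 2) { MPI_Finalize(); return 0; }    /* needs at least two ranks */
    char *buf = malloc(n);

    /* Latency: zero-byte synchronous sends, averaged over loop_count. */
    MPI_Barrier(MPI_COMM_WORLD);
    start = MPI_Wtime();
    for (int i = 0; i < loop_count; i++) {
        if (rank == 0)             MPI_Ssend(buf, 0, MPI_CHAR, size - 1, 99, MPI_COMM_WORLD);
        else if (rank == size - 1) MPI_Recv(buf, 0, MPI_CHAR, 0, 99, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }
    MPI_Barrier(MPI_COMM_WORLD);
    if (rank == 0) lat = (MPI_Wtime() - start) / loop_count;
    MPI_Bcast(&lat, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);

    /* Bandwidth: n-byte messages, latency subtracted from the per-message time. */
    MPI_Barrier(MPI_COMM_WORLD);
    start = MPI_Wtime();
    for (int i = 0; i < loop_count; i++) {
        if (rank == 0)             MPI_Ssend(buf, n, MPI_CHAR, size - 1, 99, MPI_COMM_WORLD);
        else if (rank == size - 1) MPI_Recv(buf, n, MPI_CHAR, 0, 99, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }
    MPI_Barrier(MPI_COMM_WORLD);
    if (rank == 0) bw = (double)n / ((MPI_Wtime() - start) / loop_count - lat);
    MPI_Bcast(&bw, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);

    if (rank == 0) printf("latency=%e s, bandwidth=%e B/s\n", lat, bw);
    free(buf);
    MPI_Finalize();
    return 0;
}
```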
......@@ -240,6 +242,18 @@ void linear_regression_stage(iter_stage_t *stage, group_data group, MPI_Comm com
if(group.myId == ROOT) {
MPI_Reduce(MPI_IN_PLACE, times, LR_ARRAY_TAM * loop_iters, MPI_DOUBLE, MPI_MAX, ROOT, comm);
/*
printf("PT=%d ", stage->pt);
for(i=0; i<tam; i++) {
printf("%lf, ", times[i]);
}
printf("\n");
printf("BYTES ");
for(i=0; i<tam; i++) {
printf("%lf, ", bytes[i]);
}
printf("\n");
*/
lr_compute(tam, bytes, times, &(stage->slope), &(stage->intercept));
} else {
MPI_Reduce(times, NULL, LR_ARRAY_TAM * loop_iters, MPI_DOUBLE, MPI_MAX, ROOT, comm);
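Note (editor): lr_compute (compiled from Main/linear_reg.c in the build line below) fits the measured times as a linear function of the transferred bytes and stores the slope and intercept used by the stage model. For reference, an ordinary least-squares fit over two such arrays can be written as follows; the function name lr_fit and its signature are hypothetical, not the project's API:

```c
/* Sketch: ordinary least-squares fit of y = slope*x + intercept.
 * lr_fit is a hypothetical name; the project's routine is lr_compute. */
#include <stdio.h>

static void lr_fit(int n, const double *x, const double *y,
                   double *slope, double *intercept) {
    double sx = 0, sy = 0, sxx = 0, sxy = 0;
    for (int i = 0; i < n; i++) {
        sx  += x[i];
        sy  += y[i];
        sxx += x[i] * x[i];
        sxy += x[i] * y[i];
    }
    *slope = (n * sxy - sx * sy) / (n * sxx - sx * sx);
    *intercept = (sy - *slope * sx) / n;
}

int main(void) {
    /* Bytes sent vs. measured time: points lying on y = 1e-9*x + 1e-5. */
    double bytes[4] = {1e3, 1e4, 1e5, 1e6};
    double times[4] = {1.1e-5, 2e-5, 1.1e-4, 1.01e-3};
    double slope, intercept;
    lr_fit(4, bytes, times, &slope, &intercept);
    printf("slope=%e intercept=%e\n", slope, intercept);
    return 0;
}
```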
......@@ -266,7 +280,7 @@ double init_matrix_pt(group_data group, configuration *config_file, iter_stage_t
result = 0;
t_stage = stage->t_stage * config_file->factors[group.grp];
initMatrix(&(stage->double_array), config_file->matrix_tam);
initMatrix(&(stage->double_array), config_file->granularity);
double start_time = MPI_Wtime();
if(group.myId == ROOT && compute) {
......
mpicc -g -Wall Main/Main.c Main/computing_func.c Main/comunication_func.c Main/linear_reg.c Main/process_stage.c IOcodes/results.c IOcodes/read_ini.c IOcodes/ini.c malleability/malleabilityManager.c malleability/malleabilityTypes.c malleability/malleabilityZombies.c malleability/ProcessDist.c malleability/CommDist.c malleability/distribution_methods/block_distribution.c -pthread -lslurm -lm
mpicc -Wall Main/Main.c Main/computing_func.c Main/comunication_func.c Main/linear_reg.c Main/process_stage.c IOcodes/results.c IOcodes/read_ini.c IOcodes/ini.c malleability/malleabilityManager.c malleability/malleabilityTypes.c malleability/malleabilityZombies.c malleability/ProcessDist.c malleability/CommDist.c malleability/distribution_methods/block_distribution.c -pthread -lslurm -lm
if [ $# -gt 0 ]
then
......
......@@ -424,7 +424,7 @@ void Children_init() {
recv_config_file(mall->root, mall->intercomm, &(mall_conf->config_file));
mall_conf->results = (results_data *) malloc(sizeof(results_data));
init_results_data(mall_conf->results, mall_conf->config_file->resizes, RESULTS_INIT_DATA_QTY);
init_results_data(mall_conf->results, mall_conf->config_file->n_resizes, RESULTS_INIT_DATA_QTY);
if(dist_a_data->entries || rep_a_data->entries) { // Receive asynchronous data
comm_data_info(rep_a_data, dist_a_data, MALLEABILITY_CHILDREN, mall->myId, root_parents, mall->intercomm);
......@@ -469,7 +469,7 @@ void Children_init() {
}
// Save the results of this transmission
recv_results(mall_conf->results, mall->root, mall_conf->config_file->resizes, mall->intercomm);
recv_results(mall_conf->results, mall->root, mall_conf->config_file->n_resizes, mall->intercomm);
MPI_Comm_disconnect(&(mall->intercomm));
......@@ -639,7 +639,7 @@ int end_redistribution() {
// result = MAL_DIST_ADAPTED;
}
send_results(mall_conf->results, rootBcast, mall_conf->config_file->resizes, mall->intercomm);
send_results(mall_conf->results, rootBcast, mall_conf->config_file->n_resizes, mall->intercomm);
result = MAL_DIST_COMPLETED;
MPI_Comm_disconnect(&(mall->intercomm));
......
......@@ -28,8 +28,8 @@
#define MAL_APP_ENDED 1
// TODO Refactor
#define COMM_PHY_NODES 1
#define COMM_PHY_CPU 2
#define COMM_PHY_SPREAD 1
#define COMM_PHY_COMPACT 2
// SPAWN METHODS
#define COMM_SPAWN_SERIAL 0
......
......@@ -5,12 +5,12 @@ dir="/home/martini/malleability_benchmark/Codes/auxiliar_codes"
aux=$(grep "\[resize0\]" -n $1 | cut -d ":" -f1)
read -r ini fin <<<$(echo $aux)
diff=$(( fin - ini ))
numP=$(head -$fin $1 | tail -$diff | cut -d ';' -f1 | grep procs | cut -d '=' -f2)
dist=$(head -$fin $1 | tail -$diff | cut -d ';' -f1 | grep physical_dist | cut -d '=' -f2)
numP=$(head -$fin $1 | tail -$diff | cut -d ';' -f1 | grep Procs | cut -d '=' -f2)
dist=$(head -$fin $1 | tail -$diff | cut -d ';' -f1 | grep Dist | cut -d '=' -f2)
if [ $dist == "node" ]; then
if [ $dist == "spread" ]; then
dist=1
elif [ $dist == "cpu" ]; then
elif [ $dist == "compact" ]; then
dist=2
fi
......
[general]
resizes=1
iter_stages=1
matrix_tam=100000
R=0
S=1
Granularity=100000
SDR=0.0
ADR=0.0
AIB=0
CST=3
CSS=0
AT=0
SM=0
SS=0
; end [general]
[stage0]
PT=3
......@@ -14,24 +14,29 @@ bytes=0
t_stage=0.1
;end [stage0]
[stage1]
PT=3
PT=4
bytes=0
t_stage=0.01
t_stage=0.1
;end [stage1]
[stage2]
PT=0
PT=5
bytes=0
t_stage=0.2
t_stage=0.1
;end [stage2]
[stage3]
PT=6
bytes=0
t_stage=0.1
;end [stage3]
[resize0]
iters=10
procs=4
factor=1
physical_dist=cpu
Iters=10
Procs=40
FactorS=1
Dist=compact
;end [resize0]
[resize1]
iters=10
procs=8
factor=0.5
physical_dist=cpu
Iters=10
Procs=8
FactorS=0.5
Dist=compact
;end [resize1]