Commit 04e2f90a authored by iker_martin's avatar iker_martin


Slurm is no longer required for executions. The configuration refactor also continues. WIP
parent 2b3f5e66
......@@ -28,12 +28,9 @@ static int handler(void* user, const char* section, const char* name,
#define MATCH(s, n) strcmp(section, s) == 0 && strcmp(name, n) == 0
if (MATCH("general", "Total_Resizes")) {
pconfig->n_resizes = atoi(value) + 1;
//malloc_config_resizes(pconfig); //FIXME Unknown
user_functions->resizes_f(pconfig);
} else if (MATCH("general", "Total_Stages")) {
pconfig->n_stages = atoi(value);
pconfig->stages = malloc(sizeof(iter_stage_t) * (size_t) pconfig->n_stages);
//init_config_stages(pconfig); //FIXME Unknown
user_functions->stages_f(pconfig);
} else if (MATCH("general", "Granularity")) {
pconfig->granularity = atoi(value);
......@@ -41,48 +38,40 @@ static int handler(void* user, const char* section, const char* name,
pconfig->sdr = atoi(value);
} else if (MATCH("general", "ADR")) { // TODO Refactor to a manual name
pconfig->adr = atoi(value);
} else if (MATCH("general", "Asynch_Redistribution_Type")) {
pconfig->at = atoi(value);
} else if (MATCH("general", "Spawn_Method")) {
pconfig->sm = atoi(value);
} else if (MATCH("general", "Spawn_Strategy")) {
pconfig->ss = atoi(value);
// Iter stage
} else if (MATCH(stage_name, "Stage_Type")) {
if(pconfig->actual_stage < pconfig->n_stages)
pconfig->stages[pconfig->actual_stage].pt = atoi(value);
} else if (MATCH(stage_name, "Stage_bytes")) {
if(pconfig->actual_stage < pconfig->n_stages)
pconfig->stages[pconfig->actual_stage].bytes = atoi(value);
} else if (MATCH(stage_name, "Stage_time")) {
if(pconfig->actual_stage < pconfig->n_stages) {
pconfig->stages[pconfig->actual_stage].t_stage = (float) atof(value);
pconfig->actual_stage = pconfig->actual_stage+1; // Last element of the group
}
//if(pconfig->actual_stage < pconfig->n_stages)
pconfig->stages[pconfig->actual_stage].pt = atoi(value);
} else if (MATCH(stage_name, "Stage_Bytes")) {
pconfig->stages[pconfig->actual_stage].bytes = atoi(value);
} else if (MATCH(stage_name, "Stage_Time")) {
pconfig->stages[pconfig->actual_stage].t_stage = (float) atof(value);
pconfig->actual_stage = pconfig->actual_stage+1; // Last element of the group
// Resize stage
} else if (MATCH(resize_name, "Iters")) {
if(pconfig->actual_resize < pconfig->n_resizes)
pconfig->iters[pconfig->actual_resize] = atoi(value);
//if(pconfig->actual_resize < pconfig->n_resizes)
pconfig->groups[pconfig->actual_resize].iters = atoi(value);
} else if (MATCH(resize_name, "Procs")) {
if(pconfig->actual_resize < pconfig->n_resizes)
pconfig->procs[pconfig->actual_resize] = atoi(value);
pconfig->groups[pconfig->actual_resize].procs = atoi(value);
} else if (MATCH(resize_name, "FactorS")) {
if(pconfig->actual_resize < pconfig->n_resizes)
pconfig->factors[pconfig->actual_resize] =(float) atof(value);
pconfig->groups[pconfig->actual_resize].factor =(float) atof(value);
} else if (MATCH(resize_name, "Dist")) {
if(pconfig->actual_resize < pconfig->n_resizes) {
char *aux = strdup(value);
if (strcmp(aux, "spread") == 0) {
pconfig->phy_dist[pconfig->actual_resize] = MALL_DIST_SPREAD;
} else {
pconfig->phy_dist[pconfig->actual_resize] = MALL_DIST_COMPACT;
}
free(aux);
pconfig->actual_resize = pconfig->actual_resize+1; // Last element of the group
}
int aux_value = MALL_DIST_COMPACT;
if (strcmp(value, "spread") == 0) {
aux_value = MALL_DIST_SPREAD;
}
pconfig->groups[pconfig->actual_resize].phy_dist = aux_value;
} else if (MATCH(resize_name, "Asynch_Redistribution_Type")) {
pconfig->groups[pconfig->actual_resize].at = atoi(value);
} else if (MATCH(resize_name, "Spawn_Method")) {
pconfig->groups[pconfig->actual_resize].sm = atoi(value);
} else if (MATCH(resize_name, "Spawn_Strategy")) {
pconfig->groups[pconfig->actual_resize].ss = atoi(value);
pconfig->actual_resize = pconfig->actual_resize+1; // Last element of the group
// Unknown case
} else {
return 0; /* unknown section or name, error */
}
......
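For context, a minimal compilable sketch of the inih-style handler convention this parser follows; the type and function names below (example_config, example_handler) are illustrative stand-ins, not repository code:

#include <stdlib.h>
#include <string.h>

typedef struct { int n_resizes, actual_resize, *iters; } example_config;

#define MATCH(s, n) (strcmp(section, s) == 0 && strcmp(name, n) == 0)
/* inih contract: return nonzero on success, 0 so the parser reports an error. */
static int example_handler(void *user, const char *section,
                           const char *name, const char *value) {
    example_config *cfg = (example_config *) user;
    if (MATCH("general", "Total_Resizes")) {
        cfg->n_resizes = atoi(value) + 1;  /* N resizes imply N+1 groups */
        cfg->iters = malloc(sizeof(int) * (size_t) cfg->n_resizes);
    } else if (strcmp(name, "Iters") == 0) {
        /* A bounds check like the removed ones would guard against
         * more [resizeN] sections than declared groups. */
        if (cfg->actual_resize >= cfg->n_resizes) return 0;
        cfg->iters[cfg->actual_resize] = atoi(value);
        cfg->actual_resize++;  /* advance once the group's last key is seen */
    } else {
        return 0;  /* unknown section or name */
    }
    return 1;
}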
......@@ -243,7 +243,11 @@ void print_global_results(results_data results, size_t resizes) {
void init_results_data(results_data *results, size_t resizes, size_t stages, size_t iters_size) {
size_t i;
printf("Test 1 R=%zu\n", resizes); fflush(stdout); MPI_Barrier(MPI_COMM_WORLD);
double *test = calloc(1, sizeof(double));
printf("Test 2 R=%zu\n", resizes); fflush(stdout); MPI_Barrier(MPI_COMM_WORLD);
results->spawn_time = calloc(resizes, sizeof(double));
printf("Test 3\n"); fflush(stdout); MPI_Barrier(MPI_COMM_WORLD);
results->spawn_real_time = calloc(resizes, sizeof(double));
results->sync_time = calloc(resizes, sizeof(double));
results->async_time = calloc(resizes, sizeof(double));
......@@ -251,10 +255,13 @@ void init_results_data(results_data *results, size_t resizes, size_t stages, siz
results->iters_size = iters_size + RESULTS_EXTRA_SIZE;
results->iters_time = calloc(results->iters_size, sizeof(double));
printf("Test 6\n"); fflush(stdout); MPI_Barrier(MPI_COMM_WORLD);
results->stage_times = malloc(stages * sizeof(double*));
printf("Test 7\n"); fflush(stdout); MPI_Barrier(MPI_COMM_WORLD);
for(i=0; i<stages; i++) {
results->stage_times[i] = calloc(results->iters_size, sizeof(double));
}
printf("Test 8\n"); fflush(stdout); MPI_Barrier(MPI_COMM_WORLD);
results->iters_async = 0;
results->iter_index = 0;
......
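The printf/fflush/MPI_Barrier triples above are temporary checkpoints; a hedged sketch of a macro that would express the same synchronized trace in one line per checkpoint (DEBUG_TRACE is a hypothetical name, not existing code):

#include <stdio.h>
#include <mpi.h>

/* Print a checkpoint, flush so output is not lost on a crash, and
 * barrier so reaching the message means every rank got this far. */
#define DEBUG_TRACE(comm, ...) \
    do { printf(__VA_ARGS__); fflush(stdout); MPI_Barrier(comm); } while (0)

/* Usage, equivalent to the inline triples above:
 *   DEBUG_TRACE(MPI_COMM_WORLD, "Test 1 R=%zu\n", resizes);
 */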
......@@ -57,7 +57,6 @@ int main(int argc, char *argv[]) {
}
init_group_struct(argv, argc, myId, numP);
//FIXME Does not work with OpenMPI
im_child = init_malleability(myId, numP, ROOT, comm, argv[0], nodelist, num_cpus, num_nodes);
if(!im_child) { //TODO REFACTOR Simplificar inicio
......@@ -118,8 +117,9 @@ int main(int argc, char *argv[]) {
}
if(config_file->n_resizes != group->grp + 1) { //TODO Move to another function
set_malleability_configuration(config_file->sm, config_file->ss, config_file->phy_dist[group->grp+1], config_file->at, -1);
set_children_number(config_file->procs[group->grp+1]); // TODO TO BE DEPRECATED
set_malleability_configuration(config_file->groups[group->grp+1].sm, config_file->groups[group->grp+1].ss,
config_file->groups[group->grp+1].phy_dist, config_file->groups[group->grp+1].at, -1);
set_children_number(config_file->groups[group->grp+1].procs); // TODO TO BE DEPRECATED
if(group->grp == 0) {
malleability_add_data(&(group->grp), 1, MAL_INT, 1, 1);
......@@ -141,7 +141,7 @@ int main(int argc, char *argv[]) {
print_local_results();
reset_results_index(results);
} while(config_file->n_resizes > group->grp + 1 && config_file->sm == MALL_SPAWN_MERGE);
} while(config_file->n_resizes > group->grp + 1 && config_file->groups[group->grp].sm == MALL_SPAWN_MERGE);
//
// EXECUTION ENDS ----------------------------------------------------------
......@@ -158,7 +158,7 @@ int main(int argc, char *argv[]) {
MPI_Comm_free(&comm);
}
if(group->myId == ROOT && config_file->sm == MALL_SPAWN_MERGE) {
if(group->myId == ROOT && config_file->groups[group->grp].sm == MALL_SPAWN_MERGE) {
MPI_Abort(MPI_COMM_WORLD, -100);
}
free_application_data(); //FIXME Error when freeing SDR/ADR memory
......@@ -186,7 +186,7 @@ int main(int argc, char *argv[]) {
int work() {
int iter, maxiter, state, res;
maxiter = config_file->iters[group->grp];
maxiter = config_file->groups[group->grp].iters;
state = MALL_NOT_STARTED;
res = 0;
......@@ -199,7 +199,7 @@ int work() {
iter = 0;
while(state == MALL_DIST_PENDING || state == MALL_SPAWN_PENDING || state == MALL_SPAWN_SINGLE_PENDING || state == MALL_SPAWN_ADAPT_POSTPONE) {
if(iter < config_file->iters[group->grp+1]) {
if(iter < config_file->groups[group->grp+1].iters) {
iterate(state);
iter++;
group->iter_start = iter;
......@@ -379,7 +379,9 @@ void init_application() {
//config_file = read_ini_file(group->argv[1]);
init_config(group->argv[1], &config_file);
results = malloc(sizeof(results_data));
init_results_data(results, (size_t)config_file->n_resizes, (size_t)config_file->n_stages, (size_t)config_file->iters[group->grp]);
printf("Test 0 P%d -- Resizes=%d Stages=%d Iters=%d\n", group->myId, config_file->n_resizes, config_file->n_stages, config_file->groups[group->grp].iters); fflush(stdout); MPI_Barrier(MPI_COMM_WORLD);
init_results_data(results, (size_t)config_file->n_resizes, (size_t)config_file->n_stages, (size_t)config_file->groups[group->grp].iters);
printf("Test F P%d\n", group->myId); fflush(stdout); MPI_Barrier(MPI_COMM_WORLD);
if(config_file->sdr) {
malloc_comm_array(&(group->sync_array), config_file->sdr , group->myId, group->numP);
}
......
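To summarize the control flow that work() implements above: while a reconfiguration is pending, the application keeps iterating up to the next group's iteration budget. A condensed sketch; the polling call named poll_malleability_state is a hypothetical stand-in for however the state variable is actually refreshed:

int iter = 0;
while (state == MALL_DIST_PENDING || state == MALL_SPAWN_PENDING
       || state == MALL_SPAWN_SINGLE_PENDING || state == MALL_SPAWN_ADAPT_POSTPONE) {
    if (iter < config_file->groups[group->grp + 1].iters) {
        iterate(state);            /* overlap computation with the resize */
        iter++;
        group->iter_start = iter;  /* children resume from this iteration */
    }
    state = poll_malleability_state();  /* hypothetical progress check */
}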
......@@ -34,10 +34,6 @@ typedef struct
int operations;
int bytes, real_bytes, my_bytes;
// Variables to represent linear regression
// for collective calls.
double slope, intercept;
// Arrays to communicate data;
char* array, *full_array;
double* double_array;
......@@ -47,20 +43,23 @@ typedef struct
} iter_stage_t;
typedef struct
{
int iters, procs;
int sm, ss, phy_dist, at;
float factor;
} group_config_t;
typedef struct
{
int n_resizes, n_stages;
int actual_resize, actual_stage;
int granularity, sdr, adr;
int sm, ss;
int at;
double latency_m, bw_m;
int *iters, *procs, *phy_dist;
float *factors;
double t_op_comms;
iter_stage_t *stages;
group_config_t *groups;
} configuration;
#endif
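Since group_config_t replaces the parallel arrays (iters, procs, phy_dist, factors), every per-group lookup changes shape; a minimal helper (group_iters is a hypothetical name) contrasting the removed and the new access pattern:

/* Hypothetical accessor illustrating the layout change. */
static int group_iters(configuration *cfg, int grp) {
    /* old layout (removed): return cfg->iters[grp]; */
    return cfg->groups[grp].iters;  /* new layout keeps group fields together */
}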
......@@ -8,10 +8,10 @@
#include "../malleability/distribution_methods/block_distribution.h"
void malloc_config_resizes(configuration *user_config);
void init_config_stages(configuration *user_config);
void malloc_config_stages(configuration *user_config);
void def_struct_config_file(configuration *config_file, MPI_Datatype *config_type);
void def_struct_config_file_array(configuration *config_file, MPI_Datatype *config_type);
void def_struct_groups(group_config_t *groups, size_t n_resizes, MPI_Datatype *config_type);
void def_struct_iter_stage(iter_stage_t *stages, size_t n_stages, MPI_Datatype *config_type);
/*
......@@ -29,12 +29,16 @@ void init_config(char *file_name, configuration **user_config) {
if(file_name != NULL) {
ext_functions_t mallocs;
mallocs.resizes_f = malloc_config_resizes;
mallocs.stages_f = init_config_stages;
mallocs.stages_f = malloc_config_stages;
*user_config = read_ini_file(file_name, mallocs);
} else {
configuration *config = NULL;
config = malloc(sizeof(configuration));
if(config == NULL) { // Check the allocation before dereferencing it
perror("Error when reserving configuration structure\n");
MPI_Abort(MPI_COMM_WORLD, -3);
}
config->n_resizes=1;
malloc_config_resizes(config);
config->n_stages=1;
malloc_config_stages(config);
......@@ -52,17 +56,22 @@ void init_config(char *file_name, configuration **user_config) {
* "configuration *config = malloc(sizeof(configuration));"
*
* However, it can be obtained through the functions
* - read_ini_file
* - init_config
* - recv_config_file
*/
void malloc_config_resizes(configuration *user_config) {
int i;
if(user_config != NULL) {
user_config->iters = malloc(sizeof(int) * n_resizes);
user_config->procs = malloc(sizeof(int) * n_resizes);
user_config->factors = malloc(sizeof(float) * n_resizes);
user_config->phy_dist = malloc(sizeof(int) * n_resizes);
user_config->groups = malloc(sizeof(group_config_t) * (size_t) user_config->n_resizes);
for(i=0; i<user_config->n_resizes; i++) {
user_config->groups[i].iters = 0;
user_config->groups[i].procs = 1;
user_config->groups[i].sm = 0;
user_config->groups[i].ss = 1;
user_config->groups[i].phy_dist = 0;
user_config->groups[i].at = 0;
user_config->groups[i].factor = 1;
}
}
}
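The default-value loop above could also be written with a C99 compound literal; a sketch with identical semantics, offered purely as a style alternative:

for(i=0; i<user_config->n_resizes; i++) {
    /* Designated initializers set the same defaults in one statement. */
    user_config->groups[i] = (group_config_t) {
        .iters = 0, .procs = 1, .sm = 0, .ss = 1,
        .phy_dist = 0, .at = 0, .factor = 1
    };
}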
......@@ -72,22 +81,21 @@ void malloc_config_resizes(configuration *user_config) {
* so that each stage can be freed correctly.
*
* It can be obtained through the functions
* - read_ini_file
* - init_config
* - recv_config_file
*/
void init_config_stages(configuration *user_config) {
int i;
if(user_config != NULL) {
for(i=0; i<user_config->n_stages; i++) {
user_config->stages[i].array = NULL;
user_config->stages[i].full_array = NULL;
user_config->stages[i].double_array = NULL;
user_config->stages[i].counts.counts = NULL;
user_config->stages[i].real_bytes = 0;
user_config->stages[i].intercept = 0;
user_config->stages[i].slope = 0;
}
void malloc_config_stages(configuration *user_config) {
int i;
if(user_config != NULL) {
user_config->stages = malloc(sizeof(iter_stage_t) * (size_t) user_config->n_stages);
for(i=0; i<user_config->n_stages; i++) {
user_config->stages[i].array = NULL;
user_config->stages[i].full_array = NULL;
user_config->stages[i].double_array = NULL;
user_config->stages[i].counts.counts = NULL;
user_config->stages[i].real_bytes = 0;
}
}
}
......@@ -97,10 +105,6 @@ void init_config_stages(configuration *user_config) {
void free_config(configuration *user_config) {
int i;
if(user_config != NULL) {
free(user_config->iters);
free(user_config->procs);
free(user_config->factors);
free(user_config->phy_dist);
for(i=0; i < user_config->n_stages; i++) {
......@@ -122,6 +126,7 @@ void free_config(configuration *user_config) {
}
free(user_config->groups);
//free(user_config->stages); //FIXME Memory error related to the malleability folder
free(user_config);
}
......@@ -135,16 +140,18 @@ void free_config(configuration *user_config) {
void print_config(configuration *user_config, int grp) {
if(user_config != NULL) {
int i;
printf("Config loaded: R=%d, S=%d, granularity=%d, SDR=%d, ADR=%d, AT=%d, SM=%d, SS=%d, latency=%2.8f, bw=%lf || grp=%d\n",
printf("Config loaded: R=%d, S=%d, granularity=%d, SDR=%d, ADR=%d, latency=%2.8f, bw=%lf || grp=%d\n",
user_config->n_resizes, user_config->n_stages, user_config->granularity, user_config->sdr, user_config->adr,
user_config->at, user_config->sm, user_config->ss, user_config->latency_m, user_config->bw_m, grp);
user_config->latency_m, user_config->bw_m, grp);
for(i=0; i<user_config->n_stages; i++) {
printf("Stage %d: PT=%d, T_stage=%lf, bytes=%d, Intercept=%lf, Slope=%lf\n",
i, user_config->stages[i].pt, user_config->stages[i].t_stage, user_config->stages[i].real_bytes, user_config->stages[i].intercept, user_config->stages[i].slope);
printf("Stage %d: PT=%d, T_stage=%lf, bytes=%d\n",
i, user_config->stages[i].pt, user_config->stages[i].t_stage, user_config->stages[i].real_bytes);
}
for(i=0; i<user_config->n_resizes; i++) {
printf("Resize %d: Iters=%d, Procs=%d, Factors=%f, Dist=%d\n",
i, user_config->iters[i], user_config->procs[i], user_config->factors[i], user_config->phy_dist[i]);
printf("Group %d: Iters=%d, Procs=%d, Factors=%f, Dist=%d, AT=%d, SM=%d, SS=%d\n",
i, user_config->groups[i].iters, user_config->groups[i].procs, user_config->groups[i].factor,
user_config->groups[i].phy_dist, user_config->groups[i].at, user_config->groups[i].sm,
user_config->groups[i].ss);
}
}
}
......@@ -160,20 +167,22 @@ void print_config_group(configuration *user_config, int grp) {
int parents, sons;
parents = sons = 0;
if(grp > 0) {
parents = user_config->procs[grp-1];
parents = user_config->groups[grp-1].procs;
}
if(grp < user_config->n_resizes - 1) {
sons = user_config->procs[grp+1];
sons = user_config->groups[grp+1].procs;
}
printf("Config: granularity=%d, SDR=%d, ADR=%d, AT=%d, SM=%d, SS=%d, latency=%2.8f, bw=%lf\n",
user_config->granularity, user_config->sdr, user_config->adr, user_config->at, user_config->sm, user_config->ss, user_config->latency_m, user_config->bw_m);
printf("Config: granularity=%d, SDR=%d, ADR=%d, latency=%2.8f, bw=%lf\n",
user_config->granularity, user_config->sdr, user_config->adr, user_config->latency_m, user_config->bw_m);
for(i=0; i<user_config->n_stages; i++) {
printf("Stage %d: PT=%d, T_stage=%lf, bytes=%d, Intercept=%lf, Slope=%lf\n",
i, user_config->stages[i].pt, user_config->stages[i].t_stage, user_config->stages[i].real_bytes, user_config->stages[i].intercept, user_config->stages[i].slope);
printf("Stage %d: PT=%d, T_stage=%lf, bytes=%d\n",
i, user_config->stages[i].pt, user_config->stages[i].t_stage, user_config->stages[i].real_bytes);
}
printf("Config Group: iters=%d, factor=%f, phy=%d, procs=%d, parents=%d, sons=%d\n",
user_config->iters[grp], user_config->factors[grp], user_config->phy_dist[grp], user_config->procs[grp], parents, sons);
printf("Group %d: Iters=%d, Procs=%d, Factors=%f, Dist=%d, AT=%d, SM=%d, SS=%d, parents=%d, children=%d\n",
grp, user_config->groups[grp].iters, user_config->groups[grp].procs, user_config->groups[grp].factor,
user_config->groups[grp].phy_dist, user_config->groups[grp].at, user_config->groups[grp].sm,
user_config->groups[grp].ss, parents, sons);
}
}
......@@ -193,29 +202,24 @@ void print_config_group(configuration *user_config, int grp) {
* configuration to the other group.
*/
void send_config_file(configuration *config_file, int root, MPI_Comm intercomm) {
MPI_Datatype config_type, config_type_array, iter_stage_type;
MPI_Datatype config_type, group_type, iter_stage_type;
// Derived type to send all the scalar
// data in a single communication
def_struct_config_file(config_file, &config_type);
// Derived type to send the three integer
// arrays in a single communication
def_struct_config_file_array(config_file, &config_type_array);
// Derived types to send the group and iteration-stage structures
// in a single communication
def_struct_groups(&(config_file->groups[0]), (size_t) config_file->n_resizes, &group_type);
def_struct_iter_stage(&(config_file->stages[0]), (size_t) config_file->n_stages, &iter_stage_type);
MPI_Bcast(config_file, 1, config_type, root, intercomm);
MPI_Bcast(config_file, 1, config_type_array, root, intercomm);
MPI_Bcast(config_file->factors, config_file->n_resizes, MPI_FLOAT, root, intercomm);
MPI_Bcast(config_file->groups, config_file->n_resizes, group_type, root, intercomm);
MPI_Bcast(config_file->stages, config_file->n_stages, iter_stage_type, root, intercomm);
//Free the derived types
MPI_Type_free(&config_type);
MPI_Type_free(&config_type_array);
MPI_Type_free(&group_type);
MPI_Type_free(&iter_stage_type);
}
......@@ -233,7 +237,7 @@ void send_config_file(configuration *config_file, int root, MPI_Comm intercomm)
* the function "free_config".
*/
void recv_config_file(int root, MPI_Comm intercomm, configuration **config_file_out) {
MPI_Datatype config_type, config_type_array, iter_stage_type;
MPI_Datatype config_type, group_type, iter_stage_type;
configuration *config_file = malloc(sizeof(configuration) * 1);
// Derived type to receive all the
......@@ -242,24 +246,23 @@ void recv_config_file(int root, MPI_Comm intercomm, configuration **config_file_
MPI_Bcast(config_file, 1, config_type, root, intercomm);
//Initialization of internal structures
malloc_config_resizes(config_file); // Allocates the groups and sets their default values
malloc_config_stages(config_file); // Allocates the stages and sets their arrays to NULL
// Derived types to send the group and stage
// structures in a single communication
def_struct_config_file_array(config_file, &config_type_array);
def_struct_groups(&(config_file->groups[0]), (size_t) config_file->n_resizes, &group_type);
def_struct_iter_stage(&(config_file->stages[0]), (size_t) config_file->n_stages, &iter_stage_type);
MPI_Bcast(config_file, 1, config_type_array, root, intercomm);
MPI_Bcast(config_file->factors, config_file->n_resizes, MPI_FLOAT, root, intercomm);
MPI_Bcast(config_file->groups, config_file->n_resizes, group_type, root, intercomm);
MPI_Bcast(config_file->stages, config_file->n_stages, iter_stage_type, root, intercomm);
//Free the derived types
MPI_Type_free(&config_type);
MPI_Type_free(&config_type_array);
MPI_Type_free(&group_type);
MPI_Type_free(&iter_stage_type);
init_config_stages(config_file); // Initialize arrays to NULL
*config_file_out = config_file;
}
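For orientation, a sketch of how the two routines pair up across the intercommunicator, assuming the usual intercomm broadcast convention (the sending root passes MPI_ROOT, its peers MPI_PROC_NULL, and every receiver passes the sender's rank); im_parent and myId are illustrative variables:

if (im_parent) {
    int bcast_root = (myId == ROOT) ? MPI_ROOT : MPI_PROC_NULL;
    send_config_file(config_file, bcast_root, intercomm);
} else {
    configuration *cfg = NULL;
    recv_config_file(ROOT, intercomm, &cfg);  /* allocates cfg internally */
    /* ... use cfg, then release it later with free_config(cfg). */
}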
......@@ -269,29 +272,25 @@ void recv_config_file(int root, MPI_Comm intercomm, configuration **config_file_
* of the configuration structure in a single communication.
*/
void def_struct_config_file(configuration *config_file, MPI_Datatype *config_type) {
int i, counts = 11;
int blocklengths[11] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
int i, counts = 7;
int blocklengths[7] = {1, 1, 1, 1, 1, 1, 1};
MPI_Aint displs[counts], dir;
MPI_Datatype types[counts];
// Fill the types vector
types[0] = types[1] = types[2] = types[3] = types[4] = types[5] = types[6] = types[7] = types[8] = MPI_INT;
types[9] = types[10] = MPI_DOUBLE;
types[0] = types[1] = types[2] = types[3] = types[4] = MPI_INT;
types[5] = types[6] = MPI_DOUBLE;
// Fill the displacements vector
MPI_Get_address(config_file, &dir);
MPI_Get_address(&(config_file->n_resizes), &displs[0]);
MPI_Get_address(&(config_file->n_stages), &displs[1]);
MPI_Get_address(&(config_file->actual_resize), &displs[2]); // TODO Refactor: is it necessary to send this?
MPI_Get_address(&(config_file->granularity), &displs[3]);
MPI_Get_address(&(config_file->sdr), &displs[4]);
MPI_Get_address(&(config_file->adr), &displs[5]);
MPI_Get_address(&(config_file->at), &displs[6]);
MPI_Get_address(&(config_file->ss), &displs[7]);
MPI_Get_address(&(config_file->sm), &displs[8]);
MPI_Get_address(&(config_file->latency_m), &displs[9]);
MPI_Get_address(&(config_file->bw_m), &displs[10]);
MPI_Get_address(&(config_file->granularity), &displs[2]);
MPI_Get_address(&(config_file->sdr), &displs[3]);
MPI_Get_address(&(config_file->adr), &displs[4]);
MPI_Get_address(&(config_file->latency_m), &displs[5]);
MPI_Get_address(&(config_file->bw_m), &displs[6]);
for(i=0;i<counts;i++) displs[i] -= dir;
......@@ -299,36 +298,41 @@ void def_struct_config_file(configuration *config_file, MPI_Datatype *config_typ
MPI_Type_commit(config_type);
}
/*
* Derived type to send three integer arrays
* of the configuration structure in a single communication.
* Derived type to send the specific fields
* of each group's configuration structure
* in a single communication.
*/
void def_struct_config_file_array(configuration *config_file, MPI_Datatype *config_type) {
int i, counts = 3;
int blocklengths[3] = {1, 1, 1};
void def_struct_groups(group_config_t *groups, size_t n_resizes, MPI_Datatype *config_type) {
int i, counts = 7;
int blocklengths[7] = {1, 1, 1, 1, 1, 1, 1};
MPI_Aint displs[counts], dir;
MPI_Datatype aux, types[counts];
// Fill the types vector
types[0] = types[1] = types[2] = MPI_INT;
types[0] = types[1] = types[2] = types[3] = types[4] = types[5] = MPI_INT;
types[6] = MPI_FLOAT; // factor is declared float in group_config_t
// Set blocklengths to the appropriate value
blocklengths[0] = blocklengths[1] = blocklengths[2] = config_file->n_resizes;
//Fill the displacements vector
MPI_Get_address(config_file, &dir);
// Fill the displacements vector
MPI_Get_address(groups, &dir);
MPI_Get_address(config_file->iters, &displs[0]);
MPI_Get_address(config_file->procs, &displs[1]);
MPI_Get_address(config_file->phy_dist, &displs[2]);
MPI_Get_address(&(groups->iters), &displs[0]);
MPI_Get_address(&(groups->procs), &displs[1]);
MPI_Get_address(&(groups->sm), &displs[2]);
MPI_Get_address(&(groups->ss), &displs[3]);
MPI_Get_address(&(groups->phy_dist), &displs[4]);
MPI_Get_address(&(groups->at), &displs[5]);
MPI_Get_address(&(groups->factor), &displs[6]);
for(i=0;i<counts;i++) displs[i] -= dir;
// Derived type to send a single element of the three arrays
MPI_Type_create_struct(counts, blocklengths, displs, types, &aux);
// Derived type to send N elements of the three arrays (3N in total)
MPI_Type_create_resized(aux, 0, 1*sizeof(int), config_type);
if (n_resizes == 1) {
MPI_Type_create_struct(counts, blocklengths, displs, types, config_type);
} else { // With more than one struct, the extent must be adjusted.
MPI_Type_create_struct(counts, blocklengths, displs, types, &aux);
// Derived type to send N elements of the structure
MPI_Type_create_resized(aux, 0, sizeof(group_config_t), config_type);
}
MPI_Type_commit(config_type);
}
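The extent passed to MPI_Type_create_resized is the stride MPI uses to step between consecutive array elements, which is why it must match the element actually being sent (hence sizeof(group_config_t) above). A self-contained sketch of the pattern with a stand-in element type:

#include <stddef.h>
#include <mpi.h>

typedef struct { int a; float b; } elem_t;  /* stand-in element type */

static void def_elem_type(elem_t *base, MPI_Datatype *out) {
    int blocklengths[2] = {1, 1};
    MPI_Aint displs[2], dir;
    MPI_Datatype types[2] = {MPI_INT, MPI_FLOAT}, aux;

    MPI_Get_address(base, &dir);
    MPI_Get_address(&(base->a), &displs[0]);
    MPI_Get_address(&(base->b), &displs[1]);
    displs[0] -= dir; displs[1] -= dir;

    MPI_Type_create_struct(2, blocklengths, displs, types, &aux);
    /* Without the correct extent, element i would be read at
     * (packed size) * i instead of base + i * sizeof(elem_t). */
    MPI_Type_create_resized(aux, 0, (MPI_Aint) sizeof(elem_t), out);
    MPI_Type_commit(out);
    MPI_Type_free(&aux);
}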
......@@ -344,8 +348,7 @@ void def_struct_iter_stage(iter_stage_t *stages, size_t n_stages, MPI_Datatype *
// Fill the types vector
types[0] = types[3] = MPI_INT;
types[1] = MPI_FLOAT;
types[2] = MPI_DOUBLE;
types[1] = types[2] = MPI_DOUBLE;
// Fill the displacements vector
MPI_Get_address(stages, &dir);
......
......@@ -220,7 +220,7 @@ double init_matrix_pt(group_data group, configuration *config_file, iter_stage_t
double result, t_stage, start_time;
result = 0;
t_stage = stage->t_stage * config_file->factors[group.grp];
t_stage = stage->t_stage * config_file->groups[group.grp].factor;
initMatrix(&(stage->double_array), (size_t) config_file->granularity);
if(compute) {
......@@ -240,7 +240,7 @@ double init_pi_pt(group_data group, configuration *config_file, iter_stage_t *st
double result, t_stage, start_time;
result = 0;
t_stage = stage->t_stage * config_file->factors[group.grp];
t_stage = stage->t_stage * config_file->groups[group.grp].factor;
if(compute) {
start_time = MPI_Wtime();
if(group.myId == ROOT) {
......@@ -288,6 +288,10 @@ double init_comm_allgatherv_pt(group_data group, configuration *config_file, ite
if(stage->array != NULL)
free(stage->array);
if(stage->counts.counts != NULL)
freeCounts(&(stage->counts));
if(stage->full_array != NULL)
free(stage->full_array);
stage->real_bytes = stage->bytes;
if(stage->bytes != 0) {
......@@ -297,11 +301,7 @@ double init_comm_allgatherv_pt(group_data group, configuration *config_file, ite
stage->my_bytes = dist_data.tamBl;
stage->array = malloc(sizeof(char) * (size_t)stage->my_bytes);
if(stage->full_array != NULL)
free(stage->full_array);
stage->full_array = malloc(sizeof(char) * (size_t)stage->real_bytes);
if(stage->counts.counts != NULL)
freeCounts(&(stage->counts));
} else {
time = init_emulation_comm_time(group, config_file, stage, comm);
}
......@@ -313,13 +313,13 @@ double init_comm_reduce_pt(group_data group, configuration *config_file, iter_st
double time = 0;
if(stage->array != NULL)
free(stage->array);
if(stage->full_array != NULL)
free(stage->full_array);
stage->real_bytes = stage->bytes;
if(stage->bytes != 0) {
stage->array = malloc(sizeof(char) * (size_t)stage->real_bytes);
//The full array for the reduce needs the same size
if(stage->full_array != NULL)
free(stage->full_array);
stage->full_array = malloc(sizeof(char) * (size_t)stage->real_bytes);
} else {
init_emulation_comm_time(group, config_file, stage, comm);
......
......@@ -4,7 +4,6 @@
#include <fcntl.h>
#include <unistd.h>
#include <mpi.h>
//#include <slurm/slurm.h>
#include <signal.h>
#include "../IOcodes/results.h"
#include "malleabilityZombies.h"
......
......@@ -7,7 +7,6 @@
#include <fcntl.h>
#include <unistd.h>
#include <mpi.h>
//#include <slurm/slurm.h>
#include <signal.h>
void zombies_collect_suspended(MPI_Comm comm, int myId, int numP, int numC, int root, void *results_void);
......
......@@ -5,9 +5,9 @@
#include <unistd.h>
#include <string.h>
#include <mpi.h>
#include <slurm/slurm.h>
#include "ProcessDist.h"
#define USE_SLURM
//--------------PRIVATE DECLARATIONS---------------//
......@@ -15,16 +15,24 @@ void node_dist( struct physical_dist dist, int **qty, int *used_nodes);
void spread_dist(struct physical_dist dist, int *used_nodes, int *procs);
void compact_dist(struct physical_dist dist, int *used_nodes, int *procs);
void generate_info_string(char *nodelist, int *procs_array, size_t nodes, MPI_Info *info);
void fill_str_hostfile(char *nodelist, int *qty, size_t used_nodes, char **hostfile_str);
int write_str_node(char **hostfile_str, size_t len_og, size_t qty, char *node_name);
void generate_info_string(int target_qty, MPI_Info *info);
//--------------------------------SLURM USAGE-------------------------------------//
#ifdef USE_SLURM
#include <slurm/slurm.h>
void generate_info_string_slurm(char *nodelist, int *procs_array, size_t nodes, MPI_Info *info);
void fill_str_hosts_slurm(char *nodelist, int *qty, size_t used_nodes, char **hostfile_str);
//@deprecated functions
void generate_info_hostfile_slurm(char *nodelist, int *procs_array, int nodes, MPI_Info *info);
void fill_hostfile_slurm(char *nodelist, int ptr, int *qty, int used_nodes);
#endif
//--------------------------------SLURM USAGE-------------------------------------//
int write_str_node(char **hostfile_str, size_t len_og, size_t qty, char *node_name);
//@deprecated functions
void generate_info_hostfile(char *nodelist, int *procs_array, int nodes, MPI_Info *info);
int create_hostfile(char **file_name);
void fill_hostfile(char *nodelist, int ptr, int *qty, int used_nodes);
int write_hostfile_node(int ptr, int qty, char *node_name);
//--------------PUBLIC FUNCTIONS---------------//
/*
* Fills a structure that stores the parameters
* needed to perform a mapping of the processes.
......@@ -69,23 +77,28 @@ int physical_struct_create(int target_qty, int already_created, int num_cpus, in
* to use when creating the processes.
*/
void processes_dist(struct physical_dist dist, MPI_Info *info_spawn) {
#ifdef USE_SLURM
int used_nodes=0;
int *procs_array;
// GET NEW DISTRIBUTION
node_dist(dist, &procs_array, &used_nodes);
switch(dist.info_type) {
case MALL_DIST_STRING:
generate_info_string(dist.nodelist, procs_array, (size_t) used_nodes, info_spawn);
generate_info_string_slurm(dist.nodelist, procs_array, (size_t) used_nodes, info_spawn);
break;
case MALL_DIST_HOSTFILE:
generate_info_hostfile(dist.nodelist, procs_array, used_nodes, info_spawn);
generate_info_hostfile_slurm(dist.nodelist, procs_array, used_nodes, info_spawn);
break;
}
free(procs_array);
#else
generate_info_string(dist.target_qty, info_spawn);
#endif
}
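The MPI_Info built here is ultimately consumed by the spawn call; a hedged sketch of that consumption (spawn_with_info and its arguments are illustrative, not functions of this repository):

#include <mpi.h>

static void spawn_with_info(char *cmd, int target_qty, MPI_Info info,
                            MPI_Comm comm, MPI_Comm *intercomm) {
    /* The "hosts" (or hostfile) key set above tells MPI where to
     * place the target_qty new processes. */
    MPI_Comm_spawn(cmd, MPI_ARGV_NULL, target_qty, info,
                   0 /* root */, comm, intercomm, MPI_ERRCODES_IGNORE);
    MPI_Info_free(&info);
}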
//--------------PRIVATE FUNCTIONS---------------//
//-----------------DISTRIBUTION-----------------//
/*
* Obtains the physical distribution of the group of processes to create, returning
* how many nodes will be used and the number of processes each one will host
......@@ -184,15 +197,43 @@ void compact_dist(struct physical_dist dist, int *used_nodes, int *procs) {
if(*used_nodes > dist.num_nodes) *used_nodes = dist.num_nodes; //FIXME If this happens, isn't it an error?
}
//--------------PRIVATE FUNCTIONS---------------//
//-------------------INFO SET-------------------//
/*
* Creates and returns an MPI_Info object with a hosts/mapping pair
* indicating the mapping to use for the new
* processes.
*
* It currently does not account for multiple nodes and places
* all processes on the same one. The functions "generate_info_string_slurm"
* and "generate_info_hostfile_slurm" support several
* nodes, but require Slurm to be enabled.
*/
void generate_info_string(int target_qty, MPI_Info *info){
char *host_string, host[] = "localhost";
// CREATE AND SET STRING HOSTS
write_str_node(&host_string, 0, (size_t)target_qty, host);
// SET MAPPING
MPI_Info_create(info);
MPI_Info_set(*info, "hosts", host_string);
free(host_string);
}
//--------------------------------SLURM USAGE-------------------------------------//
#ifdef USE_SLURM
/*
* Creates and returns an MPI_Info object with a hosts/mapping pair
* indicating the mapping to use for the new
* processes.
* Slurm is required to use it.
*/
void generate_info_string(char *nodelist, int *procs_array, size_t nodes, MPI_Info *info){
void generate_info_string_slurm(char *nodelist, int *procs_array, size_t nodes, MPI_Info *info){
// CREATE AND SET STRING HOSTS
char *hoststring;
fill_str_hostfile(nodelist, procs_array, nodes, &hoststring);
fill_str_hosts_slurm(nodelist, procs_array, nodes, &hoststring);
MPI_Info_create(info);
MPI_Info_set(*info, "hosts", hoststring);
free(hoststring);
......@@ -203,7 +244,7 @@ void generate_info_string(char *nodelist, int *procs_array, size_t nodes, MPI_In
* Creates and returns a string to be used with the "hosts" key
* when creating processes, indicating where they must be created.
*/
void fill_str_hostfile(char *nodelist, int *qty, size_t used_nodes, char **hostfile_str) {
void fill_str_hosts_slurm(char *nodelist, int *qty, size_t used_nodes, char **hostfile_str) {
char *host;
size_t i=0,len=0;
hostlist_t hostlist;
......@@ -219,6 +260,8 @@ void fill_str_hostfile(char *nodelist, int *qty, size_t used_nodes, char **hostf
slurm_hostlist_destroy(hostlist);
}
#endif
//--------------------------------SLURM USAGE-------------------------------------//
/*
* Appends "qty" entries of "node_name" to a string.
* Allocates the memory and reallocates it if necessary.
......@@ -263,12 +306,14 @@ int write_str_node(char **hostfile_str, size_t len_og, size_t qty, char *node_na
//====================================================
//====================================================
//--------------------------------SLURM USAGE-------------------------------------//
#ifdef USE_SLURM
/* FIXME Needs review
* @deprecated
* Generates a hostfile and adds it to an
* MPI_Info object to be used.
*/
void generate_info_hostfile(char *nodelist, int *procs_array, int nodes, MPI_Info *info){
void generate_info_hostfile_slurm(char *nodelist, int *procs_array, int nodes, MPI_Info *info){
char *hostfile;
int ptr;
......@@ -279,7 +324,7 @@ void generate_info_hostfile(char *nodelist, int *procs_array, int nodes, MPI_Inf
free(hostfile);
// SET NEW DISTRIBUTION
fill_hostfile(nodelist, ptr, procs_array, nodes);
fill_hostfile_slurm(nodelist, ptr, procs_array, nodes);
close(ptr);
}
......@@ -316,7 +361,7 @@ int create_hostfile(char **file_name) {
* of the nodes to use, indicated by "job_record", and the number
* of processes each node will host, indicated by "qty".
*/
void fill_hostfile(char *nodelist, int ptr, int *qty, int nodes) {
void fill_hostfile_slurm(char *nodelist, int ptr, int *qty, int nodes) {
int i=0;
char *host;
hostlist_t hostlist;
......@@ -359,6 +404,8 @@ int write_hostfile_node(int ptr, int qty, char *node_name) {
return 0;
}
#endif
//--------------------------------SLURM USAGE-------------------------------------//
//TODO REFACTOR FOR WHEN IT COMMUNICATES WITH THE RMS
......
[general]
R=1
S=4
Total_Resizes=0
Total_Stages=4
Granularity=100000
SDR=0.0
SDR=1000.0
ADR=0.0
AT=0
SM=1
SS=2
; end [general]
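; Note: as parsed by the handler shown earlier, the parser stores
; Total_Resizes + 1 groups (n_resizes = value + 1), so Total_Resizes=0
; corresponds to a single group described by [resize0].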
[stage0]
PT=0
bytes=0
t_stage=0.01235
Stage_Type=0
Stage_Bytes=0
Stage_Time=0.01235
;end [stage0]
[stage1]
PT=6
bytes=8
t_stage=0.1
Stage_Type=3
Stage_Bytes=0
Stage_Time=0.03
;end [stage1]
[stage2]
PT=6
bytes=8
t_stage=0.1
Stage_Type=3
Stage_Bytes=10000000
Stage_Time=0
;end [stage2]
[stage3]
PT=4
bytes=33176880
t_stage=0.040449
Stage_Type=4
Stage_Bytes=33176880
Stage_Time=0.040449
;end [stage3]
[resize0]
Iters=1
Procs=8
Iters=20
Procs=2
FactorS=1
Dist=compact
Asynch_Redistribution_Type=3
Spawn_Method=0
Spawn_Strategy=1
;end [resize0]
[resize1]
Iters=30
Procs=2
FactorS=0.1
Dist=compact
Asynch_Redistribution_Type=3
Spawn_Method=0
Spawn_Strategy=1
;end [resize1]