Commit 32a4ca93 authored by Iker Martín Álvarez

Merge branch 'malleability-refactor' into 'dev'

Malleability focus refactor of Proteo

See merge request martini/malleability_benchmark!5
parents f1511cb4 06573694
......@@ -115,9 +115,10 @@ def record_stage_line(lineS, dataG_it, stage):
# and stores them in the dataframe
# It is needed to indicate which group
# the resize line refers to
# Group 0: Iters=3, Procs=80, Factors=0.037500, Dist=2, RM=0, SM=0, RS=0, SS=0
def record_group_line(lineS, dataG_it, group):
array_groups = [G_enum.ITERS.value, G_enum.GROUPS.value, G_enum.FACTOR_S.value, G_enum.DIST.value, \
G_enum.RED_METHOD.value, G_enum.RED_STRATEGY.value, G_enum.SPAWN_METHOD.value, G_enum.SPAWN_STRATEGY.value]
G_enum.RED_METHOD.value, G_enum.SPAWN_METHOD.value, G_enum.RED_STRATEGY.value, G_enum.SPAWN_STRATEGY.value]
offset_lines = 2
for i in range(len(array_groups)):
value = get_value(lineS, i+offset_lines)
......@@ -275,9 +276,8 @@ dataG = []
for elem in lista:
f = open(elem, "r")
id_run = elem.split("_Global.out")[0].split(common_name)[1]
path_to_run = elem.split(common_name)[0]
lista_local = glob.glob(path_to_run + common_name + id_run + "_G*NP*.out")
id_run = elem.split("_Global.out")[0].split(common_name)[-1]
lista_local = glob.glob(BaseDir + common_name + id_run + "_G*NP*.out")
it,runs_in_file = read_global_file(f, dataG, it)
f.close()
......
......@@ -3,10 +3,11 @@
#include <string.h>
#include "read_ini.h"
#include "ini.h"
#include "../malleability/spawn_methods/ProcessDist.h"
#include "../malleability/MAM.h"
ext_functions_t *user_functions;
void get_numbers_from_string(const char *input, size_t *res_len, int **res);
/*
 * Function used to read the configuration file
......@@ -18,6 +19,8 @@ ext_functions_t *user_functions;
static int handler(void* user, const char* section, const char* name,
const char* value) {
int ret_value=1;
int *aux;
size_t aux_len;
configuration* pconfig = (configuration*)user;
if(pconfig->actual_group >= pconfig->n_groups && pconfig->actual_stage >= pconfig->n_stages) {
......@@ -47,6 +50,8 @@ static int handler(void* user, const char* section, const char* name,
pconfig->adr = strtoul(value, NULL, 10);
} else if (MATCH("general", "Rigid")) {
pconfig->rigid_times = atoi(value);
} else if (MATCH("general", "Capture_Method")) {
pconfig->capture_method = atoi(value);
// Iter stage
} else if (MATCH(stage_name, "Stage_Type") && LAST(pconfig->actual_stage, pconfig->n_stages)) {
......@@ -55,6 +60,8 @@ static int handler(void* user, const char* section, const char* name,
pconfig->stages[pconfig->actual_stage].t_capped = atoi(value);
} else if (MATCH(stage_name, "Stage_Bytes") && LAST(pconfig->actual_stage, pconfig->n_stages)) {
pconfig->stages[pconfig->actual_stage].bytes = atoi(value);
} else if (MATCH(stage_name, "Stage_Identifier") && LAST(pconfig->actual_stage, pconfig->n_stages)) {
pconfig->stages[pconfig->actual_stage].id = atoi(value);
} else if (MATCH(stage_name, "Stage_Time") && LAST(pconfig->actual_stage, pconfig->n_stages)) {
pconfig->stages[pconfig->actual_stage].t_stage = (float) atof(value);
pconfig->actual_stage = pconfig->actual_stage+1; // Last element of the group
......@@ -67,19 +74,23 @@ static int handler(void* user, const char* section, const char* name,
} else if (MATCH(resize_name, "FactorS") && LAST(pconfig->actual_group, pconfig->n_groups)) {
pconfig->groups[pconfig->actual_group].factor =(float) atof(value);
} else if (MATCH(resize_name, "Dist") && LAST(pconfig->actual_group, pconfig->n_groups)) {
int aux_value = MALL_DIST_COMPACT;
int aux_value = MAM_PHY_DIST_COMPACT;
if (strcmp(value, "spread") == 0) {
aux_value = MALL_DIST_SPREAD;
aux_value = MAM_PHY_DIST_SPREAD;
}
pconfig->groups[pconfig->actual_group].phy_dist = aux_value;
} else if (MATCH(resize_name, "Redistribution_Method") && LAST(pconfig->actual_group, pconfig->n_groups)) {
pconfig->groups[pconfig->actual_group].rm = atoi(value);
} else if (MATCH(resize_name, "Redistribution_Strategy") && LAST(pconfig->actual_group, pconfig->n_groups)) {
pconfig->groups[pconfig->actual_group].rs = atoi(value);
get_numbers_from_string(value, &aux_len, &aux);
pconfig->groups[pconfig->actual_group].rs = aux;
pconfig->groups[pconfig->actual_group].rs_len = aux_len;
} else if (MATCH(resize_name, "Spawn_Method") && LAST(pconfig->actual_group, pconfig->n_groups)) {
pconfig->groups[pconfig->actual_group].sm = atoi(value);
} else if (MATCH(resize_name, "Spawn_Strategy") && LAST(pconfig->actual_group, pconfig->n_groups)) {
pconfig->groups[pconfig->actual_group].ss = atoi(value);
get_numbers_from_string(value, &aux_len, &aux);
pconfig->groups[pconfig->actual_group].ss = aux;
pconfig->groups[pconfig->actual_group].ss_len = aux_len;
pconfig->actual_group = pconfig->actual_group+1; // Last element of the structure
// Unknown case
......@@ -92,6 +103,50 @@ static int handler(void* user, const char* section, const char* name,
return ret_value;
}
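For reference, a minimal INI sketch that exercises the keys handled above. The key names come from the MATCH calls; the exact section names (built into the handler's stage_name/resize_name variables) are an assumption here:

    [general]
    Rigid=0
    Capture_Method=2          ; 0=max, 1=mean, 2=median (see enum capture_methods)

    [resize0]
    Spawn_Method=0
    Spawn_Strategy=1,3        ; comma-separated, parsed by get_numbers_from_string
    Redistribution_Method=0
    Redistribution_Strategy=2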
/**
* @brief Extracts numbers from a comma-separated string and stores them in an array.
*
* This function takes a string containing a sequence of numbers separated by commas,
* converts each number to an integer, and stores them in a dynamically allocated array.
*
* @param input The input string containing comma-separated numbers.
 * @param res_len Pointer to a size_t that will hold the length of the resulting array.
 *                Note: NULL may be passed if the caller does not need it.
* @param res Pointer to an integer array where the extracted numbers will be stored.
* Note: The memory for this array is dynamically allocated and should be freed by the caller.
*/
void get_numbers_from_string(const char *input, size_t *res_len, int **res) {
char *aux, *token;
int num;
size_t len, malloc_len;
len = 0;
malloc_len = 10;
*res = (int *) malloc(malloc_len * sizeof(int));
aux = (char *) malloc((strlen(input)+1) * sizeof(char));
strcpy(aux, input);
token = strtok(aux, ",");
while (token != NULL) {
num = atoi(token);
if(len == malloc_len) {
malloc_len += 10;
*res = (int *) realloc(*res, malloc_len * sizeof(int));
}
(*res)[len] = num;
len++;
token = strtok(NULL, ",");
}
if(res_len != NULL) *res_len = len;
if(len != malloc_len) {
*res = (int *) realloc(*res, len * sizeof(int));
}
free(aux);
}
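A minimal usage sketch for get_numbers_from_string (hypothetical values; the caller owns the returned buffer and must free it):

    size_t n;
    int *values = NULL;
    get_numbers_from_string("1,3,4", &n, &values); // values = {1, 3, 4}, n = 3
    /* ... use values ... */
    free(values);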
/*
* Crea y devuelve una estructura de configuracion a traves
* de un nombre de fichero dado.
......@@ -107,6 +162,8 @@ configuration *read_ini_file(char *file_name, ext_functions_t init_functions) {
printf("Error when reserving configuration structure\n");
return NULL;
}
config->capture_method = 0;
config->rigid_times = 0;
config->n_resizes = 0;
config->n_groups = 1;
config->n_stages = 1;
......
......@@ -7,6 +7,11 @@
void def_results_type(results_data *results, int resizes, MPI_Datatype *results_type);
void compute_max(results_data *results, double *computed_array, int myId, int root, MPI_Comm comm);
void compute_mean(results_data *results, double *computed_array, int myId, int numP, int root, MPI_Comm comm);
void compute_median(results_data *results, double *computed_array, size_t *used_ids, int myId, int numP, int root, MPI_Comm comm);
void match_median(results_data *results, double *computed_array, size_t *used_ids, int myId, int numP, int root, MPI_Comm comm);
//======================================================||
//======================================================||
//================MPI RESULTS FUNCTIONS=================||
......@@ -22,7 +27,7 @@ void def_results_type(results_data *results, int resizes, MPI_Datatype *results_
 * of that group the value "MPI_PROC_NULL". The processes of the other group must
 * indicate the Id of the root process that has set "MPI_ROOT".
*/
void comm_results(results_data *results, int root, size_t resizes, MPI_Comm intercomm) {
void results_comm(results_data *results, int root, size_t resizes, MPI_Comm intercomm) {
MPI_Datatype results_type;
// Get a derived datatype to send all the
......@@ -42,13 +47,13 @@ void comm_results(results_data *results, int root, size_t resizes, MPI_Comm inte
*/
void def_results_type(results_data *results, int resizes, MPI_Datatype *results_type) {
int i, counts = 7;
int blocklengths[] = {1, 1, 1, 1, 1, 1, 1};
int blocklengths[] = {1, 1, 1, 1, 1, 1, 1, 1};
MPI_Aint displs[counts], dir;
MPI_Datatype types[counts];
// Fill the types vector
types[0] = types[1] = types[2] = types[3] = types[4] = types[5] = types[6] = MPI_DOUBLE;
blocklengths[2] = blocklengths[3] = blocklengths[4] = blocklengths[5] = blocklengths[6] = resizes;
// Fill the displs vector
MPI_Get_address(results, &dir);
......@@ -57,7 +62,7 @@ void def_results_type(results_data *results, int resizes, MPI_Datatype *results_
MPI_Get_address(&(results->wasted_time), &displs[1]);
MPI_Get_address(results->sync_time, &displs[2]);
MPI_Get_address(results->async_time, &displs[3]);
MPI_Get_address(results->spawn_real_time, &displs[4]);
MPI_Get_address(results->user_time, &displs[4]);
MPI_Get_address(results->spawn_time, &displs[5]);
MPI_Get_address(results->malleability_time, &displs[6]);
......@@ -66,31 +71,13 @@ void def_results_type(results_data *results, int resizes, MPI_Datatype *results_
MPI_Type_create_struct(counts, blocklengths, displs, types, results_type);
MPI_Type_commit(results_type);
}
//======================================================||
//======================================================||
//================SET RESULTS FUNCTIONS=================||
//======================================================||
//======================================================||
/*
 * Saves the results related to the data redistribution
 * after a reconfiguration. To be called by the children after
 * finishing the redistribution and obtaining the configuration.
*/
void set_results_post_reconfig(results_data *results, int grp, int sdr, int adr) {
if(sdr) { // If there is no synchronous data, the time is 0
results->sync_time[grp-1] = results->sync_end - results->sync_time[grp-1];
} else {
results->sync_time[grp-1] = 0;
}
if(adr) { // If there is no asynchronous data, the time is 0
results->async_time[grp-1] = results->async_end - results->async_time[grp-1];
} else {
results->async_time[grp-1] = 0;
}
results->malleability_time[grp-1] = results->malleability_end - results->malleability_time[grp-1];
}
/*
 * Resets to 0 the index of the next element to write for the
 * vectors related to iterations.
......@@ -105,13 +92,6 @@ void reset_results_index(results_data *results) {
results->iters_async = 0;
}
//=============================================================== FIXME DELETE?
int compare(const void *_a, const void *_b) {
double *a, *b;
a = (double *) _a;
b = (double *) _b;
return (*a - *b);
}
/*
 * Obtains, for each iteration, the maximum time among all the
 * processes that have participated.
......@@ -119,64 +99,116 @@ int compare(const void *_a, const void *_b) {
 * Obtaining the maximum is necessary, as it is the one that represents
 * the real time that has been used.
*/
void compute_results_iter(results_data *results, int myId, int numP, int root, MPI_Comm comm) { //TODO Try keeping the MEAN instead of the MAX?
void compute_results_iter(results_data *results, int myId, int numP, int root, size_t stages, int capture_method, MPI_Comm comm) {
size_t i, *used_ids;
switch(capture_method) {
case RESULTS_MAX:
compute_max(results, results->iters_time, myId, root, comm);
for(i=0; i<stages; i++) {
compute_max(results, results->stage_times[i], myId, root, comm);
}
break;
case RESULTS_MEAN:
compute_mean(results, results->iters_time, myId, numP, root, comm);
for(i=0; i<stages; i++) {
compute_mean(results, results->stage_times[i], myId, numP, root, comm);
}
break;
case RESULTS_MEDIAN:
used_ids = malloc(results->iter_index * sizeof(size_t));
compute_median(results, results->iters_time, used_ids, myId, numP, root, comm);
for(i=0; i<stages; i++) {
//compute_median(results, results->stage_times[i], myId, numP, root, comm);
match_median(results, results->stage_times[i], used_ids, myId, numP, root, comm);
}
free(used_ids);
break;
}
}
void compute_max(results_data *results, double *computed_array, int myId, int root, MPI_Comm comm) {
if(myId == root) {
MPI_Reduce(MPI_IN_PLACE, computed_array, results->iter_index, MPI_DOUBLE, MPI_MAX, root, comm);
} else {
MPI_Reduce(computed_array, NULL, results->iter_index, MPI_DOUBLE, MPI_MAX, root, comm);
}
}
void compute_mean(results_data *results, double *computed_array, int myId, int numP, int root, MPI_Comm comm) {
  if(myId == root) {
    MPI_Reduce(MPI_IN_PLACE, computed_array, results->iter_index, MPI_DOUBLE, MPI_SUM, root, comm);
    for(size_t i=0; i<results->iter_index; i++) {
      computed_array[i] = computed_array[i] / numP; // Divide the summed vector itself, not iters_time
    }
  } else {
    MPI_Reduce(computed_array, NULL, results->iter_index, MPI_DOUBLE, MPI_SUM, root, comm);
  }
}
struct TimeWithIndex {
double time;
size_t index;
};
int compare(const void *a, const void *b) {
  double diff = ((struct TimeWithIndex *)a)->time - ((struct TimeWithIndex *)b)->time;
  return (diff > 0) - (diff < 0); // Avoid truncating the fractional part of the double difference
}
/*
 * Computes the median of a times vector replicated among "numP" processes.
 * The median is computed for each element of the final vector, which is then returned.
 *
 * In addition, the "used_ids" vector records which process supplied the median of each element.
*/
void compute_median(results_data *results, double *computed_array, size_t *used_ids, int myId, int numP, int root, MPI_Comm comm) {
  double *aux_all_iters, median;
  struct TimeWithIndex *aux_id_iters;
  if(myId == root) {
    aux_all_iters = malloc(numP * results->iter_index * sizeof(double));
    aux_id_iters = malloc(numP * sizeof(struct TimeWithIndex));
  }

  MPI_Gather(computed_array, results->iter_index, MPI_DOUBLE, aux_all_iters, results->iter_index, MPI_DOUBLE, root, comm);
  if(myId == root) {
    for(size_t i=0; i<results->iter_index; i++) {
      for(int j=0; j<numP; j++) {
        aux_id_iters[j].time = aux_all_iters[i+(results->iter_index*j)];
        aux_id_iters[j].index = (size_t) j;
      }
      // Get the median
      qsort(aux_id_iters, numP, sizeof(struct TimeWithIndex), &compare);
      median = aux_id_iters[numP/2].time;
      if (numP % 2 == 0) median = (aux_id_iters[numP/2 - 1].time + aux_id_iters[numP/2].time) / 2;
      computed_array[i] = median;
      used_ids[i] = aux_id_iters[numP/2].index; //FIXME What should the index be when numP is even?
    }
    free(aux_all_iters);
    free(aux_id_iters);
  }
}
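As a worked example: with numP=4 and sorted times {t0 <= t1 <= t2 <= t3} for a given iteration, the reported median is (t1+t2)/2, a value no single rank produced, while used_ids stores the rank that supplied t2; that ambiguity is what the FIXME above points at.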
/*
 * Obtains the values of a times vector replicated among "numP" processes,
 * taking, for each element, the value from the process recorded in the
 * "used_ids" vector (the one that supplied the median of the matching
 * iteration time). For example, if rank 2 provided the median time of
 * iteration i, the stage times of iteration i are also taken from rank 2.
 *
 * As a result, it returns a vector consistent with the selected medians.
*/
void match_median(results_data *results, double *computed_array, size_t *used_ids, int myId, int numP, int root, MPI_Comm comm) {
  double *aux_all_iters;
  size_t matched_id;
  if(myId == root) {
    aux_all_iters = malloc(numP * results->iter_index * sizeof(double));
  }

  MPI_Gather(computed_array, results->iter_index, MPI_DOUBLE, aux_all_iters, results->iter_index, MPI_DOUBLE, root, comm);
  if(myId == root) {
    for(size_t i=0; i<results->iter_index; i++) {
      matched_id = used_ids[i];
      computed_array[i] = aux_all_iters[i+(results->iter_index*matched_id)];
    }
    free(aux_all_iters);
  }
}
//======================================================||
......@@ -229,11 +261,6 @@ void print_global_results(results_data results, size_t resizes) {
printf("%lf ", results.spawn_time[i]);
}
printf("\nT_spawn_real: ");
for(i=0; i< resizes; i++) {
printf("%lf ", results.spawn_real_time[i]);
}
printf("\nT_SR: ");
for(i=0; i < resizes; i++) {
printf("%lf ", results.sync_time[i]);
......@@ -244,6 +271,11 @@ void print_global_results(results_data results, size_t resizes) {
printf("%lf ", results.async_time[i]);
}
printf("\nT_US: ");
for(i=0; i < resizes; i++) {
printf("%lf ", results.user_time[i]);
}
printf("\nT_Malleability: ");
for(i=0; i < resizes; i++) {
printf("%lf ", results.malleability_time[i]);
......@@ -268,9 +300,9 @@ void init_results_data(results_data *results, size_t resizes, size_t stages, siz
size_t i;
results->spawn_time = calloc(resizes, sizeof(double));
results->spawn_real_time = calloc(resizes, sizeof(double));
results->sync_time = calloc(resizes, sizeof(double));
results->async_time = calloc(resizes, sizeof(double));
results->user_time = calloc(resizes, sizeof(double));
results->malleability_time = calloc(resizes, sizeof(double));
results->wasted_time = 0;
......@@ -320,10 +352,6 @@ void free_results_data(results_data *results, size_t stages) {
free(results->spawn_time);
results->spawn_time = NULL;
}
if(results->spawn_real_time != NULL) {
free(results->spawn_real_time);
results->spawn_real_time = NULL;
}
if(results->sync_time != NULL) {
free(results->sync_time);
results->sync_time = NULL;
......@@ -332,6 +360,10 @@ void free_results_data(results_data *results, size_t stages) {
free(results->async_time);
results->async_time = NULL;
}
if(results->user_time != NULL) {
free(results->user_time);
results->user_time = NULL;
}
if(results->malleability_time != NULL) {
free(results->malleability_time);
results->malleability_time = NULL;
......
......@@ -7,27 +7,26 @@
#define RESULTS_INIT_DATA_QTY 100
enum capture_methods{RESULTS_MAX, RESULTS_MEAN, RESULTS_MEDIAN};
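The Capture_Method key read in read_ini.c maps directly onto this enum (0=RESULTS_MAX, 1=RESULTS_MEAN, 2=RESULTS_MEDIAN) and is forwarded by the benchmark to compute_results_iter.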
typedef struct {
// Iters data
double *iters_time, **stage_times;
size_t iters_async, iter_index, iters_size;
// Spawn, Thread, Sync, Async and Exec time
double spawn_start, *spawn_time, *spawn_real_time;
double spawn_start, *spawn_time;
double sync_end, *sync_time;
double async_end, *async_time;
double user_end, *user_time;
double malleability_end, *malleability_time;
double exec_start, exec_time;
double wasted_time; // Time spent recalculating iter stages
} results_data;
void comm_results(results_data *results, int root, size_t resizes, MPI_Comm intercomm);
void results_comm(results_data *results, int root, size_t resizes, MPI_Comm intercomm);
void set_results_post_reconfig(results_data *results, int grp, int sdr, int adr);
void reset_results_index(results_data *results);
void compute_results_iter(results_data *results, int myId, int numP, int root, MPI_Comm comm);
void compute_results_stages(results_data *results, int myId, int numP, int root, int n_stages, MPI_Comm comm);
void compute_results_iter(results_data *results, int myId, int numP, int root, size_t n_stages, int capture_method, MPI_Comm comm);
void print_iter_results(results_data results);
void print_stage_results(results_data results, size_t n_stages);
......
......@@ -8,9 +8,8 @@
#include "Main_datatypes.h"
#include "configuration.h"
#include "../IOcodes/results.h"
#include "../malleability/CommDist.h"
#include "../malleability/malleabilityManager.h"
#include "../malleability/malleabilityStates.h"
#include "../malleability/distribution_methods/Distributed_CommDist.h"
#include "../malleability/MAM.h"
#define DR_MAX_SIZE 1000000000
......@@ -23,16 +22,23 @@ void init_group_struct(char *argv[], int argc, int myId, int numP);
void init_application();
void obtain_op_times();
void free_application_data();
void free_zombie_process();
void print_general_info(int myId, int grp, int numP);
int print_local_results();
int print_final_results();
int create_out_file(char *nombre, int *ptr, int newstdout);
void init_originals();
void init_targets();
void update_targets();
void user_redistribution(void *args);
configuration *config_file;
group_data *group;
results_data *results;
MPI_Comm comm;
MPI_Comm comm, new_comm;
int run_id = 0; // Used to tell executions apart more easily during analysis
int main(int argc, char *argv[]) {
......@@ -41,19 +47,11 @@ int main(int argc, char *argv[]) {
  int im_child;
  size_t i;

  MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &req);
  MPI_Comm_rank(MPI_COMM_WORLD, &myId);
  MPI_Comm_size(MPI_COMM_WORLD, &numP);
  comm = MPI_COMM_WORLD;
  new_comm = MPI_COMM_NULL;
if(req != MPI_THREAD_MULTIPLE) {
printf("No se ha obtenido la configuración de hilos necesaria\nSolicitada %d -- Devuelta %d\n", req, MPI_THREAD_MULTIPLE);
......@@ -62,129 +60,73 @@ int main(int argc, char *argv[]) {
}
init_group_struct(argv, argc, myId, numP);
  im_child = MAM_Init(ROOT, &comm, argv[0], user_redistribution, NULL);
  //MAM_Use_valgrind(1);

  if(im_child) {
    update_targets();
  } else {
    init_application();
    init_originals();

    MPI_Barrier(comm);
    results->exec_start = MPI_Wtime();
  }
//
// EXECUTION STARTS-------------------------------
//
  do {
    MPI_Comm_size(comm, &(group->numP));
    MPI_Comm_rank(comm, &(group->myId));

    if(group->grp != 0) {
      obtain_op_times(0); // Obtain the new time values for the computation
      MAM_Retrieve_times(&results->spawn_time[group->grp - 1], &results->sync_time[group->grp - 1], &results->async_time[group->grp - 1], &results->user_time[group->grp - 1], &results->malleability_time[group->grp - 1]);
    }

    if(config_file->n_groups != group->grp + 1) { //TODO Move to another function
      MAM_Set_configuration(config_file->groups[group->grp+1].sm, MAM_STRAT_SPAWN_CLEAR,
        config_file->groups[group->grp+1].phy_dist, config_file->groups[group->grp+1].rm, MAM_STRAT_RED_CLEAR);
      for(i=0; i<config_file->groups[group->grp+1].ss_len; i++) {
        MAM_Set_key_configuration(MAM_SPAWN_STRATEGIES, config_file->groups[group->grp+1].ss[i], &req);
      }
      for(i=0; i<config_file->groups[group->grp+1].rs_len; i++) {
        MAM_Set_key_configuration(MAM_RED_STRATEGIES, config_file->groups[group->grp+1].rs[i], &req);
      }
      MAM_Set_target_number(config_file->groups[group->grp+1].procs); // TODO TO BE DEPRECATED

      if(group->grp != 0) {
        MAM_Data_modify(&(group->grp), 0, 1, MPI_INT, MAM_DATA_REPLICATED, MAM_DATA_CONSTANT);
        MAM_Data_modify(&(group->iter_start), 0, 1, MPI_INT, MAM_DATA_REPLICATED, MAM_DATA_VARIABLE);
      }
    }

    res = work();

    if(res==1) { // The end of the application has been reached
      MPI_Barrier(comm);
      results->exec_time = MPI_Wtime() - results->exec_start - results->wasted_time;
    }
    print_local_results();
    reset_results_index(results);

    group->grp = group->grp + 1;
  } while(config_file->n_groups > group->grp);
//
// EXECUTION ENDS ----------------------------------------------------------
//
//
  print_final_results(); // Past this point processes can no longer write
  MPI_Barrier(comm);
  if(comm != MPI_COMM_WORLD && comm != MPI_COMM_NULL) {
    MPI_Comm_free(&comm);
  }

  free_application_data();
MPI_Finalize();
......@@ -208,31 +150,31 @@ int main(int argc, char *argv[]) {
*/
int work() {
int iter, maxiter, state, res;
int wait_completed = MAM_CHECK_COMPLETION;
maxiter = config_file->groups[group->grp].iters;
state = MALL_NOT_STARTED;
state = MAM_NOT_STARTED;
res = 0;
for(iter=group->iter_start; iter < maxiter; iter++) {
iterate(state);
}
if(config_file->n_groups != group->grp + 1)
    MAM_Checkpoint(&state, wait_completed, user_redistribution, NULL);
  iter = 0;
  while(state == MAM_PENDING || state == MAM_USER_PENDING) {
    if(group->grp+1 < config_file->n_groups && iter < config_file->groups[group->grp+1].iters) {
      iterate(state);
      iter++;
      group->iter_start = iter;
    } else { wait_completed = MAM_WAIT_COMPLETION; }
    MAM_Checkpoint(&state, wait_completed, user_redistribution, NULL);
  }
  //if(state == MAM_COMPLETED) {}
  if(config_file->n_groups == group->grp + 1) { res=1; }
return res;
}
......@@ -263,8 +205,8 @@ double iterate(int async_comm) {
}
  // An asynchronous data redistribution is taking place
  if(async_comm == MAM_PENDING || async_comm == MAM_USER_PENDING) {
    // TODO Differentiate between the types of asynchronous parts?
results->iters_async += 1;
}
......@@ -318,9 +260,9 @@ double iterate_rigid(double *time, double *times_stages) {
start_time = MPI_Wtime();
for(i=0; i < config_file->n_stages; i++) {
MPI_Barrier(comm);
start_time_stage = MPI_Wtime();
aux+= process_stage(*config_file, config_file->stages[i], *group, comm);
MPI_Barrier(comm);
times_stages[i] = MPI_Wtime() - start_time_stage;
}
......@@ -345,7 +287,8 @@ void print_general_info(int myId, int grp, int numP) {
char *version = malloc(MPI_MAX_LIBRARY_VERSION_STRING * sizeof(char));
MPI_Get_processor_name(name, &len);
MPI_Get_library_version(version, &len);
printf("P%d Nuevo GRUPO %d de %d procs en nodo %s con %s\n", myId, grp, numP, name, version);
//printf("P%d Nuevo GRUPO %d de %d procs en nodo %s con %s\n", myId, grp, numP, name, version);
printf("P%d Nuevo GRUPO %d de %d procs en nodo %s -- PID=%d\n", myId, grp, numP, name, getpid());
free(name);
free(version);
......@@ -359,8 +302,8 @@ int print_local_results() {
int ptr_local, ptr_out, err;
char *file_name;
  // This function causes an overhead in the recorded time for the last group
  compute_results_iter(results, group->myId, group->numP, ROOT, config_file->n_stages, config_file->capture_method, comm);
if(group->myId == ROOT) {
ptr_out = dup(1);
......@@ -394,7 +337,7 @@ int print_final_results() {
if(group->myId == ROOT) {
if(config_file->n_groups == group->grp+1) {
if(config_file->n_groups == group->grp) {
file_name = NULL;
file_name = malloc(20 * sizeof(char));
if(file_name == NULL) return -1; // Memory could not be allocated
......@@ -504,6 +447,7 @@ void obtain_op_times(int compute) {
 * Frees all the memory associated with the application
*/
void free_application_data() {
int abort_needed;
size_t i;
if(config_file->sdr && group->sync_array != NULL) {
......@@ -527,14 +471,33 @@ void free_application_data() {
free(group->async_array);
group->async_array = NULL;
}
free_malleability();
abort_needed = MAM_Finalize();
free_zombie_process();
free(group);
if(abort_needed) { MPI_Abort(MPI_COMM_WORLD, -100); }
}
/*
 * Frees the memory associated with a zombie process
*/
void free_zombie_process() {
  size_t i;

  free_results_data(results, config_file->n_stages);
  free(results);

  if(config_file->adr && group->async_array != NULL) {
    for(i=0; i<group->async_data_groups; i++) {
      free(group->async_array[i]);
      group->async_array[i] = NULL;
    }
    free(group->async_qty);
    group->async_qty = NULL;
    free(group->async_array);
    group->async_array = NULL;
  }

  free_config(config_file);
}
......@@ -561,3 +524,107 @@ int create_out_file(char *nombre, int *ptr, int newstdout) {
return 0;
}
//======================================================||
//======================================================||
//================ INIT MALLEABILITY ===================||
//======================================================||
//======================================================||
//FIXME TAKE INTO ACCOUNT THAT ADR CAN BE 0
void init_originals() {
size_t i;
if(config_file->n_groups > 1) {
MAM_Data_add(&(group->grp), NULL, 1, MPI_INT, MAM_DATA_REPLICATED, MAM_DATA_CONSTANT);
MAM_Data_add(&run_id, NULL, 1, MPI_INT, MAM_DATA_REPLICATED, MAM_DATA_CONSTANT);
MAM_Data_add(&(group->iter_start), NULL, 1, MPI_INT, MAM_DATA_REPLICATED, MAM_DATA_VARIABLE);
if(config_file->sdr) {
for(i=0; i<group->sync_data_groups; i++) {
MAM_Data_add(group->sync_array[i], NULL, group->sync_qty[i], MPI_CHAR, MAM_DATA_DISTRIBUTED, MAM_DATA_VARIABLE);
}
}
if(config_file->adr) {
for(i=0; i<group->async_data_groups; i++) {
MAM_Data_add(group->async_array[i], NULL, group->async_qty[i], MPI_CHAR, MAM_DATA_DISTRIBUTED, MAM_DATA_CONSTANT);
}
}
}
}
void init_targets() {
size_t i, entries, total_qty;
void *value = NULL;
MPI_Datatype type;
MAM_Data_get_pointer(&value, 0, &total_qty, &type, MAM_DATA_REPLICATED, MAM_DATA_CONSTANT);
group->grp = *((int *)value);
group->grp = group->grp + 1;
recv_config_file(ROOT, new_comm, &config_file);
results = malloc(sizeof(results_data));
init_results_data(results, config_file->n_resizes, config_file->n_stages, config_file->groups[group->grp].iters);
results_comm(results, ROOT, config_file->n_resizes, new_comm);
MAM_Data_get_pointer(&value, 1, &total_qty, &type, MAM_DATA_REPLICATED, MAM_DATA_CONSTANT);
run_id = *((int *)value);
if(config_file->adr) {
MAM_Data_get_entries(MAM_DATA_DISTRIBUTED, MAM_DATA_CONSTANT, &entries);
group->async_qty = (int *) malloc(entries * sizeof(int));
group->async_array = (char **) malloc(entries * sizeof(char *));
for(i=0; i<entries; i++) {
MAM_Data_get_pointer(&value, i, &total_qty, &type, MAM_DATA_DISTRIBUTED, MAM_DATA_CONSTANT);
group->async_array[i] = (char *)value;
group->async_qty[i] = DR_MAX_SIZE;
}
group->async_qty[entries-1] = config_file->adr % DR_MAX_SIZE ? config_file->adr % DR_MAX_SIZE : DR_MAX_SIZE;
group->async_data_groups = entries;
}
}
void update_targets() { //FIXME Should not be needed after redist -- Declare earlier
size_t i, entries, total_qty;
void *value = NULL;
MPI_Datatype type;
MAM_Data_get_pointer(&value, 0, &total_qty, &type, MAM_DATA_REPLICATED, MAM_DATA_VARIABLE);
group->iter_start = *((int *)value);
if(config_file->sdr) {
MAM_Data_get_entries(MAM_DATA_DISTRIBUTED, MAM_DATA_VARIABLE, &entries);
group->sync_qty = (int *) malloc(entries * sizeof(int));
group->sync_array = (char **) malloc(entries * sizeof(char *));
for(i=0; i<entries; i++) {
MAM_Data_get_pointer(&value, i, &total_qty, &type, MAM_DATA_DISTRIBUTED, MAM_DATA_VARIABLE);
group->sync_array[i] = (char *)value;
group->sync_qty[i] = DR_MAX_SIZE;
}
group->sync_qty[entries-1] = config_file->sdr % DR_MAX_SIZE ? config_file->sdr % DR_MAX_SIZE : DR_MAX_SIZE;
group->sync_data_groups = entries;
}
}
void user_redistribution(void *args) {
int commited;
mam_user_reconf_t user_reconf;
MAM_Get_Reconf_Info(&user_reconf);
new_comm = user_reconf.comm;
if(user_reconf.rank_state == MAM_PROC_NEW_RANK) {
init_targets();
} else {
send_config_file(config_file, ROOT, new_comm);
results_comm(results, ROOT, config_file->n_resizes, new_comm);
print_local_results();
if(user_reconf.rank_state == MAM_PROC_ZOMBIE) {
free_zombie_process();
}
}
MAM_Resume_redistribution(&commited);
}
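In short, this callback covers the three roles a process can take during a reconfiguration: newly spawned ranks receive the configuration and results (init_targets), surviving ranks resend them and print their local results, and zombie ranks additionally free their memory; every path ends by calling MAM_Resume_redistribution.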
......@@ -28,7 +28,8 @@ typedef struct {
typedef struct
{
int pt; // Procedure type
int pt; // Procedure type to execute
int id; // Stage identifier
// Whether the stage completes after "operations" iterations (0)
// or after "t_stage" time has passed (1).
int t_capped;
......@@ -41,6 +42,9 @@ typedef struct
// Arrays to communicate data;
char* array, *full_array;
double* double_array;
int req_count;
MPI_Request *reqs;
// Arrays to indicate how many bytes are received from each rank
struct Counts counts;
......@@ -49,7 +53,9 @@ typedef struct
typedef struct
{
int iters, procs;
int sm, ss, phy_dist, rm, rs;
int sm, phy_dist, rm;
int *ss, *rs;
size_t ss_len, rs_len;
float factor;
} group_config_t;
......@@ -57,11 +63,11 @@ typedef struct
{
size_t n_groups, n_resizes, n_stages; // n_groups==n_resizes+1
size_t actual_group, actual_stage;
int rigid_times;
int rigid_times, capture_method;
int granularity;
size_t sdr, adr;
MPI_Datatype config_type, group_type, iter_stage_type;
MPI_Datatype config_type, group_type, group_strats_type, iter_stage_type;
iter_stage_t *stages;
group_config_t *groups;
} configuration;
......
......@@ -7,16 +7,17 @@
/*
 * Performs an O(n^2) element-wise pass over a matrix of size n
*/
double computeMatrix(double *matrix, int n) { //FIXME Does not yield repeatable times
double computeMatrix(double *matrix, int n) {
int row, col;
double aux;
aux=0;
for(row=0; row<n; row++) {
for(col=0; col<n; col++) {
aux += ( (int)(matrix[row*n + col] + exp(sqrt(row*col))) % n);
aux += (int)(matrix[row*n + col] * matrix[row*n + col]);
}
}
return aux;
}
......@@ -42,20 +43,25 @@ double computePiSerial(int n) {
*/
void initMatrix(double **matrix, size_t n) {
size_t i, j;
double *aux = NULL;
freeMatrix(matrix);
// Init matrix
  if(matrix != NULL) {
    aux = (double *) malloc(n * n * sizeof(double));
    if(aux == NULL) { perror("Computing matrix could not be allocated"); MPI_Abort(MPI_COMM_WORLD, -1); }
    for(i=0; i < n; i++) {
      for(j=0; j < n; j++) {
        aux[i*n + j] = (i+j) * 1.1;
      }
    }
    *matrix = aux;
  }
}
void freeMatrix(double **matrix) {
// Free the matrix if it was allocated
if(*matrix != NULL) {
......
......@@ -21,15 +21,20 @@ void point_to_point(int myId, int numP, int root, MPI_Comm comm, char *array, in
}
}
void point_to_point_inter(int myId, int numP, MPI_Comm comm, char *array, int qty) {
void point_to_point_inter(int myId, int numP, MPI_Comm comm, char *array, char *r_array, int qty) {
int target;
target = (myId + numP/2)%numP;
MPI_Sendrecv(array, qty, MPI_CHAR, target, 99, r_array, qty, MPI_CHAR, target, 99, comm, MPI_STATUS_IGNORE);
}
void point_to_point_asynch_inter(int myId, int numP, MPI_Comm comm, char *array, char *r_array, int qty, MPI_Request *reqs) {
  int target;
  target = (myId + numP/2)%numP;
  if(myId < numP/2) {
    MPI_Isend(array, qty, MPI_CHAR, target, 99, comm, &(reqs[0]));
    MPI_Irecv(r_array, qty, MPI_CHAR, target, 99, comm, &(reqs[1]));
  } else {
    MPI_Irecv(r_array, qty, MPI_CHAR, target, 99, comm, &(reqs[0]));
    MPI_Isend(array, qty, MPI_CHAR, target, 99, comm, &(reqs[1]));
  }
}
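A minimal usage sketch (assuming one request pair per call, which is the reqs[i*2] convention used by process_stage):

    MPI_Request reqs[2];
    point_to_point_asynch_inter(myId, numP, comm, array, r_array, qty, reqs);
    /* ... overlap computation with the communication ... */
    MPI_Waitall(2, reqs, MPI_STATUSES_IGNORE);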
......@@ -7,6 +7,8 @@
void point_to_point(int myId, int numP, int root, MPI_Comm comm, char *array, int qty);
void point_to_point_inter(int myId, int numP, MPI_Comm comm, char *array, int qty);
void point_to_point_inter(int myId, int numP, MPI_Comm comm, char *array, char *r_array, int qty);
void point_to_point_asynch_inter(int myId, int numP, MPI_Comm comm, char *array, char *r_array, int qty, MPI_Request *reqs);
#endif
......@@ -4,14 +4,16 @@
#include <mpi.h>
#include "../IOcodes/read_ini.h"
#include "configuration.h"
#include "../malleability/spawn_methods/ProcessDist.h"
#include "../malleability/distribution_methods/block_distribution.h"
void malloc_config_resizes(configuration *user_config);
void malloc_config_stages(configuration *user_config);
void free_config_stage(iter_stage_t *stage, int *freed_ids, size_t *found_ids);
void def_struct_config_file(configuration *config_file);
void def_struct_groups(configuration *config_file);
void def_struct_groups_strategies(configuration *config_file);
void def_struct_iter_stage(configuration *config_file);
/*
......@@ -48,6 +50,7 @@ void init_config(char *file_name, configuration **user_config) {
*user_config=config;
}
def_struct_config_file(*user_config);
def_struct_groups_strategies(*user_config);
}
/*
......@@ -69,10 +72,12 @@ void malloc_config_resizes(configuration *user_config) {
user_config->groups[i].iters = 0;
user_config->groups[i].procs = 1;
user_config->groups[i].sm = 0;
user_config->groups[i].ss = 1;
user_config->groups[i].ss = NULL;
user_config->groups[i].ss_len = 0;
user_config->groups[i].phy_dist = 0;
user_config->groups[i].rm = 0;
user_config->groups[i].rs = 1;
user_config->groups[i].rs = NULL;
user_config->groups[i].rs_len = 0;
user_config->groups[i].factor = 1;
}
def_struct_groups(user_config);
......@@ -96,12 +101,14 @@ void malloc_config_stages(configuration *user_config) {
user_config->stages[i].array = NULL;
user_config->stages[i].full_array = NULL;
user_config->stages[i].double_array = NULL;
user_config->stages[i].reqs = NULL;
user_config->stages[i].counts.counts = NULL;
user_config->stages[i].bytes = 0;
user_config->stages[i].my_bytes = 0;
user_config->stages[i].real_bytes = 0;
user_config->stages[i].operations = 0;
user_config->stages[i].pt = 0;
user_config->stages[i].id = -1;
user_config->stages[i].t_op = 0;
user_config->stages[i].t_stage = 0;
user_config->stages[i].t_capped = 0;
......@@ -115,25 +122,18 @@ void malloc_config_stages(configuration *user_config) {
 * Frees all the memory of a configuration structure
*/
void free_config(configuration *user_config) {
  size_t i, found_ids;
  int *freed_ids;
  found_ids = 0;
  if(user_config != NULL) {
    freed_ids = (int *) malloc(user_config->n_stages * sizeof(int));
    for(i=0; i < user_config->n_stages; i++) {
      free_config_stage(&(user_config->stages[i]), freed_ids, &found_ids);
}
for(i=0; i < user_config->n_groups; i++) {
free(user_config->groups[i].ss);
free(user_config->groups[i].rs);
}
//Free the derived datatypes
MPI_Type_free(&(user_config->config_type));
......@@ -142,15 +142,67 @@ void free_config(configuration *user_config) {
MPI_Type_free(&(user_config->group_type));
user_config->group_type = MPI_DATATYPE_NULL;
MPI_Type_free(&(user_config->group_strats_type));
user_config->group_strats_type = MPI_DATATYPE_NULL;
MPI_Type_free(&(user_config->iter_stage_type));
user_config->iter_stage_type = MPI_DATATYPE_NULL;
free(user_config->groups);
free(user_config->stages);
free(user_config);
free(freed_ids);
}
}
/*
 * Frees all the memory of a stage. Stages sharing the same identifier
 * also share their MPI requests, so those are freed only once
 * (tracked through "freed_ids").
*/
void free_config_stage(iter_stage_t *stage, int *freed_ids, size_t *found_ids) {
size_t i;
int mpi_index, free_reqs;
free_reqs = 1;
if(stage->id > -1) {
for(i=0; i<*found_ids; i++) {
if(stage->id == freed_ids[i]) {
free_reqs = 0;
break;
}
}
if(free_reqs) {
freed_ids[*found_ids] = stage->id;
*found_ids=*found_ids + 1;
}
}
if(stage->array != NULL) {
free(stage->array);
stage->array = NULL;
}
if(stage->full_array != NULL) {
free(stage->full_array);
stage->full_array = NULL;
}
if(stage->double_array != NULL) {
free(stage->double_array);
stage->double_array = NULL;
}
if(stage->reqs != NULL && free_reqs) {
for(mpi_index=0; mpi_index<stage->req_count; mpi_index++) {
if(stage->reqs[mpi_index] != MPI_REQUEST_NULL) {
MPI_Request_free(&(stage->reqs[mpi_index]));
stage->reqs[mpi_index] = MPI_REQUEST_NULL;
}
}
free(stage->reqs);
stage->reqs = NULL;
}
if(stage->counts.counts != NULL) {
freeCounts(&(stage->counts));
}
}
/*
 * Prints to standard output all the information contained
......@@ -158,18 +210,27 @@ void free_config(configuration *user_config) {
*/
void print_config(configuration *user_config) {
if(user_config != NULL) {
size_t i;
printf("Config loaded: R=%zu, S=%zu, granularity=%d, SDR=%zu, ADR=%zu\n",
user_config->n_resizes, user_config->n_stages, user_config->granularity, user_config->sdr, user_config->adr);
size_t i, j;
printf("Config loaded: R=%zu, S=%zu, granularity=%d, SDR=%zu, ADR=%zu, Rigid=%d, Capture_Method=%d\n",
user_config->n_resizes, user_config->n_stages, user_config->granularity, user_config->sdr, user_config->adr, user_config->rigid_times, user_config->capture_method);
for(i=0; i<user_config->n_stages; i++) {
printf("Stage %zu: PT=%d, T_stage=%lf, bytes=%d, T_capped=%d\n",
i, user_config->stages[i].pt, user_config->stages[i].t_stage, user_config->stages[i].real_bytes, user_config->stages[i].t_capped);
}
for(i=0; i<user_config->n_groups; i++) {
printf("Group %zu: Iters=%d, Procs=%d, Factors=%f, Dist=%d, RM=%d, RS=%d, SM=%d, SS=%d\n",
printf("Group %zu: Iters=%d, Procs=%d, Factors=%f, Dist=%d, RM=%d, SM=%d",
i, user_config->groups[i].iters, user_config->groups[i].procs, user_config->groups[i].factor,
user_config->groups[i].phy_dist, user_config->groups[i].rm, user_config->groups[i].rs,
user_config->groups[i].sm, user_config->groups[i].ss);
user_config->groups[i].phy_dist, user_config->groups[i].rm, user_config->groups[i].sm);
printf(", RS=%d", user_config->groups[i].rs[0]);
for(j=1; j<user_config->groups[i].rs_len; j++) {
printf("/%d", user_config->groups[i].rs[j]);
}
printf(", SS=%d", user_config->groups[i].ss[0]);
for(j=1; j<user_config->groups[i].ss_len; j++) {
printf("/%d", user_config->groups[i].ss[j]);
}
printf("\n");
}
}
}
......@@ -191,16 +252,24 @@ void print_config_group(configuration *user_config, size_t grp) {
sons = user_config->groups[grp+1].procs;
}
printf("Config: granularity=%d, SDR=%zu, ADR=%zu\n",
user_config->granularity, user_config->sdr, user_config->adr);
printf("Config: granularity=%d, SDR=%zu, ADR=%zu, Rigid=%d, Capture_Method=%d\n",
user_config->granularity, user_config->sdr, user_config->adr, user_config->rigid_times, user_config->capture_method);
for(i=0; i<user_config->n_stages; i++) {
printf("Stage %zu: PT=%d, T_stage=%lf, bytes=%d, T_capped=%d\n",
i, user_config->stages[i].pt, user_config->stages[i].t_stage, user_config->stages[i].real_bytes, user_config->stages[i].t_capped);
}
printf("Group %zu: Iters=%d, Procs=%d, Factors=%f, Dist=%d, RM=%d, RS=%d, SM=%d, SS=%d, parents=%d, children=%d\n",
grp, user_config->groups[grp].iters, user_config->groups[grp].procs, user_config->groups[grp].factor,
user_config->groups[grp].phy_dist, user_config->groups[grp].rm, user_config->groups[grp].rs,
user_config->groups[grp].sm, user_config->groups[grp].ss, parents, sons);
printf("Group %zu: Iters=%d, Procs=%d, Factors=%f, Dist=%d, RM=%d, SM=%d", grp, user_config->groups[grp].iters, user_config->groups[grp].procs, user_config->groups[grp].factor,
user_config->groups[grp].phy_dist, user_config->groups[grp].rm, user_config->groups[grp].sm);
printf(", RS=%d", user_config->groups[grp].rs[0]);
for(i=1; i<user_config->groups[grp].rs_len; i++) {
printf("/%d", user_config->groups[grp].rs[i]);
}
printf(", SS=%d", user_config->groups[grp].ss[0]);
for(i=1; i<user_config->groups[grp].ss_len; i++) {
printf("/%d", user_config->groups[grp].ss[i]);
}
printf(", parents=%d, children=%d\n", parents, sons);
}
}
......@@ -221,8 +290,9 @@ void print_config_group(configuration *user_config, size_t grp) {
*/
void send_config_file(configuration *config_file, int root, MPI_Comm intercomm) {
MPI_Bcast(config_file, 1, config_file->config_type, root, intercomm);
MPI_Bcast(config_file->stages, config_file->n_stages, config_file->iter_stage_type, root, intercomm);
MPI_Bcast(config_file->groups, config_file->n_groups, config_file->group_type, root, intercomm);
MPI_Bcast(config_file->groups, 1, config_file->group_strats_type, root, intercomm);
}
......@@ -239,6 +309,7 @@ void send_config_file(configuration *config_file, int root, MPI_Comm intercomm)
 * the "free_config" function.
*/
void recv_config_file(int root, MPI_Comm intercomm, configuration **config_file_out) {
size_t i;
configuration *config_file = malloc(sizeof(configuration));
def_struct_config_file(config_file);
......@@ -246,29 +317,36 @@ void recv_config_file(int root, MPI_Comm intercomm, configuration **config_file_
//Initialization of internal structures
config_file->n_resizes = config_file->n_groups-1;
malloc_config_stages(config_file); // Initialize stage vectors to NULL
malloc_config_resizes(config_file); // Initialize group values

MPI_Bcast(config_file->stages, config_file->n_stages, config_file->iter_stage_type, root, intercomm);
MPI_Bcast(config_file->groups, config_file->n_groups, config_file->group_type, root, intercomm);
for(i=0; i<config_file->n_groups; i++) {
config_file->groups[i].ss = (int *) malloc(config_file->groups[i].ss_len * sizeof(int));
config_file->groups[i].rs = (int *) malloc(config_file->groups[i].rs_len * sizeof(int));
}
def_struct_groups_strategies(config_file); // Initialize the group strategy vectors
MPI_Bcast(config_file->groups, 1, config_file->group_strats_type, root, intercomm);
*config_file_out = config_file;
}
/*
 * Derived datatype to send 7 specific elements
 * of the configuration structure with a single communication.
*/
void def_struct_config_file(configuration *config_file) {
int i, counts = 6;
int blocklengths[6] = {1, 1, 1, 1, 1, 1};
int i, counts = 7;
int blocklengths[7] = {1, 1, 1, 1, 1, 1, 1};
MPI_Aint displs[counts], dir;
MPI_Datatype types[counts];
MPI_Datatype types[counts], type_size_t;
MPI_Type_match_size(MPI_TYPECLASS_INTEGER, sizeof(size_t), &type_size_t);
// Fill the types vector
types[0] = types[1] = types[2] = types[3] = MPI_UNSIGNED_LONG;
types[4] = types[5] = MPI_INT;
types[0] = types[1] = types[2] = types[3] = type_size_t;
types[4] = types[5] = types[6] = MPI_INT;
// Fill the displs vector
MPI_Get_address(config_file, &dir);
......@@ -279,6 +357,7 @@ void def_struct_config_file(configuration *config_file) {
MPI_Get_address(&(config_file->adr), &displs[3]);
MPI_Get_address(&(config_file->granularity), &displs[4]);
MPI_Get_address(&(config_file->rigid_times), &displs[5]);
MPI_Get_address(&(config_file->capture_method), &displs[6]);
for(i=0;i<counts;i++) displs[i] -= dir;
......@@ -295,11 +374,13 @@ void def_struct_groups(configuration *config_file) {
int i, counts = 8;
int blocklengths[8] = {1, 1, 1, 1, 1, 1, 1, 1};
MPI_Aint displs[counts], dir;
MPI_Datatype aux, types[counts];
MPI_Datatype types[counts], type_size_t, aux;
group_config_t *groups = config_file->groups;
MPI_Type_match_size(MPI_TYPECLASS_INTEGER, sizeof(size_t), &type_size_t);
// Fill the types vector
types[0] = types[1] = types[2] = types[3] = types[4] = types[5] = types[6] = MPI_INT;
types[0] = types[1] = types[2] = types[4] = types[5] = MPI_INT;
types[3] = types[6] = type_size_t;
types[7] = MPI_FLOAT;
// Fill the displs vector
......@@ -308,10 +389,10 @@ void def_struct_groups(configuration *config_file) {
MPI_Get_address(&(groups->iters), &displs[0]);
MPI_Get_address(&(groups->procs), &displs[1]);
MPI_Get_address(&(groups->sm), &displs[2]);
MPI_Get_address(&(groups->ss), &displs[3]);
MPI_Get_address(&(groups->ss_len), &displs[3]);
MPI_Get_address(&(groups->phy_dist), &displs[4]);
MPI_Get_address(&(groups->rm), &displs[5]);
MPI_Get_address(&(groups->rs), &displs[6]);
MPI_Get_address(&(groups->rs_len), &displs[6]);
MPI_Get_address(&(groups->factor), &displs[7]);
for(i=0;i<counts;i++) displs[i] -= dir;
......@@ -328,29 +409,66 @@ void def_struct_groups(configuration *config_file) {
}
}
/*
 * Derived datatype to send the strategies of
 * each group with a single communication.
*/
void def_struct_groups_strategies(configuration *config_file) {
int i, counts = config_file->n_groups*2;
int *blocklengths;
MPI_Aint *displs, dir;
MPI_Datatype *types;
group_config_t *group;
blocklengths = (int *) malloc(counts * sizeof(int));
displs = (MPI_Aint *) malloc(counts * sizeof(MPI_Aint));
types = (MPI_Datatype *) malloc(counts * sizeof(MPI_Datatype));
MPI_Get_address(config_file->groups, &dir);
for(i = 0; i < counts; i+=2) {
group = &(config_file->groups[i/2]);
MPI_Get_address(group->ss, &displs[i]);
MPI_Get_address(group->rs, &displs[i+1]);
displs[i] -= dir;
displs[i+1] -= dir;
types[i] = types[i+1] = MPI_INT;
blocklengths[i] = group->ss_len;
blocklengths[i+1] = group->rs_len;
}
MPI_Type_create_struct(counts, blocklengths, displs, types, &config_file->group_strats_type);
MPI_Type_commit(&config_file->group_strats_type);
free(blocklengths);
free(displs);
free(types);
}
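Note: a second derived type is needed here because ss and rs are now variable-length, heap-allocated arrays; their lengths travel inside group_type (ss_len, rs_len), while this per-group struct type describes the strategy payloads themselves, which is also why receivers must allocate those arrays before committing it.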
/*
 * Derived datatype to send specific elements of the
 * iteration stage structures in a single communication.
*/
void def_struct_iter_stage(configuration *config_file) {
int i, counts = 5;
int blocklengths[5] = {1, 1, 1, 1, 1};
int i, counts = 6;
int blocklengths[6] = {1, 1, 1, 1, 1, 1};
MPI_Aint displs[counts], dir;
MPI_Datatype aux, types[counts];
iter_stage_t *stages = config_file->stages;
// Fill the types vector
types[0] = types[1] = types[2] = MPI_INT;
types[3] = types[4] = MPI_DOUBLE;
types[0] = types[1] = types[2] = types[3] = MPI_INT;
types[4] = types[5] = MPI_DOUBLE;
// Fill the displs vector
MPI_Get_address(stages, &dir);
MPI_Get_address(&(stages->pt), &displs[0]);
MPI_Get_address(&(stages->bytes), &displs[1]);
MPI_Get_address(&(stages->t_capped), &displs[2]);
MPI_Get_address(&(stages->t_stage), &displs[3]);
MPI_Get_address(&(stages->t_op), &displs[4]);
MPI_Get_address(&(stages->id), &displs[1]);
MPI_Get_address(&(stages->bytes), &displs[2]);
MPI_Get_address(&(stages->t_capped), &displs[3]);
MPI_Get_address(&(stages->t_stage), &displs[4]);
MPI_Get_address(&(stages->t_op), &displs[5]);
for(i=0;i<counts;i++) displs[i] -= dir;
......
......@@ -9,13 +9,17 @@
#include "../malleability/distribution_methods/block_distribution.h"
double init_emulation_comm_time(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm);
double init_emulation_icomm_time(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm);
double init_matrix_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm, int compute);
double init_pi_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm, int compute);
void init_comm_ptop_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm);
double init_comm_ptop_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm, int compute);
double init_comm_iptop_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm, int compute);
double init_comm_bcast_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm, int compute);
double init_comm_allgatherv_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm, int compute);
double init_comm_reduce_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm, int compute);
double init_comm_wait_pt(configuration *config_file, iter_stage_t *stage);
/*
 * Computes the time per operation or the total bytes to send
......@@ -36,7 +40,7 @@ double init_comm_reduce_pt(group_data group, configuration *config_file, iter_st
*/
double init_stage(configuration *config_file, int stage_i, group_data group, MPI_Comm comm, int compute) {
double result = 0;
int qty = 20000;
int qty = 5000;
iter_stage_t *stage = &(config_file->stages[stage_i]);
stage->operations = qty;
......@@ -52,7 +56,10 @@ double init_stage(configuration *config_file, int stage_i, group_data group, MPI
//Communication
case COMP_POINT:
init_comm_ptop_pt(group, config_file, stage, comm);
result = init_comm_ptop_pt(group, config_file, stage, comm, compute);
break;
case COMP_IPOINT:
result = init_comm_iptop_pt(group, config_file, stage, comm, compute);
break;
case COMP_BCAST:
result = init_comm_bcast_pt(group, config_file, stage, comm, compute);
......@@ -64,6 +71,9 @@ double init_stage(configuration *config_file, int stage_i, group_data group, MPI
case COMP_ALLREDUCE:
result = init_comm_reduce_pt(group, config_file, stage, comm, compute);
break;
case COMP_WAIT:
result = init_comm_wait_pt(config_file, stage);
break;
}
return result;
}
......@@ -93,7 +103,22 @@ double process_stage(configuration config_file, iter_stage_t stage, group_data g
break;
//Communications
case COMP_POINT:
point_to_point(group.myId, group.numP, ROOT, comm, stage.array, stage.real_bytes);
if(stage.t_capped) {
while(t_total < stage.t_stage) {
point_to_point_inter(group.myId, group.numP, comm, stage.array, stage.full_array, stage.real_bytes);
t_total = MPI_Wtime() - t_start;
MPI_Bcast(&t_total, 1, MPI_DOUBLE, ROOT, comm);
}
} else {
for(i=0; i < stage.operations; i++) {
point_to_point_inter(group.myId, group.numP, comm, stage.array, stage.full_array, stage.real_bytes);
}
}
break;
case COMP_IPOINT:
for(i=0; i < stage.operations; i++) {
point_to_point_asynch_inter(group.myId, group.numP, comm, stage.array, stage.full_array, stage.real_bytes, &(stage.reqs[i*2])); //FIXME Magical number
}
break;
case COMP_BCAST:
......@@ -148,6 +173,33 @@ double process_stage(configuration config_file, iter_stage_t stage, group_data g
}
}
break;
case COMP_WAIT:
if(stage.t_capped) { //FIXME Right now, COMP_WAIT with t_capped only works for P2P comms
int remaining;
i = 0;
// Wait until t_stage time has passed
while(t_total < stage.t_stage) {
MPI_Waitall(2, &(stage.reqs[i*2]), MPI_STATUSES_IGNORE); //FIXME Magical number
t_total = MPI_Wtime() - t_start;
i++;
MPI_Bcast(&t_total, 1, MPI_DOUBLE, ROOT, comm);
}
remaining = stage.operations - i;
// If there are operations remaining, cancel and complete them
if (remaining) {
for(; i < stage.operations; i++) {
MPI_Cancel(&(stage.reqs[i*2])); //FIXME Magical number
MPI_Cancel(&(stage.reqs[i*2+1])); //FIXME Magical number
}
MPI_Waitall(remaining*2, &(stage.reqs[(stage.operations-remaining)*2]), MPI_STATUSES_IGNORE); //FIXME Magical number
}
} else {
MPI_Waitall(stage.req_count, stage.reqs, MPI_STATUSES_IGNORE);
}
break;
}
return result;
}
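The time-capped branch above relies on all ranks agreeing on when to stop: the root's elapsed time is broadcast every iteration so no rank keeps communicating after the others leave the loop. A minimal standalone sketch of this pattern (assuming root rank 0 and a dummy one-byte payload; not Proteo's actual code):
#include <mpi.h>
static void time_capped_loop(double t_stage, MPI_Comm comm) {
  double t_start, t_total = 0;
  char byte = 0;
  MPI_Barrier(comm);                             /* Common starting point */
  t_start = MPI_Wtime();
  while(t_total < t_stage) {
    MPI_Bcast(&byte, 1, MPI_CHAR, 0, comm);      /* Stand-in for the real exchange */
    t_total = MPI_Wtime() - t_start;
    MPI_Bcast(&t_total, 1, MPI_DOUBLE, 0, comm); /* The root's clock decides for everyone */
  }
}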
......@@ -176,6 +228,32 @@ double init_emulation_comm_time(group_data group, configuration *config_file, it
return time;
}
double init_emulation_icomm_time(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm) {
double start_time, end_time, time = 0;
double t_stage;
iter_stage_t wait_stage;
wait_stage.pt = COMP_WAIT;
wait_stage.id = stage->id;
wait_stage.operations = stage->operations;
wait_stage.req_count = stage->req_count;
wait_stage.reqs = stage->reqs;
MPI_Barrier(comm);
start_time = MPI_Wtime();
process_stage(*config_file, *stage, group, comm);
process_stage(*config_file, wait_stage, group, comm);
MPI_Barrier(comm);
end_time = MPI_Wtime();
stage->t_op = (end_time - start_time) / stage->operations; //Time per operation
t_stage = stage->t_stage * config_file->groups[group.grp].factor;
stage->operations = ceil(t_stage / stage->t_op);
MPI_Bcast(&(stage->operations), 1, MPI_INT, ROOT, comm);
return time;
}
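For illustration of this calibration (numbers assumed): if the timed run of 5000 operations takes 0.5 s in total, then t_op = 1e-4 s; a requested stage time of 2 s with factor 1.0 gives operations = ceil(2 / 1e-4) = 20000, which is broadcast from the root so every rank executes the same count.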
double init_matrix_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm, int compute) {
double result, t_stage, start_time;
......@@ -218,20 +296,57 @@ double init_pi_pt(group_data group, configuration *config_file, iter_stage_t *st
return result;
}
void init_comm_ptop_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm) {
int aux_bytes = stage->bytes;
double init_comm_ptop_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm, int compute) {
double time = 0;
if(stage->array != NULL)
free(stage->array);
if(stage->full_array != NULL)
free(stage->full_array);
stage->real_bytes = (stage->bytes && !stage->t_capped) ? stage->bytes : config_file->granularity;
stage->array = calloc(stage->real_bytes, sizeof(char));
stage->full_array = calloc(stage->real_bytes, sizeof(char));
if(compute && !stage->bytes && !stage->t_capped) {
time = init_emulation_comm_time(group, config_file, stage, comm);
} else {
stage->operations = 1;
}
return time;
}
double init_comm_iptop_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm, int compute) {
int i;
double time = 0;
if(stage->array != NULL)
free(stage->array);
if(aux_bytes == 0) {
MPI_Barrier(comm);
//aux_bytes = (stage->t_stage - config_file->latency_m) * config_file->bw_m;
init_emulation_comm_time(group, config_file, stage, comm);
if(stage->full_array != NULL)
free(stage->full_array);
if(stage->reqs != NULL) //FIXME May be erroneous if requests are still active...
free(stage->reqs);
stage->real_bytes = (stage->bytes && !stage->t_capped) ? stage->bytes : config_file->granularity;
stage->array = calloc(stage->real_bytes, sizeof(char));
stage->full_array = calloc(stage->real_bytes, sizeof(char));
if(compute && !stage->bytes) { // t_capped is not considered in this case
stage->req_count = 2 * stage->operations; //FIXME Magical number
stage->reqs = (MPI_Request *) malloc(stage->req_count * sizeof(MPI_Request));
time = init_emulation_icomm_time(group, config_file, stage, comm);
free(stage->reqs);
} else {
stage->operations = 1;
}
stage->real_bytes = aux_bytes;
stage->array = malloc(stage->real_bytes * sizeof(char));
stage->req_count = 2 * stage->operations; //FIXME Magical number
stage->reqs = (MPI_Request *) malloc(stage->req_count * sizeof(MPI_Request));
for(i=0; i < stage->req_count; i++) {
stage->reqs[i] = MPI_REQUEST_NULL;
}
return time;
}
// TODO Compute should be always 1 if the number of processes is different
double init_comm_bcast_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm, int compute) {
double time = 0;
......@@ -239,7 +354,7 @@ double init_comm_bcast_pt(group_data group, configuration *config_file, iter_sta
free(stage->array);
stage->real_bytes = (stage->bytes && !stage->t_capped) ? stage->bytes : config_file->granularity;
stage->array = malloc(stage->real_bytes * sizeof(char)); //FIXME Valgrind reports uninitialised
stage->array = calloc(stage->real_bytes, sizeof(char)); //FIXME Valgrind reports uninitialised
if(compute && !stage->bytes && !stage->t_capped) {
time = init_emulation_comm_time(group, config_file, stage, comm);
......@@ -268,8 +383,8 @@ double init_comm_allgatherv_pt(group_data group, configuration *config_file, ite
get_block_dist(stage->real_bytes, group.myId, group.numP, &dist_data);
stage->my_bytes = dist_data.tamBl;
stage->array = malloc(stage->my_bytes * sizeof(char));
stage->full_array = malloc(stage->real_bytes * sizeof(char));
stage->array = calloc(stage->my_bytes, sizeof(char));
stage->full_array = calloc(stage->real_bytes, sizeof(char));
if(compute && !stage->bytes && !stage->t_capped) {
time = init_emulation_comm_time(group, config_file, stage, comm);
......@@ -289,9 +404,9 @@ double init_comm_reduce_pt(group_data group, configuration *config_file, iter_st
free(stage->full_array);
stage->real_bytes = (stage->bytes && !stage->t_capped) ? stage->bytes : config_file->granularity;
stage->array = malloc(stage->real_bytes * sizeof(char));
stage->array = calloc(stage->real_bytes, sizeof(char));
//The full array for the reduce needs the same size
stage->full_array = malloc(stage->real_bytes * sizeof(char));
stage->full_array = calloc(stage->real_bytes, sizeof(char));
if(compute && !stage->bytes && !stage->t_capped) {
time = init_emulation_comm_time(group, config_file, stage, comm);
......@@ -301,3 +416,29 @@ double init_comm_reduce_pt(group_data group, configuration *config_file, iter_st
return time;
}
double init_comm_wait_pt(configuration *config_file, iter_stage_t *stage) {
size_t i;
double time = 0;
iter_stage_t aux_stage;
if(stage->id < 0) {
printf("Error when initializing wait stage. Id is negative\n");
MPI_Abort(MPI_COMM_WORLD, -1);
return -1;
}
for(i=0; i<config_file->n_stages; i++) {
aux_stage = config_file->stages[i];
if(aux_stage.id == stage->id) { break; }
}
if(i == config_file->n_stages) {
printf("Error when initializing wait stage. Not found a corresponding id\n");
MPI_Abort(MPI_COMM_WORLD, -1);
return -1;
}
stage->req_count = aux_stage.req_count;
stage->reqs = aux_stage.reqs;
return time;
}
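A COMP_WAIT stage therefore completes the requests issued by an earlier COMP_IPOINT stage with the same identifier, aliasing its req_count and reqs fields instead of owning its own. A standalone sketch of this split-phase idea (peer rank, sizes and operation count are hypothetical):
#include <mpi.h>
#define OPS 4
static MPI_Request reqs[2*OPS];      /* Two requests (send+recv) per operation */
static void ipoint_stage(char *sbuf, char *rbuf, int bytes, int peer, MPI_Comm comm) {
  int i;
  for(i = 0; i < OPS; i++) {         /* Start all transfers without waiting */
    MPI_Isend(sbuf, bytes, MPI_CHAR, peer, i, comm, &reqs[i*2]);
    MPI_Irecv(rbuf, bytes, MPI_CHAR, peer, i, comm, &reqs[i*2+1]);
  }
}
static void wait_stage(void) {       /* The paired wait stage completes them */
  MPI_Waitall(2*OPS, reqs, MPI_STATUSES_IGNORE);
}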
......@@ -6,7 +6,8 @@
#include <mpi.h>
#include "Main_datatypes.h"
enum compute_methods{COMP_PI, COMP_MATRIX, COMP_POINT, COMP_BCAST, COMP_ALLGATHER, COMP_REDUCE, COMP_ALLREDUCE};
// 0 1 2 3 4 5 6 7 8
enum compute_methods{COMP_PI, COMP_MATRIX, COMP_POINT, COMP_IPOINT, COMP_WAIT, COMP_BCAST, COMP_ALLGATHER, COMP_REDUCE, COMP_ALLREDUCE};
double init_stage(configuration *config_file, int stage_i, group_data group, MPI_Comm comm, int compute);
//double stage_init_all();
......
......@@ -3,13 +3,23 @@ MCC = mpicc
#C_FLAGS_ALL = -Wconversion -Wpedantic
C_FLAGS = -Wall -Wextra -Wshadow -Wfatal-errors
LD_FLAGS = -lm -pthread
DEF =
MAM_USE_SLURM ?= 0
MAM_USE_BARRIERS ?= 0
MAM_DEBUG ?= 0
DEF = -DMAM_USE_SLURM=$(MAM_USE_SLURM) -DMAM_USE_BARRIERS=$(MAM_USE_BARRIERS) -DMAM_DEBUG=$(MAM_DEBUG)
.PHONY : clean clear install install_slurm
ifeq ($(MAM_USE_SLURM),1)
LD_FLAGS += -lslurm
endif
ifeq ($(shell test $(MAM_DEBUG) -gt 0; echo $$?),0)
C_FLAGS += -g
endif
# Final binary
BIN = a.out
CONFIG = config.txt
# Put all auto generated stuff to this build dir.
BUILD_DIR = ./build
......@@ -24,9 +34,31 @@ OBJ = $(C_FILES:%.c=$(BUILD_DIR)/%.o)
# Gcc will create these .d files containing dependencies.
DEP = $(OBJ:%.o=%.d)
# BASIC RULES
.PHONY : clean clear install
all: install
clean:
-rm $(BUILD_DIR)/$(BIN) $(BUILD_DIR)/$(CONFIG) $(OBJ) $(DEP)
clear:
-rm -rf $(BUILD_DIR)
install: $(BIN) $(CONFIG)
echo "Done"
# SPECIFIC RULES
# Default configuration file
$(CONFIG) : $(BUILD_DIR)/$(CONFIG)
# Default target named after the binary.
$(BIN) : $(BUILD_DIR)/$(BIN)
$(BUILD_DIR)/$(CONFIG) :
@mkdir -p $(@D)
@ echo -n "dir=\"" > $(BUILD_DIR)/$(CONFIG)
@ realpath -z $$(echo "$$(pwd)/..") | tr -d '\0' >> $(BUILD_DIR)/$(CONFIG)
@ echo "\"" >> $(BUILD_DIR)/$(CONFIG)
# Actual target of the binary - depends on all .o files.
$(BUILD_DIR)/$(BIN) : $(OBJ)
$(MCC) $(C_FLAGS) $^ -o $@ $(LD_FLAGS)
......@@ -41,19 +73,5 @@ $(BUILD_DIR)/$(BIN) : $(OBJ)
# The -MMD flag additionally creates a .d file with
# the same name as the .o file.
$(BUILD_DIR)/%.o : %.c
mkdir -p $(@D)
@mkdir -p $(@D)
$(MCC) $(C_FLAGS) $(DEF) -MMD -c $< -o $@
clean:
-rm $(BUILD_DIR)/$(BIN) $(OBJ) $(DEP)
clear:
-rm -rf $(BUILDDIR)
install: $(BIN)
echo "Done"
# Builds target with slurm
install_slurm: LD_FLAGS += -lslurm
install_slurm: DEF += -DUSE_SLURM
install_slurm: install
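Note on the new build flags: the removed install_slurm target is superseded by the MAM_USE_SLURM, MAM_USE_BARRIERS and MAM_DEBUG make variables above; for instance, an illustrative invocation such as make MAM_USE_SLURM=1 MAM_DEBUG=1 adds -lslurm to LD_FLAGS and -g to C_FLAGS.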
#ifndef COMMDIST_H
#define COMMDIST_H
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <string.h>
#include "malleabilityStates.h"
//#define MAL_COMM_COMPLETED 0
//#define MAL_COMM_UNINITIALIZED 2
//#define MAL_ASYNC_PENDING 1
//#define MAL_USE_NORMAL 0
//#define MAL_USE_IBARRIER 1
//#define MAL_USE_POINT 2
//#define MAL_USE_THREAD 3
int sync_communication(char *send, char **recv, int qty, int myId, int numP, int numO, int is_children_group, int comm_type, MPI_Comm comm);
//int async_communication(char *send, char **recv, int qty, int myId, int numP, int numO, int is_children_group, int red_method, int red_strategies, MPI_Comm comm, MPI_Request **requests, size_t *request_qty);
int async_communication_start(char *send, char **recv, int qty, int myId, int numP, int numO, int is_children_group, int red_method, int red_strategies, MPI_Comm comm, MPI_Request **requests, size_t *request_qty, MPI_Win *win);
int async_communication_check(int myId, int is_children_group, int red_strategies, MPI_Comm comm, MPI_Request *requests, size_t request_qty);
void async_communication_wait(int red_strategies, MPI_Comm comm, MPI_Request *requests, size_t request_qty);
void async_communication_end(int red_method, int red_strategies, MPI_Request *requests, size_t request_qty, MPI_Win *win);
//int send_async(char *array, int qty, int myId, int numP, MPI_Comm intercomm, int numP_child, MPI_Request **comm_req, int red_method, int red_strategies);
//void recv_async(char **array, int qty, int myId, int numP, MPI_Comm intercomm, int numP_parents, int red_method, int red_strategies);
void malloc_comm_array(char **array, int qty, int myId, int numP);
int malleability_red_contains_strat(int comm_strategies, int strategy, int *result);
int malleability_red_add_strat(int *comm_strategies, int strategy);
#endif
#ifndef MAM_H
#define MAM_H
#include "MAM_Constants.h"
#include "MAM_Manager.h"
#include "MAM_Configuration.h"
#include "MAM_Times_retrieve.h"
#endif
#include "MAM_Configuration.h"
#include "MAM_Init_Configuration.h"
#include "MAM_DataStructures.h"
#include <limits.h>
typedef struct {
unsigned int *value, default_value;
int config_max_length;
union {
int (*set_config_simple)(unsigned int, unsigned int *);
int (*set_config_complex)(unsigned int);
};
char *env_name;
} mam_config_setting_t;
int MAM_I_set_method(unsigned int new_method, unsigned int *method);
int MAM_I_set_spawn_strat(unsigned int strategy, unsigned int *strategies);
int MAM_I_set_red_strat(unsigned int strategy, unsigned int *strategies);
int MAM_I_set_target_number(unsigned int new_numC);
int MAM_I_configuration_get_defaults();
int MAM_I_contains_strat(unsigned int comm_strategies, unsigned int strategy);
int MAM_I_add_strat(unsigned int *comm_strategies, unsigned int strategy);
int MAM_I_remove_strat(unsigned int *comm_strategies, unsigned int strategy);
mam_config_setting_t configSettings[] = {
{NULL, MAM_SPAWN_MERGE, MAM_METHODS_SPAWN_LEN, {.set_config_simple = MAM_I_set_method }, MAM_SPAWN_METHOD_ENV},
{NULL, MAM_STRAT_SPAWN_CLEAR, MAM_STRATS_SPAWN_LEN, {.set_config_simple = MAM_I_set_spawn_strat }, MAM_SPAWN_STRATS_ENV},
{NULL, MAM_PHY_DIST_COMPACT, MAM_METHODS_PHYSICAL_DISTRIBUTION_LEN, {.set_config_simple = MAM_I_set_method }, MAM_PHYSICAL_DISTRIBUTION_METHOD_ENV},
{NULL, MAM_RED_BASELINE, MAM_METHODS_RED_LEN, {.set_config_simple = MAM_I_set_method }, MAM_RED_METHOD_ENV},
{NULL, MAM_STRAT_RED_CLEAR, MAM_STRATS_RED_LEN, {.set_config_simple = MAM_I_set_red_strat }, MAM_RED_STRATS_ENV},
{NULL, 1, INT_MAX, {.set_config_complex = MAM_I_set_target_number }, MAM_NUM_TARGETS_ENV}
};
unsigned int masks_spawn[] = {MAM_STRAT_CLEAR_VALUE, MAM_MASK_PTHREAD, MAM_MASK_SPAWN_SINGLE, MAM_MASK_SPAWN_INTERCOMM, MAM_MASK_SPAWN_MULTIPLE};
unsigned int masks_red[] = {MAM_STRAT_CLEAR_VALUE, MAM_MASK_PTHREAD, MAM_MASK_RED_WAIT_SOURCES, MAM_MASK_RED_WAIT_TARGETS};
/**
* @brief Set configuration parameters for MAM.
*
 * This function sets the configuration parameters of MAM: the spawn
 * method, the spawn strategies, the physical distribution of spawned
 * processes, the redistribution method, and the redistribution strategies.
 *
 * @param spawn_method The spawn method to use in the reconfiguration.
 * @param spawn_strategies The spawn strategies to use in the reconfiguration.
 * @param spawn_dist The physical distribution method for spawned processes.
 * @param red_method The redistribution method to use in the reconfiguration.
 * @param red_strategies The redistribution strategies to use in the reconfiguration.
*/
void MAM_Set_configuration(int spawn_method, int spawn_strategies, int spawn_dist, int red_method, int red_strategies) {
int i, aux;
int aux_array[] = {spawn_method, spawn_strategies, spawn_dist, red_method, red_strategies};
if(state > MAM_I_NOT_STARTED) return;
mam_config_setting_t *config = NULL;
for (i = 0; i < MAM_KEY_COUNT-1; i++) { //FIXME Magic number so num_targets is not changed
aux = aux_array[i];
config = &configSettings[i];
if (0 <= aux && aux < config->config_max_length) {
if(i == MAM_NUM_TARGETS) {
config->set_config_complex(aux);
} else {
config->set_config_simple(aux, config->value);
}
}
}
}
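An illustrative usage sketch of this call, assuming the constants declared in MAM_Constants.h below; the call has no effect once MAM has already started:
#include "MAM.h"
/* Illustrative: merge-based spawn with the pthread strategy, compact
 * placement, baseline redistribution waiting on the target processes. */
MAM_Set_configuration(MAM_SPAWN_MERGE, MAM_STRAT_SPAWN_PTHREAD,
                      MAM_PHY_DIST_COMPACT, MAM_RED_BASELINE,
                      MAM_STRAT_RED_WAIT_TARGETS);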
/*
* @brief Set the configuration value for a specific key in MAM.
*
* Modifies the configuration value associated with the given key
* to the specified "required" value. The final value set is returned in the
* "provided" parameter.
*
* @param key The key for which the configuration value is to be modified.
* @param required The required value to set for the specified key.
* @param provided Pointer to an integer where the final value set will be stored.
* This parameter is updated with the actual value after modification.
* For strategy keys the value is "MAM_STRATS_ADDED" if "required" has
* been added, or "MAM_STRATS_MODIFIED" if multiple strategies of the
* key have been modified.
*/
void MAM_Set_key_configuration(int key, int required, int *provided) {
int i, aux;
if(provided == NULL) provided = &aux;
*provided = MAM_DENIED;
if(required < 0 || state > MAM_I_NOT_STARTED) return;
mam_config_setting_t *config = NULL;
for (i = 0; i < MAM_KEY_COUNT; i++) {
if (key == i) {
config = &configSettings[i];
break;
}
}
if (config != NULL) {
if (required < config->config_max_length) {
if(i == MAM_NUM_TARGETS) {
*provided = config->set_config_complex(required);
} else {
*provided = config->set_config_simple(required, config->value);
}
} else {*provided = *(config->value); }
} else { printf("MAM: Key %d does not exist\n", key); }
}
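For a single key, a minimal sketch (constants from MAM_Constants.h; provided reports the value actually applied):
int provided;
MAM_Set_key_configuration(MAM_RED_METHOD, MAM_RED_POINT, &provided);
/* provided == MAM_RED_POINT on success; MAM_DENIED if MAM already started */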
/*
 * Returns whether a strategy is applied or not
*/
int MAM_Contains_strat(int key, unsigned int strategy, int *result) {
int strategies, aux = MAM_OK;
unsigned int len = 0, mask;
switch(key) {
case MAM_SPAWN_STRATEGIES:
strategies = mall_conf->spawn_strategies;
mask = masks_spawn[strategy];
len = MAM_STRATS_SPAWN_LEN;
break;
case MAM_RED_STRATEGIES:
strategies = mall_conf->red_strategies;
mask = masks_red[strategy];
len = MAM_STRATS_RED_LEN;
break;
default:
aux = MAM_DENIED;
break;
}
if(aux == MAM_OK && strategy < len) {
aux = MAM_I_contains_strat(strategies, mask);
} else {
aux = 0;
}
if(result != NULL) *result = aux;
return aux;
}
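And a matching query sketch (a nonzero result means the strategy is applied):
int applied;
MAM_Contains_strat(MAM_SPAWN_STRATEGIES, MAM_STRAT_SPAWN_PTHREAD, &applied);
/* applied != 0 iff the pthread spawn strategy is currently set */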
/*
* //TODO
 * Must be called after the configuration has been set
*/
int MAM_Set_target_number(unsigned int numC){
return MAM_I_set_target_number(numC);
}
/*
* //TODO
 * Must be called outside of a reconfiguration
*/
void MAM_Use_valgrind(int flag) {
if(state > MAM_I_NOT_STARTED) return;
mall_conf->external_usage = flag ? MAM_USE_VALGRIND: 0;
#if MAM_DEBUG
if(mall->myId == mall->root && flag) DEBUG_FUNC("Set Valgrind Wrapper", mall->myId, mall->numP); fflush(stdout);
#endif
}
/*
* //TODO
 * Must be called outside of a reconfiguration
*/
void MAM_Use_extrae(int flag) {
if(state > MAM_I_NOT_STARTED) return;
mall_conf->external_usage = flag ? MAM_USE_EXTRAE: 0;
#if MAM_DEBUG
if(mall->myId == mall->root && flag) DEBUG_FUNC("Set Extrae Wrapper", mall->myId, mall->numP); fflush(stdout);
#endif
}
//======================================================||
//===============MAM_INIT FUNCTIONS=====================||
//======================================================||
//======================================================||
void MAM_Init_configuration() {
if(mall == NULL || mall_conf == NULL) {
printf("MAM FATAL ERROR: Setting initial config without previous mallocs\n");
fflush(stdout);
MPI_Abort(MPI_COMM_WORLD, -50);
}
mall_conf->spawn_method = MAM_STRAT_CLEAR_VALUE;
mall_conf->spawn_strategies = MAM_STRAT_CLEAR_VALUE;
mall_conf->red_method = MAM_STRAT_CLEAR_VALUE;
mall_conf->red_strategies = MAM_STRAT_CLEAR_VALUE;
mall_conf->external_usage = 0;
configSettings[MAM_SPAWN_METHOD].value = &mall_conf->spawn_method;
configSettings[MAM_SPAWN_STRATEGIES].value = &mall_conf->spawn_strategies;
configSettings[MAM_PHYSICAL_DISTRIBUTION].value = &mall_conf->spawn_dist;
configSettings[MAM_RED_METHOD].value = &mall_conf->red_method;
configSettings[MAM_RED_STRATEGIES].value = &mall_conf->red_strategies;
}
void MAM_Set_initial_configuration() {
int not_filled = 1;
not_filled = MAM_I_configuration_get_defaults();
if(not_filled) {
if(mall->myId == mall->root) printf("MAM WARNING: Starting configuration not set\n");
fflush(stdout);
MPI_Abort(mall->comm, -50);
}
#if MAM_DEBUG >= 2
if(mall->myId == mall->root) {
DEBUG_FUNC("Initial configuration settled", mall->myId, mall->numP);
fflush(stdout);
}
#endif
}
void MAM_Check_configuration() {
int global_internodes;
if(mall->numC == mall->numP) { // Migrate
MAM_Set_key_configuration(MAM_SPAWN_METHOD, MAM_SPAWN_BASELINE, NULL);
}
MPI_Allreduce(&mall->internode_group, &global_internodes, 1, MPI_INT, MPI_MAX, mall->comm);
if(MAM_Contains_strat(MAM_SPAWN_STRATEGIES, MAM_STRAT_SPAWN_MULTIPLE, NULL)
&& global_internodes) { // Remove internode MPI_COMM_WORLDs
MAM_Set_key_configuration(MAM_SPAWN_METHOD, MAM_SPAWN_BASELINE, NULL);
}
if(mall_conf->spawn_method == MAM_SPAWN_MERGE) {
if(MAM_I_contains_strat(mall_conf->spawn_strategies, MAM_MASK_SPAWN_INTERCOMM)) {
MAM_I_remove_strat(&mall_conf->spawn_strategies, MAM_MASK_SPAWN_INTERCOMM);
}
if(mall->numP > mall->numC && MAM_I_contains_strat(mall_conf->spawn_strategies, MAM_MASK_SPAWN_SINGLE)) {
MAM_I_remove_strat(&mall_conf->spawn_strategies, MAM_MASK_SPAWN_SINGLE);
}
}
if(mall_conf->red_method == MAM_RED_RMA_LOCK || mall_conf->red_method == MAM_RED_RMA_LOCKALL) {
if(MAM_I_contains_strat(mall_conf->spawn_strategies, MAM_MASK_SPAWN_INTERCOMM)) {
MAM_I_remove_strat(&mall_conf->spawn_strategies, MAM_MASK_SPAWN_INTERCOMM);
}
if(!MAM_I_contains_strat(mall_conf->red_strategies, MAM_MASK_RED_WAIT_TARGETS) &&
!MAM_I_contains_strat(mall_conf->red_strategies, MAM_MASK_PTHREAD)) {
MAM_I_set_red_strat(MAM_STRAT_RED_WAIT_TARGETS, &mall_conf->red_strategies);
}
}
}
//======================================================||
//================PRIVATE FUNCTIONS=====================||
//======================================================||
//======================================================||
int MAM_I_configuration_get_defaults() {
size_t i;
int set_value;
char *tmp = NULL;
mam_config_setting_t *config = NULL;
for (i = 0; i < MAM_KEY_COUNT; i++) {
config = &configSettings[i];
tmp = getenv(config->env_name);
if(tmp != NULL) {
set_value = atoi(tmp);
} else {
set_value = config->default_value;
}
if (0 <= set_value && set_value < config->config_max_length) {
if(i == MAM_NUM_TARGETS) {
config->set_config_complex(set_value);
} else {
config->set_config_simple(set_value, config->value);
}
}
tmp = NULL;
}
return 0;
}
int MAM_I_set_method(unsigned int new_method, unsigned int *method) {
*method = new_method;
return *method;
}
int MAM_I_set_spawn_strat(unsigned int strategy, unsigned int *strategies) {
int result = 0;
int strat_removed = 0;
switch(strategy) {
case MAM_STRAT_SPAWN_CLEAR:
*strategies = MAM_STRAT_CLEAR_VALUE;
result = MAM_STRATS_MODIFIED;
break;
case MAM_STRAT_SPAWN_PTHREAD:
result = MAM_I_add_strat(strategies, MAM_MASK_PTHREAD);
break;
case MAM_STRAT_SPAWN_SINGLE:
result = MAM_I_add_strat(strategies, MAM_MASK_SPAWN_SINGLE);
break;
case MAM_STRAT_SPAWN_INTERCOMM:
result = MAM_I_add_strat(strategies, MAM_MASK_SPAWN_INTERCOMM);
break;
case MAM_STRAT_SPAWN_MULTIPLE:
result = MAM_I_add_strat(strategies, MAM_MASK_SPAWN_MULTIPLE);
break;
default:
//Unknown strategy
result = MAM_DENIED;
break;
}
if(strat_removed) {
result = MAM_STRATS_MODIFIED;
}
return result;
}
int MAM_I_set_red_strat(unsigned int strategy, unsigned int *strategies) {
int result = 0;
int strat_removed = 0;
switch(strategy) {
case MAM_STRAT_RED_CLEAR:
*strategies = MAM_STRAT_CLEAR_VALUE;
result = MAM_STRATS_MODIFIED;
break;
case MAM_STRAT_RED_PTHREAD: //TODO - IMPROVEMENT - This could be done with a single operation instead of 3.
result = MAM_I_add_strat(strategies, MAM_MASK_PTHREAD);
if(result == MAM_STRATS_ADDED) {
strat_removed += MAM_I_remove_strat(strategies, MAM_MASK_RED_WAIT_SOURCES);
strat_removed += MAM_I_remove_strat(strategies, MAM_MASK_RED_WAIT_TARGETS);
}
break;
case MAM_STRAT_RED_WAIT_SOURCES:
result = MAM_I_add_strat(strategies, MAM_MASK_RED_WAIT_SOURCES);
if(result == MAM_STRATS_ADDED) {
strat_removed += MAM_I_remove_strat(strategies, MAM_MASK_RED_WAIT_TARGETS);
strat_removed += MAM_I_remove_strat(strategies, MAM_MASK_PTHREAD);
}
break;
case MAM_STRAT_RED_WAIT_TARGETS:
result = MAM_I_add_strat(strategies, MAM_MASK_RED_WAIT_TARGETS);
if(result == MAM_STRATS_ADDED) {
strat_removed += MAM_I_remove_strat(strategies, MAM_MASK_RED_WAIT_SOURCES);
strat_removed += MAM_I_remove_strat(strategies, MAM_MASK_PTHREAD);
}
break;
default:
//Unknown strategy
result = MAM_DENIED;
break;
}
if(strat_removed) {
result = MAM_STRATS_MODIFIED;
}
return result;
}
int MAM_I_set_target_number(unsigned int new_numC) {
if(state > MAM_I_NOT_STARTED || new_numC == 0) return MAM_DENIED;
mall->numC = (int) new_numC;
return new_numC;
}
/*
* Returns 1 if strategy is applied, 0 otherwise
*/
int MAM_I_contains_strat(unsigned int comm_strategies, unsigned int strategy) {
return comm_strategies & strategy;
}
int MAM_I_add_strat(unsigned int *comm_strategies, unsigned int strategy) {
if(MAM_I_contains_strat(*comm_strategies, strategy)) return MAM_OK;
*comm_strategies |= strategy;
return MAM_STRATS_ADDED;
}
int MAM_I_remove_strat(unsigned int *comm_strategies, unsigned int strategy) {
if(!MAM_I_contains_strat(*comm_strategies, strategy)) return MAM_OK;
*comm_strategies &= ~strategy;
return MAM_STRATS_MODIFIED;
}
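Because strategies are stored as bit flags, the mutual exclusion handled in MAM_I_set_red_strat reduces to masking. A standalone illustration using the masks defined in MAM_Configuration.h below:
unsigned int strats = MAM_STRAT_CLEAR_VALUE;  /* 0x00                        */
strats |= MAM_MASK_PTHREAD;                   /* add pthread       -> 0x01   */
strats |= MAM_MASK_RED_WAIT_TARGETS;          /* add wait-targets  -> 0x05   */
strats &= ~MAM_MASK_PTHREAD;                  /* keep only one     -> 0x04   */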
#ifndef MAM_CONFIGURATION_H
#define MAM_CONFIGURATION_H
#include <mpi.h>
#include "MAM_Constants.h"
#define MAM_STRAT_CLEAR_VALUE 0
#define MAM_STRATS_ADDED 1
#define MAM_STRATS_MODIFIED 2
#define MAM_MASK_PTHREAD 0x01
#define MAM_MASK_SPAWN_SINGLE 0x02
#define MAM_MASK_SPAWN_INTERCOMM 0x04
#define MAM_MASK_SPAWN_MULTIPLE 0x08
#define MAM_MASK_RED_WAIT_SOURCES 0x02
#define MAM_MASK_RED_WAIT_TARGETS 0x04
int MAM_Contains_strat(int key, unsigned int strategy, int *result);
void MAM_Set_configuration(int spawn_method, int spawn_strategies, int spawn_dist, int red_method, int red_strategies);
void MAM_Set_key_configuration(int key, int required, int *provided);
int MAM_Set_target_number(unsigned int numC);
void MAM_Use_valgrind(int flag);
void MAM_Use_extrae(int flag);
#endif
#ifndef MAM_CONSTANTS_H
#define MAM_CONSTANTS_H
//States
#define MAM_DENIED -1
#define MAM_OK 0
enum mam_states{MAM_UNRESERVED, MAM_NOT_STARTED, MAM_PENDING, MAM_USER_PENDING, MAM_COMPLETED};
enum mam_proc_states{MAM_PROC_CONTINUE, MAM_PROC_NEW_RANK, MAM_PROC_ZOMBIE};
enum mam_spawn_methods{MAM_SPAWN_BASELINE, MAM_SPAWN_MERGE, MAM_METHODS_SPAWN_LEN};
enum mam_spawn_strategies{MAM_STRAT_SPAWN_CLEAR, MAM_STRAT_SPAWN_PTHREAD, MAM_STRAT_SPAWN_SINGLE, MAM_STRAT_SPAWN_INTERCOMM, MAM_STRAT_SPAWN_MULTIPLE, MAM_STRATS_SPAWN_LEN};
enum mam_phy_dist_methods{MAM_PHY_DIST_SPREAD = 1, MAM_PHY_DIST_COMPACT, MAM_METHODS_PHYSICAL_DISTRIBUTION_LEN};
enum mam_phy_info_methods{MAM_PHY_TYPE_STRING = 1, MAM_PHY_TYPE_HOSTFILE};
enum mam_redistribution_methods{MAM_RED_BASELINE, MAM_RED_POINT, MAM_RED_RMA_LOCK, MAM_RED_RMA_LOCKALL, MAM_METHODS_RED_LEN};
enum mam_red_strategies{MAM_STRAT_RED_CLEAR, MAM_STRAT_RED_PTHREAD, MAM_STRAT_RED_WAIT_SOURCES, MAM_STRAT_RED_WAIT_TARGETS, MAM_STRATS_RED_LEN};
/* KEYS & VALUES for config*/
enum mam_key_values{MAM_SPAWN_METHOD=0, MAM_SPAWN_STRATEGIES, MAM_PHYSICAL_DISTRIBUTION, MAM_RED_METHOD, MAM_RED_STRATEGIES, MAM_NUM_TARGETS, MAM_KEY_COUNT};
#define MAM_SPAWN_METHOD_ENV "MAM_SPAWN_METHOD"
#define MAM_SPAWN_STRATS_ENV "MAM_SPAWN_STRATS"
#define MAM_PHYSICAL_DISTRIBUTION_METHOD_ENV "MAM_PHYSICAL_DISTRIBUTION_METHOD"
#define MAM_RED_METHOD_ENV "MAM_RED_METHOD"
#define MAM_RED_STRATS_ENV "MAM_RED_STRATS"
#define MAM_NUM_TARGETS_ENV "MAM_NUM_TARGETS"
#define MAM_CHECK_COMPLETION 0
#define MAM_WAIT_COMPLETION 1
#define MAM_SOURCES 0
#define MAM_TARGETS 1
#define MAM_DATA_DISTRIBUTED 0
#define MAM_DATA_REPLICATED 1
#define MAM_DATA_VARIABLE 0
#define MAM_DATA_CONSTANT 1
#endif
#include "MAM_DataStructures.h"
malleability_config_t *mall_conf = NULL;
malleability_t *mall = NULL;
int state = MAM_I_UNRESERVED;
/*
 * Creates a derived datatype to send the two main
 * MaM structures.
*/
void MAM_Def_main_datatype() {
int i, counts = 11;
int blocklengths[counts];
MPI_Aint displs[counts];
MPI_Datatype types[counts];
for(i=0; i<5; i++) {
blocklengths[i] = 1;
types[i] = MPI_UNSIGNED;
}
for(i=5; i<counts; i++) {
blocklengths[i] = 1;
types[i] = MPI_INT;
}
// Obtain base addresses
MPI_Get_address(&(mall_conf->spawn_method), &displs[0]);
MPI_Get_address(&(mall_conf->spawn_strategies), &displs[1]);
MPI_Get_address(&(mall_conf->spawn_dist), &displs[2]);
MPI_Get_address(&(mall_conf->red_method), &displs[3]);
MPI_Get_address(&(mall_conf->red_strategies), &displs[4]);
MPI_Get_address(&(mall->root_parents), &displs[5]);
MPI_Get_address(&(mall->num_parents), &displs[6]); //TODO Add only when Single strat active?
MPI_Get_address(&(mall->numC), &displs[7]); //TODO Add only when MultipleSpawn strat active?
MPI_Get_address(&(mall->num_cpus), &displs[8]);
MPI_Get_address(&(mall->num_nodes), &displs[9]);
MPI_Get_address(&(mall->nodelist_len), &displs[10]);
MPI_Type_create_struct(counts, blocklengths, displs, types, &mall->struct_type);
MPI_Type_commit(&mall->struct_type);
}
void MAM_Free_main_datatype() {
if(mall->struct_type != MPI_DATATYPE_NULL) {
MPI_Type_free(&mall->struct_type);
}
}
/*
 * Communicates the required data of the main MAM
 * structures from sources to targets.
*/
void MAM_Comm_main_structures(MPI_Comm comm, int rootBcast) {
MPI_Bcast(MPI_BOTTOM, 1, mall->struct_type, rootBcast, comm);
if(mall->nodelist == NULL) {
mall->nodelist = calloc(mall->nodelist_len+1, sizeof(char));
mall->nodelist[mall->nodelist_len] = '\0';
}
MPI_Bcast(mall->nodelist, mall->nodelist_len, MPI_CHAR, rootBcast, comm);
}
/*
 * Prints the current state of all communicators
*/
void MAM_print_comms_state() {
int tester;
char *comm_name = malloc(MPI_MAX_OBJECT_NAME * sizeof(char));
MPI_Comm_get_name(mall->comm, comm_name, &tester);
printf("P%d Comm=%d Name=%s\n", mall->myId, mall->comm, comm_name);
MPI_Comm_get_name(*(mall->user_comm), comm_name, &tester);
printf("P%d Comm=%d Name=%s\n", mall->myId, *(mall->user_comm), comm_name);
if(mall->intercomm != MPI_COMM_NULL) {
MPI_Comm_get_name(mall->intercomm, comm_name, &tester);
printf("P%d Comm=%d Name=%s\n", mall->myId, mall->intercomm, comm_name);
}
free(comm_name);
}
/*
 * Function to update MaM's main communicators
*/
void MAM_comms_update(MPI_Comm comm) {
if(mall->thread_comm != MPI_COMM_WORLD) MPI_Comm_disconnect(&(mall->thread_comm));
if(mall->comm != MPI_COMM_WORLD) MPI_Comm_disconnect(&(mall->comm));
MPI_Comm_dup(comm, &(mall->thread_comm));
MPI_Comm_dup(comm, &(mall->comm));
MPI_Comm_set_name(mall->thread_comm, "MAM_THREAD");
MPI_Comm_set_name(mall->comm, "MAM_MAIN");
}