Commit 897b6adb authored by iker_martin

Refactor of warnings. Functional code

parent b8429aff
......@@ -24,10 +24,10 @@ static int handler(void* user, const char* section, const char* name,
}
char *resize_name = malloc(10 * sizeof(char));
snprintf(resize_name, 10, "resize%d", pconfig->actual_resize);
snprintf(resize_name, 10, "resize%zu", pconfig->actual_resize);
char *stage_name = malloc(10 * sizeof(char));
snprintf(stage_name, 10, "stage%d", pconfig->actual_stage);
snprintf(stage_name, 10, "stage%zu", pconfig->actual_stage);
#define MATCH(s, n) strcmp(section, s) == 0 && strcmp(name, n) == 0
if (MATCH("general", "Total_Resizes")) {
......
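A side note on the format-string change above: once actual_resize and actual_stage become size_t (see the configuration struct further down in this diff), the printf family needs the %zu conversion specifier; %d with a size_t argument is undefined behavior. A minimal standalone sketch of the pattern, not project code:

#include <stdio.h>

int main(void) {
    size_t actual_resize = 3;                 /* size_t index, as in the refactored struct */
    char resize_name[10];
    /* %zu is the conversion specifier for size_t; %d here would be undefined behavior */
    snprintf(resize_name, sizeof resize_name, "resize%zu", actual_resize);
    printf("%s\n", resize_name);
    return 0;
}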
......@@ -22,7 +22,7 @@ void def_results_type(results_data *results, int resizes, MPI_Datatype *results_
 * and indicate which process is the root in charge of sending the
 * results to the other group.
 */
void send_results(results_data *results, int root, int resizes, MPI_Comm intercomm) {
void send_results(results_data *results, int root, size_t resizes, MPI_Comm intercomm) {
MPI_Datatype results_type;
// Obtain a derived datatype to send all the
......@@ -43,7 +43,7 @@ void send_results(results_data *results, int root, int resizes, MPI_Comm interco
 * and indicate which process is the root of the other group in charge of sending
 * the results to this group.
 */
void recv_results(results_data *results, int root, int resizes, MPI_Comm intercomm) {
void recv_results(results_data *results, int root, size_t resizes, MPI_Comm intercomm) {
MPI_Datatype results_type;
// Obtain a derived datatype to send all the
......@@ -216,12 +216,12 @@ void print_global_results(results_data results, size_t resizes) {
}
printf("\nT_SR: ");
for(i=1; i < resizes; i++) {
for(i=0; i < resizes - 1; i++) {
printf("%lf ", results.sync_time[i]);
}
printf("\nT_AR: ");
for(i=1; i < resizes; i++) {
for(i=0; i < resizes - 1; i++) {
printf("%lf ", results.async_time[i]);
}
......@@ -261,16 +261,19 @@ void init_results_data(results_data *results, size_t resizes, size_t stages, siz
}
void realloc_results_iters(results_data *results, int stages, size_t needed) {
int i;
void realloc_results_iters(results_data *results, size_t stages, size_t needed) {
int error = 0;
double *time_aux;
size_t i;
time_aux = (double *) realloc(results->iters_time, needed * sizeof(double));
for(i=0; i<stages; i++) { //TODO Check that the realloc does not fail
results->stage_times[i] = (double *) realloc(results->stage_times[i], needed * sizeof(double));
results->stage_times[i] = (double *) realloc(results->stage_times[i], needed * sizeof(double));
if(results->stage_times[i] == NULL) error = 1;
}
if(time_aux == NULL) {
if(time_aux == NULL) error = 1;
if(error) {
fprintf(stderr, "Fatal error - No se ha podido realojar la memoria de resultados\n");
MPI_Abort(MPI_COMM_WORLD, 1);
}
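The hunk above follows the usual safe-realloc pattern: realloc into a temporary pointer, record any failure, and abort only after all reallocations have been attempted, so the original buffers are never lost by an early overwrite. A minimal standalone sketch of that pattern with illustrative names (the project aborts through MPI_Abort rather than exit):

#include <stdio.h>
#include <stdlib.h>

void grow(double **buf, size_t needed) {
    /* realloc into a temporary so the old pointer survives a failure */
    double *aux = realloc(*buf, needed * sizeof(double));
    if (aux == NULL) {
        fprintf(stderr, "Fatal error - could not reallocate results memory\n");
        free(*buf);
        exit(EXIT_FAILURE);
    }
    *buf = aux;
}

int main(void) {
    double *times = NULL;
    grow(&times, 128);   /* realloc(NULL, ...) behaves like malloc */
    times[0] = 1.0;
    free(times);
    return 0;
}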
......@@ -281,18 +284,18 @@ void realloc_results_iters(results_data *results, int stages, size_t needed) {
/*
 * Frees all the memory associated with a results structure.
*/
void free_results_data(results_data *results, int stages) {
int i;
if(results != NULL) {
free(results->spawn_time);
free(results->spawn_real_time);
free(results->sync_time);
free(results->async_time);
free(results->iters_time);
for(i=0; i<stages; i++) {
free(results->stage_times[i]);
}
free(results->stage_times);
void free_results_data(results_data *results, size_t stages) {
size_t i;
if(results != NULL) {
free(results->spawn_time);
free(results->spawn_real_time);
free(results->sync_time);
free(results->async_time);
free(results->iters_time);
for(i=0; i<stages; i++) {
free(results->stage_times[i]);
}
free(results->stage_times);
}
}
......@@ -20,8 +20,8 @@ typedef struct {
double wasted_time; // Time spent recalculating iter stages
} results_data;
void send_results(results_data *results, int root, int resizes, MPI_Comm intercomm);
void recv_results(results_data *results, int root, int resizes, MPI_Comm intercomm);
void send_results(results_data *results, int root, size_t resizes, MPI_Comm intercomm);
void recv_results(results_data *results, int root, size_t resizes, MPI_Comm intercomm);
void set_results_post_reconfig(results_data *results, int grp, int sdr, int adr);
void reset_results_index(results_data *results);
......@@ -34,7 +34,7 @@ void print_stage_results(results_data results, size_t n_stages);
void print_global_results(results_data results, size_t resizes);
void init_results_data(results_data *results, size_t resizes, size_t stages, size_t iters_size);
void realloc_results_iters(results_data *results, int stages, size_t needed);
void free_results_data(results_data *results, int stages);
void realloc_results_iters(results_data *results, size_t stages, size_t needed);
void free_results_data(results_data *results, size_t stages);
#endif
......@@ -227,10 +227,10 @@ int work() {
*/
double iterate(int async_comm) {
double start_time, start_time_stage, actual_time, *times_stages_aux;
int i;
size_t i;
double aux = 0;
times_stages_aux = malloc((size_t) config_file->n_stages * sizeof(double));
times_stages_aux = malloc(config_file->n_stages * sizeof(double));
start_time = MPI_Wtime();
for(i=0; i < config_file->n_stages; i++) {
......@@ -304,7 +304,7 @@ int print_local_results() {
print_config_group(config_file, group->grp);
print_iter_results(*results);
print_stage_results(*results, (size_t) config_file->n_stages);
print_stage_results(*results, config_file->n_stages);
free(file_name);
fflush(stdout);
......@@ -333,8 +333,8 @@ int print_final_results() {
ptr_out = dup(1);
create_out_file(file_name, &ptr_global, 1);
print_config(config_file, group->grp);
print_global_results(*results, (size_t)config_file->n_resizes);
print_config(config_file);
print_global_results(*results, config_file->n_resizes);
fflush(stdout);
free(file_name);
......@@ -379,7 +379,7 @@ void init_application() {
//config_file = read_ini_file(group->argv[1]);
init_config(group->argv[1], &config_file);
results = malloc(sizeof(results_data));
init_results_data(results, (size_t)config_file->n_resizes, (size_t)config_file->n_stages, (size_t)config_file->groups[group->grp].iters);
init_results_data(results, config_file->n_resizes, config_file->n_stages, config_file->groups[group->grp].iters);
if(config_file->sdr) {
malloc_comm_array(&(group->sync_array), config_file->sdr , group->myId, group->numP);
}
......@@ -391,9 +391,7 @@ void init_application() {
config_file->latency_m = latency(group->myId, group->numP, comm);
config_file->bw_m = bandwidth(group->myId, group->numP, comm, config_file->latency_m, message_tam);
printf("Test 0\n");
obtain_op_times(1);
printf("Test 1\n");
}
/*
......@@ -408,10 +406,9 @@ void init_application() {
 * to the total execution time.
*/
void obtain_op_times(int compute) {
int i;
size_t i;
double time = 0;
for(i=0; i<config_file->n_stages; i++) {
printf("Test P%d 0.5\n", group->myId);
time+=init_stage(config_file, i, *group, comm, compute);
}
if(!compute) {results->wasted_time += time;}
......
......@@ -12,7 +12,7 @@
typedef struct {
int myId;
int numP;
int grp;
unsigned int grp;
int iter_start;
int argc;
......@@ -52,8 +52,8 @@ typedef struct
typedef struct
{
int n_resizes, n_stages;
int actual_resize, actual_stage;
size_t n_resizes, n_stages;
size_t actual_resize, actual_stage;
int granularity, sdr, adr;
double latency_m, bw_m;
......
......@@ -49,7 +49,7 @@ void initMatrix(double **matrix, size_t n) {
if(*matrix == NULL) { MPI_Abort(MPI_COMM_WORLD, -1);}
for(i=0; i < n; i++) {
for(j=0; j < n; j++) {
(*matrix)[i*n + j] =(double) i + (double) j;
(*matrix)[i*n + j] = i+j;
}
}
}
......
......@@ -60,9 +60,9 @@ void init_config(char *file_name, configuration **user_config) {
* - recv_config_file
*/
void malloc_config_resizes(configuration *user_config) {
int i;
size_t i;
if(user_config != NULL) {
user_config->groups = malloc(sizeof(group_config_t) * (size_t) user_config->n_resizes);
user_config->groups = malloc(sizeof(group_config_t) * user_config->n_resizes);
for(i=0; i<user_config->n_resizes; i++) {
user_config->groups[i].iters = 0;
user_config->groups[i].procs = 1;
......@@ -85,9 +85,9 @@ void malloc_config_resizes(configuration *user_config) {
* - recv_config_file
*/
void malloc_config_stages(configuration *user_config) {
int i;
size_t i;
if(user_config != NULL) {
user_config->stages = malloc(sizeof(iter_stage_t) * (size_t) user_config->n_stages);
user_config->stages = malloc(sizeof(iter_stage_t) * user_config->n_stages);
user_config->t_op_comms = 0;
for(i=0; i<user_config->n_stages; i++) {
user_config->stages[i].array = NULL;
......@@ -104,7 +104,7 @@ void malloc_config_stages(configuration *user_config) {
 * Frees all the memory of a configuration structure
*/
void free_config(configuration *user_config) {
int i;
size_t i;
if(user_config != NULL) {
for(i=0; i < user_config->n_stages; i++) {
......@@ -138,18 +138,18 @@ void free_config(configuration *user_config) {
 * Prints to standard output all the information contained in
 * the configuration passed as an argument
*/
void print_config(configuration *user_config, int grp) {
void print_config(configuration *user_config) {
if(user_config != NULL) {
int i;
printf("Config loaded: R=%d, S=%d, granularity=%d, SDR=%d, ADR=%d, latency=%2.8f, bw=%lf || grp=%d\n",
size_t i;
printf("Config loaded: R=%zu, S=%zu, granularity=%d, SDR=%d, ADR=%d, latency=%2.8f, bw=%lf\n",
user_config->n_resizes, user_config->n_stages, user_config->granularity, user_config->sdr, user_config->adr,
user_config->latency_m, user_config->bw_m, grp);
user_config->latency_m, user_config->bw_m);
for(i=0; i<user_config->n_stages; i++) {
printf("Stage %d: PT=%d, T_stage=%lf, bytes=%d\n",
printf("Stage %zu: PT=%d, T_stage=%lf, bytes=%d\n",
i, user_config->stages[i].pt, user_config->stages[i].t_stage, user_config->stages[i].real_bytes);
}
for(i=0; i<user_config->n_resizes; i++) {
printf("Group %d: Iters=%d, Procs=%d, Factors=%f, Dist=%d, AT=%d, SM=%d, SS=%d\n",
printf("Group %zu: Iters=%d, Procs=%d, Factors=%f, Dist=%d, AT=%d, SM=%d, SS=%d\n",
i, user_config->groups[i].iters, user_config->groups[i].procs, user_config->groups[i].factor,
user_config->groups[i].phy_dist, user_config->groups[i].at, user_config->groups[i].sm,
user_config->groups[i].ss);
......@@ -162,8 +162,8 @@ void print_config(configuration *user_config, int grp) {
 * Prints to standard output the information related to a
 * single process group in its configuration.
*/
void print_config_group(configuration *user_config, int grp) {
int i;
void print_config_group(configuration *user_config, size_t grp) {
size_t i;
if(user_config != NULL) {
int parents, sons;
parents = sons = 0;
......@@ -177,10 +177,10 @@ void print_config_group(configuration *user_config, int grp) {
printf("Config: granularity=%d, SDR=%d, ADR=%d, latency=%2.8f, bw=%lf\n",
user_config->granularity, user_config->sdr, user_config->adr, user_config->latency_m, user_config->bw_m);
for(i=0; i<user_config->n_stages; i++) {
printf("Stage %d: PT=%d, T_stage=%lf, bytes=%d\n",
printf("Stage %zu: PT=%d, T_stage=%lf, bytes=%d\n",
i, user_config->stages[i].pt, user_config->stages[i].t_stage, user_config->stages[i].real_bytes);
}
printf("Group %d: Iters=%d, Procs=%d, Factors=%f, Dist=%d, AT=%d, SM=%d, SS=%d, parents=%d, children=%d\n",
printf("Group %zu: Iters=%d, Procs=%d, Factors=%f, Dist=%d, AT=%d, SM=%d, SS=%d, parents=%d, children=%d\n",
grp, user_config->groups[grp].iters, user_config->groups[grp].procs, user_config->groups[grp].factor,
user_config->groups[grp].phy_dist, user_config->groups[grp].at, user_config->groups[grp].sm,
user_config->groups[grp].ss, parents, sons);
......@@ -211,8 +211,8 @@ void send_config_file(configuration *config_file, int root, MPI_Comm intercomm)
// Obtain a derived datatype to send the iteration stage structures
// with a single communication
def_struct_groups(&(config_file->groups[0]), (size_t) config_file->n_resizes, &group_type);
def_struct_iter_stage(&(config_file->stages[0]), (size_t) config_file->n_stages, &iter_stage_type);
def_struct_groups(&(config_file->groups[0]), config_file->n_resizes, &group_type);
def_struct_iter_stage(&(config_file->stages[0]), config_file->n_stages, &iter_stage_type);
MPI_Bcast(config_file, 1, config_type, root, intercomm);
MPI_Bcast(config_file->groups, config_file->n_resizes, group_type, root, intercomm);
......@@ -247,15 +247,15 @@ void recv_config_file(int root, MPI_Comm intercomm, configuration **config_file_
MPI_Bcast(config_file, 1, config_type, root, intercomm);
//Initialization of internal structures
config_file->groups = malloc(sizeof(group_config_t) * (size_t) config_file->n_resizes);
config_file->stages = malloc(sizeof(iter_stage_t) * (size_t) config_file->n_stages);
config_file->groups = malloc(sizeof(group_config_t) * config_file->n_resizes);
config_file->stages = malloc(sizeof(iter_stage_t) * config_file->n_stages);
malloc_config_resizes(config_file); // Initialize group values
malloc_config_stages(config_file); // Initialize stage vectors to NULL
// Obtain a derived datatype to send the three integer
// vectors with a single communication
def_struct_groups(&(config_file->groups[0]), (size_t) config_file->n_resizes, &group_type);
def_struct_iter_stage(&(config_file->stages[0]), (size_t) config_file->n_stages, &iter_stage_type);
def_struct_groups(&(config_file->groups[0]), config_file->n_resizes, &group_type);
def_struct_iter_stage(&(config_file->stages[0]), config_file->n_stages, &iter_stage_type);
MPI_Bcast(config_file->groups, config_file->n_resizes, group_type, root, intercomm);
MPI_Bcast(config_file->stages, config_file->n_stages, iter_stage_type, root, intercomm);
......
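def_struct_groups and def_struct_iter_stage (defined elsewhere in the project) build MPI derived datatypes so that each array of structs travels in a single broadcast. A self-contained sketch of that general pattern using a toy struct rather than the project's group_config_t; the helper name and field layout below are invented for illustration:

#include <mpi.h>
#include <stddef.h>

typedef struct { int iters; double factor; } toy_group_t;   /* stand-in for group_config_t */

void def_toy_type(MPI_Datatype *new_type) {
    int blocklengths[2] = {1, 1};
    MPI_Aint displs[2] = {offsetof(toy_group_t, iters), offsetof(toy_group_t, factor)};
    MPI_Datatype types[2] = {MPI_INT, MPI_DOUBLE};
    MPI_Type_create_struct(2, blocklengths, displs, types, new_type);
    MPI_Type_commit(new_type);
}

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    toy_group_t groups[4] = {{10, 1.0}, {20, 0.5}, {30, 0.25}, {40, 0.125}};
    MPI_Datatype group_type;
    def_toy_type(&group_type);
    /* one collective call moves the whole array, mirroring the MPI_Bcast calls above */
    MPI_Bcast(groups, 4, group_type, 0, MPI_COMM_WORLD);
    MPI_Type_free(&group_type);
    MPI_Finalize();
    return 0;
}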
......@@ -9,8 +9,8 @@
void init_config(char *file_name, configuration **user_config);
void free_config(configuration *user_config);
void print_config(configuration *user_config, int grp);
void print_config_group(configuration *user_config, int grp);
void print_config(configuration *user_config);
void print_config_group(configuration *user_config, size_t grp);
// MPI Intercomm functions
void send_config_file(configuration *config_file, int root, MPI_Comm intercomm);
......
......@@ -81,7 +81,6 @@ double process_stage(configuration config_file, iter_stage_t stage, group_data g
//Computation
case COMP_PI:
for(i=0; i < stage.operations; i++) {
if(i%100 == 0) {printf("Test 0.7");}
result += computePiSerial(config_file.granularity);
}
break;
......@@ -159,12 +158,13 @@ double latency(int myId, int numP, MPI_Comm comm) {
//
// Returns the time needed to run the tests
double bandwidth(int myId, int numP, MPI_Comm comm, double latency, int n) {
int i, loop_count = 100, n_bytes;
int i, loop_count = 100;
double start_time, stop_time, bw, time;
char *aux;
size_t n_bytes;
n_bytes = ((size_t)n) * sizeof(char);
aux = malloc((size_t)n_bytes);
n_bytes = n * sizeof(char);
aux = malloc(n_bytes);
time = 0;
......@@ -184,7 +184,7 @@ double bandwidth(int myId, int numP, MPI_Comm comm, double latency, int n) {
MPI_Barrier(comm);
stop_time = MPI_Wtime();
time = (stop_time - start_time) / loop_count;
bw = ((double)n_bytes) / (time - latency);
bw = n_bytes / (time - latency);
MPI_Bcast(&bw, 1, MPI_DOUBLE, ROOT, comm);
free(aux);
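The bandwidth estimate above divides the bytes moved per exchange by the measured per-iteration time with the previously measured latency subtracted. A tiny illustration of the same formula with invented numbers:

#include <stdio.h>

int main(void) {
    double n_bytes = 1.0e6;      /* 1 MB payload (illustrative)               */
    double time    = 1.1e-3;     /* measured time per exchange (illustrative) */
    double latency = 1.0e-4;     /* latency measured beforehand               */
    double bw = n_bytes / (time - latency);   /* same formula as bandwidth()  */
    printf("estimated bandwidth: %.2e bytes/s\n", bw);   /* 1e6 / 1e-3 = 1e9 B/s */
    return 0;
}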
......@@ -202,7 +202,7 @@ double bandwidth(int myId, int numP, MPI_Comm comm, double latency, int n) {
double init_emulation_comm_time(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm) {
double start_time, time = 0;
stage->array = malloc(sizeof(char) * (size_t)config_file->granularity);
stage->array = malloc(config_file->granularity * sizeof(char));
if(config_file->t_op_comms != 0) {
stage->t_op = config_file->t_op_comms;
return time;
......@@ -222,7 +222,7 @@ double init_matrix_pt(group_data group, configuration *config_file, iter_stage_t
result = 0;
t_stage = stage->t_stage * config_file->groups[group.grp].factor;
initMatrix(&(stage->double_array), (size_t) config_file->granularity);
initMatrix(&(stage->double_array), config_file->granularity);
if(compute) {
start_time = MPI_Wtime();
......@@ -232,7 +232,7 @@ double init_matrix_pt(group_data group, configuration *config_file, iter_stage_t
}
MPI_Bcast(&(stage->t_op), 1, MPI_DOUBLE, ROOT, comm);
}
stage->operations = (int) ceil(t_stage / stage->t_op);
stage->operations = ceil(t_stage / stage->t_op);
return result;
}
......@@ -250,7 +250,7 @@ double init_pi_pt(group_data group, configuration *config_file, iter_stage_t *st
}
MPI_Bcast(&(stage->t_op), 1, MPI_DOUBLE, ROOT, comm);
}
stage->operations = (int) ceil(t_stage / stage->t_op);
stage->operations = ceil(t_stage / stage->t_op);
return result;
}
......@@ -265,7 +265,7 @@ void init_comm_ptop_pt(group_data group, configuration *config_file, iter_stage_
init_emulation_comm_time(group, config_file, stage, comm);
}
stage->real_bytes = aux_bytes;
stage->array = malloc(sizeof(char) * (size_t)stage->real_bytes);
stage->array = malloc(stage->real_bytes * sizeof(char));
}
double init_comm_bcast_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm) {
......@@ -275,7 +275,7 @@ double init_comm_bcast_pt(group_data group, configuration *config_file, iter_sta
if(stage->bytes != 0) {
stage->real_bytes = stage->bytes;
stage->array = malloc(sizeof(char) * (size_t)stage->real_bytes);
stage->array = malloc(stage->real_bytes * sizeof(char));
} else { // Prepare to emulate Collective as PtoP
time = init_emulation_comm_time(group, config_file, stage, comm);
}
......@@ -301,8 +301,8 @@ double init_comm_allgatherv_pt(group_data group, configuration *config_file, ite
get_block_dist(stage->real_bytes, group.myId, group.numP, &dist_data);
stage->my_bytes = dist_data.tamBl;
stage->array = malloc(sizeof(char) * (size_t)stage->my_bytes);
stage->full_array = malloc(sizeof(char) * (size_t)stage->real_bytes);
stage->array = malloc(stage->my_bytes * sizeof(char));
stage->full_array = malloc(stage->real_bytes * sizeof(char));
} else {
time = init_emulation_comm_time(group, config_file, stage, comm);
}
......@@ -319,9 +319,9 @@ double init_comm_reduce_pt(group_data group, configuration *config_file, iter_st
stage->real_bytes = stage->bytes;
if(stage->bytes != 0) {
stage->array = malloc(sizeof(char) * (size_t)stage->real_bytes);
stage->array = malloc(stage->real_bytes * sizeof(char));
//The full array for the reduce needs the same size
stage->full_array = malloc(sizeof(char) * (size_t)stage->real_bytes);
stage->full_array = malloc(stage->real_bytes * sizeof(char));
} else {
init_emulation_comm_time(group, config_file, stage, comm);
}
......
CC = gcc
MCC = mpicc
C_FLAGS_ALL = -Wfatal-errors -Wall -Wextra -Wpedantic -Wconversion -Wshadow
C_FLAGS = -Wall
#C_FLAGS_ALL = -Wconversion -Wpedantic
C_FLAGS = -Wall -Wextra -Wshadow -Wfatal-errors
LD_FLAGS = -lm -pthread
DEF =
......@@ -29,7 +29,7 @@ $(BIN) : $(BUILD_DIR)/$(BIN)
# Actual target of the binary - depends on all .o files.
$(BUILD_DIR)/$(BIN) : $(OBJ)
$(MCC) $(C_FLAGS_ALL) $^ -o $@ $(LD_FLAGS)
$(MCC) $(C_FLAGS) $^ -o $@ $(LD_FLAGS)
# Include all .d files
# .d files are used for knowing the dependencies of each source file
......@@ -42,7 +42,7 @@ $(BUILD_DIR)/$(BIN) : $(OBJ)
# the same name as the .o file.
$(BUILD_DIR)/%.o : %.c
mkdir -p $(@D)
$(MCC) $(C_FLAGS_ALL) $(DEF) -MMD -c $< -o $@
$(MCC) $(C_FLAGS) $(DEF) -MMD -c $< -o $@
clean:
-rm $(BUILD_DIR)/$(BIN) $(OBJ) $(DEP)
......
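The explicit (size_t) casts removed throughout this commit are the kind typically added to silence -Wconversion, which the old C_FLAGS_ALL enabled and which now appears only in the commented-out line above. An illustrative fragment, unrelated to the project sources, of the implicit conversion that flag reports:

#include <stdlib.h>

char *alloc_buffer(int n) {
    /* With gcc -Wconversion (which in C also covers sign-changing conversions),
     * the implicit int -> size_t conversion of n below is reported; it can be
     * silenced with an explicit (size_t)n cast or by making n a size_t. */
    return malloc(n * sizeof(char));
}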
......@@ -24,7 +24,7 @@ void malloc_comm_array(char **array, int qty, int myId, int numP) {
struct Dist_data dist_data;
get_block_dist(qty, myId, numP, &dist_data);
if( (*array = malloc( (size_t) dist_data.tamBl * sizeof(char))) == NULL) {
if( (*array = malloc(dist_data.tamBl * sizeof(char))) == NULL) {
printf("Memory Error (Malloc Arrays(%d))\n", dist_data.tamBl);
exit(1);
}
......@@ -60,7 +60,7 @@ int send_sync(char *array, int qty, int myId, int numP, MPI_Comm intercomm, int
dist_data.intercomm = intercomm;
// Create arrays which contain info about how many elements will be sent to each created process
mallocCounts(&counts, (size_t)numP_child);
mallocCounts(&counts, numP_child);
getIds_intercomm(dist_data, numP_child, &idS); // Get the range of child ids to which this process sends data
......@@ -87,12 +87,12 @@ void recv_sync(char **array, int qty, int myId, int numP, MPI_Comm intercomm, in
// Get the distribution for this child
get_block_dist(qty, myId, numP, &dist_data);
*array = malloc( (size_t)dist_data.tamBl * sizeof(char));
*array = malloc(dist_data.tamBl * sizeof(char));
//(*array)[dist_data.tamBl] = '\0';
dist_data.intercomm = intercomm;
/* PREPARE THE RECEIVE DATA OVER THE VECTOR */
mallocCounts(&counts, (size_t)numP_parents);
mallocCounts(&counts, numP_parents);
getIds_intercomm(dist_data, numP_parents, &idS); // Get the range of parent ids from which this process will receive data
......@@ -167,7 +167,7 @@ int send_async(char *array, int qty, int myId, int numP, MPI_Comm intercomm, int
dist_data.intercomm = intercomm;
// Create arrays which contain info about how many elements will be sent to each created process
mallocCounts(&counts, (size_t)numP_child);
mallocCounts(&counts, numP_child);
getIds_intercomm(dist_data, numP_child, &idS); // Get the range of child ids to which this process sends data
......@@ -217,17 +217,17 @@ void recv_async(char **array, int qty, int myId, int numP, MPI_Comm intercomm, i
// Get the distribution for this child
get_block_dist(qty, myId, numP, &dist_data);
*array = malloc( (size_t)dist_data.tamBl * sizeof(char));
*array = malloc( dist_data.tamBl * sizeof(char));
dist_data.intercomm = intercomm;
/* PREPARE THE RECEIVE DATA OVER THE VECTOR */
mallocCounts(&counts, (size_t)numP_parents);
mallocCounts(&counts, numP_parents);
getIds_intercomm(dist_data, numP_parents, &idS); // Get the range of parent ids from which this process will receive data
// MAL_USE_THREAD follows the synchronous path
if(parents_wait == MAL_USE_POINT) {
comm_req = (MPI_Request *) malloc((size_t)numP_parents * sizeof(MPI_Request));
comm_req = (MPI_Request *) malloc(numP_parents * sizeof(MPI_Request));
for(i=0; i<numP_parents; i++){
comm_req[i] = MPI_REQUEST_NULL;
}
......
......@@ -18,7 +18,7 @@ void prepare_comm_alltoall(int myId, int numP, int numP_other, int n, struct Cou
struct Dist_data dist_data;
get_block_dist(n, myId, numP, &dist_data);
mallocCounts(counts, (size_t)numP_other);
mallocCounts(counts, numP_other);
get_util_ids(dist_data, numP_other, &idS);
if(idS[0] == 0) {
......@@ -42,7 +42,7 @@ void prepare_comm_allgatherv(int numP, int n, struct Counts *counts) {
int i;
struct Dist_data dist_data;
mallocCounts(counts, (size_t)numP);
mallocCounts(counts, numP);
get_block_dist(n, 0, numP, &dist_data);
counts->counts[0] = dist_data.tamBl;
......
......@@ -311,7 +311,7 @@ void malleability_add_data(void *data, size_t total_qty, int type, int is_replic
} else if(mall_conf->comm_type == MAL_USE_IBARRIER) {
total_reqs = 2;
} else if(mall_conf->comm_type == MAL_USE_POINT) {
total_reqs = (size_t) mall->numC;
total_reqs = mall->numC;
}
add_data(data, total_qty, type, total_reqs, dist_a_data);
......@@ -387,12 +387,12 @@ void send_data(int numP_children, malleability_data_t *data_struct, int is_async
if(is_asynchronous) {
for(i=0; i < data_struct->entries; i++) {
aux = (char *) data_struct->arrays[i]; //TODO Check that it really is a char
send_async(aux, (int) data_struct->qty[i], mall->myId, mall->numP, mall->intercomm, numP_children, data_struct->requests, mall_conf->comm_type);
send_async(aux, data_struct->qty[i], mall->myId, mall->numP, mall->intercomm, numP_children, data_struct->requests, mall_conf->comm_type);
}
} else {
for(i=0; i < data_struct->entries; i++) {
aux = (char *) data_struct->arrays[i]; //TODO Check that it really is a char
send_sync(aux, (int) data_struct->qty[i], mall->myId, mall->numP, mall->intercomm, numP_children);
send_sync(aux, data_struct->qty[i], mall->myId, mall->numP, mall->intercomm, numP_children);
}
}
}
......@@ -409,13 +409,13 @@ void recv_data(int numP_parents, malleability_data_t *data_struct, int is_asynch
if(is_asynchronous) {
for(i=0; i < data_struct->entries; i++) {
aux = (char *) data_struct->arrays[i]; //TODO Check that it really is a char
recv_async(&aux, (int) data_struct->qty[i], mall->myId, mall->numP, mall->intercomm, numP_parents, mall_conf->comm_type);
recv_async(&aux, data_struct->qty[i], mall->myId, mall->numP, mall->intercomm, numP_parents, mall_conf->comm_type);
data_struct->arrays[i] = (void *) aux;
}
} else {
for(i=0; i < data_struct->entries; i++) {
aux = (char *) data_struct->arrays[i]; //TODO Check that it really is a char
recv_sync(&aux, (int) data_struct->qty[i], mall->myId, mall->numP, mall->intercomm, numP_parents);
recv_sync(&aux, data_struct->qty[i], mall->myId, mall->numP, mall->intercomm, numP_parents);
data_struct->arrays[i] = (void *) aux;
}
}
......@@ -445,7 +445,7 @@ void Children_init() {
recv_config_file(mall->root, mall->intercomm, &(mall_conf->config_file));
mall_conf->results = (results_data *) malloc(sizeof(results_data));
init_results_data(mall_conf->results, (size_t) mall_conf->config_file->n_resizes, (size_t) mall_conf->config_file->n_stages, RESULTS_INIT_DATA_QTY);
init_results_data(mall_conf->results, mall_conf->config_file->n_resizes, mall_conf->config_file->n_stages, RESULTS_INIT_DATA_QTY);
if(dist_a_data->entries || rep_a_data->entries) { // Receive asynchronous data
comm_data_info(rep_a_data, dist_a_data, MALLEABILITY_CHILDREN, mall->myId, root_parents, mall->intercomm);
......@@ -474,7 +474,7 @@ void Children_init() {
} else {
datatype = MPI_CHAR;
}
MPI_Bcast(rep_s_data->arrays[i], (int) rep_s_data->qty[i], datatype, root_parents, mall->intercomm);
MPI_Bcast(rep_s_data->arrays[i], rep_s_data->qty[i], datatype, root_parents, mall->intercomm);
}
}
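MPI_Bcast takes its element count as an int, so passing a size_t quantity such as rep_s_data->qty[i] relies on the value fitting in an int. A defensive sketch of that narrowing, purely illustrative and not how the project wraps it:

#include <mpi.h>
#include <limits.h>

/* Broadcast qty elements, guarding the size_t -> int narrowing required by the
 * int count parameter of MPI_Bcast. */
void bcast_checked(void *buffer, size_t qty, MPI_Datatype type, int root, MPI_Comm comm) {
    if (qty > (size_t) INT_MAX) {
        MPI_Abort(comm, 1);   /* a large-count scheme would be needed instead */
    }
    MPI_Bcast(buffer, (int) qty, type, root, comm);
}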
......@@ -652,7 +652,7 @@ int end_redistribution() {
} else {
datatype = MPI_CHAR;
}
MPI_Bcast(rep_s_data->arrays[i], (int) rep_s_data->qty[i], datatype, rootBcast, mall->intercomm);
MPI_Bcast(rep_s_data->arrays[i], rep_s_data->qty[i], datatype, rootBcast, mall->intercomm);
}
}
......
......@@ -66,8 +66,8 @@ void comm_data_info(malleability_data_t *data_struct_rep, malleability_data_t *d
MPI_Bcast(MPI_BOTTOM, 1, entries_type, rootBcast, intercomm);
if(is_children_group) {
if(data_struct_rep->entries != (size_t) 0) init_malleability_data_struct(data_struct_rep, data_struct_rep->entries);
if(data_struct_dist->entries != (size_t) 0) init_malleability_data_struct(data_struct_dist, data_struct_dist->entries);
if(data_struct_rep->entries != 0) init_malleability_data_struct(data_struct_rep, data_struct_rep->entries);
if(data_struct_dist->entries != 0) init_malleability_data_struct(data_struct_dist, data_struct_dist->entries);
}
def_malleability_qty_type(data_struct_dist, data_struct_rep, &struct_type);
......@@ -197,8 +197,8 @@ void def_malleability_qty_type(malleability_data_t *data_struct_rep, malleabilit
MPI_Datatype types[counts];
types[0] = types[1] = types[2] = types[3] = MPI_INT;
blocklengths[0] = blocklengths[1] = (int)data_struct_rep->entries;
blocklengths[2] = blocklengths[3] = (int)data_struct_dist->entries;
blocklengths[0] = blocklengths[1] = data_struct_rep->entries;
blocklengths[2] = blocklengths[3] = data_struct_dist->entries;
MPI_Get_address((data_struct_rep->qty), &displs[0]);
MPI_Get_address((data_struct_rep->types), &displs[1]);
......
......@@ -19,8 +19,8 @@ void gestor_usr2() {}
void zombies_collect_suspended(MPI_Comm comm, int myId, int numP, int numC, int root, void *results_void) {
int pid = getpid();
int *pids_counts = malloc((size_t)numP * sizeof(int));
int *pids_displs = malloc((size_t)numP * sizeof(int));
int *pids_counts = malloc(numP * sizeof(int));
int *pids_displs = malloc(numP * sizeof(int));
int i, count=1;
if(myId < numC) {
......
......@@ -84,7 +84,7 @@ void processes_dist(struct physical_dist dist, MPI_Info *info_spawn) {
node_dist(dist, &procs_array, &used_nodes);
switch(dist.info_type) {
case MALL_DIST_STRING:
generate_info_string_slurm(dist.nodelist, procs_array, (size_t) used_nodes, info_spawn);
generate_info_string_slurm(dist.nodelist, procs_array, used_nodes, info_spawn);
break;
case MALL_DIST_HOSTFILE:
generate_info_hostfile_slurm(dist.nodelist, procs_array, used_nodes, info_spawn);
......@@ -114,7 +114,7 @@ void processes_dist(struct physical_dist dist, MPI_Info *info_spawn) {
void node_dist(struct physical_dist dist, int **qty, int *used_nodes) {
int i, *procs;
procs = calloc((size_t)dist.num_nodes, sizeof(int)); // Number of processes per node
procs = calloc(dist.num_nodes, sizeof(int)); // Number of processes per node
/* GET NEW DISTRIBUTION */
switch(dist.dist_type) {
......@@ -127,7 +127,7 @@ void node_dist(struct physical_dist dist, int **qty, int *used_nodes) {
}
//Copy results to output vector qty
*qty = calloc((size_t)*used_nodes, sizeof(int)); // Number of processes per node
*qty = calloc(*used_nodes, sizeof(int)); // Number of processes per node
for(i=0; i< *used_nodes; i++) {
(*qty)[i] = procs[i];
}
......@@ -215,7 +215,7 @@ void generate_info_string(int target_qty, MPI_Info *info){
char *host_string, host[9] = "localhost";
// CREATE AND SET STRING HOSTS
write_str_node(&host_string, 0, (size_t)target_qty, host);
write_str_node(&host_string, 0, target_qty, host);
// SET MAPPING
MPI_Info_create(info);
MPI_Info_set(*info, "hosts", host_string);
......@@ -252,7 +252,7 @@ void fill_str_hosts_slurm(char *nodelist, int *qty, size_t used_nodes, char **ho
hostlist = slurm_hostlist_create(nodelist);
while ( (host = slurm_hostlist_shift(hostlist)) && i < used_nodes) {
if(qty[i] != 0) {
len = (size_t) write_str_node(hostfile_str, len, (size_t)qty[i], host);
len = write_str_node(hostfile_str, len, qty[i], host);
}
i++;
free(host);
......@@ -390,7 +390,7 @@ int write_hostfile_node(int ptr, int qty, char *node_name) {
len_node = strlen(node_name);
err = snprintf(NULL, 0, "%d", qty);
if(err < 0) return -1;
len_int = (size_t) err;
len_int = err;
len = len_node + len_int + 3;
line = malloc(len * sizeof(char));
......
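The snprintf(NULL, 0, ...) call in the last hunk is the standard C99 idiom for measuring how many characters a formatted value needs before allocating the buffer. A minimal standalone example; the separator format and the "+ 3" breakdown here are illustrative, not the project's exact layout:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void) {
    int qty = 48;
    const char *node_name = "node01";
    int err = snprintf(NULL, 0, "%d", qty);             /* returns the formatted length, here 2 */
    if (err < 0) return -1;
    size_t len = strlen(node_name) + (size_t) err + 3;  /* separator, newline and '\0' */
    char *line = malloc(len * sizeof(char));
    if (line == NULL) return -1;
    snprintf(line, len, "%s:%d\n", node_name, qty);
    printf("%s", line);
    free(line);
    return 0;
}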