Commit 21cd964f authored by iker_martin's avatar iker_martin
Browse files

Modified function to save iteration data. Now it saves median of times between...

Modified function to save iteration data. Now it saves the median of times across processes for each iteration. Fixed a bug where stage times were not saved correctly.
parent ea61a4d5
...@@ -102,7 +102,13 @@ void reset_results_index(results_data *results) { ...@@ -102,7 +102,13 @@ void reset_results_index(results_data *results) {
results->iter_index = 0; results->iter_index = 0;
} }
//=============================================================== FIXME delete?
/*
 * qsort comparator for doubles, ascending order.
 *
 * Returns a negative value, zero, or a positive value as *_a is less than,
 * equal to, or greater than *_b.
 *
 * Bug fixed: the previous `return (*a - *b);` truncated the double
 * difference to int, so any pair closer than 1.0 (e.g. 0.3 vs 0.7)
 * compared as "equal" and the sort order was wrong.
 */
int compare(const void *_a, const void *_b) {
  double a = *(const double *) _a;
  double b = *(const double *) _b;
  return (a > b) - (a < b);
}
/* /*
* Obtiene para cada iteracion, el tiempo maximo entre todos los procesos * Obtiene para cada iteracion, el tiempo maximo entre todos los procesos
* que han participado. * que han participado.
...@@ -112,12 +118,32 @@ void reset_results_index(results_data *results) { ...@@ -112,12 +118,32 @@ void reset_results_index(results_data *results) {
*/ */
void compute_results_iter(results_data *results, int myId, int numP, int root, MPI_Comm comm) { //TODO Probar a quedarse la MEDIA en vez de MAX? void compute_results_iter(results_data *results, int myId, int numP, int root, MPI_Comm comm) { //TODO Probar a quedarse la MEDIA en vez de MAX?
if(myId == root) { if(myId == root) {
MPI_Reduce(MPI_IN_PLACE, results->iters_time, results->iter_index, MPI_DOUBLE, MPI_SUM, root, comm); /*MPI_Reduce(MPI_IN_PLACE, results->iters_time, results->iter_index, MPI_DOUBLE, MPI_SUM, root, comm);
for(size_t i=0; i<results->iter_index; i++) { for(size_t i=0; i<results->iter_index; i++) {
results->iters_time[i] = results->iters_time[i] / numP; results->iters_time[i] = results->iters_time[i] / numP;
} }*/
} else { } else {
MPI_Reduce(results->iters_time, NULL, results->iter_index, MPI_DOUBLE, MPI_SUM, root, comm); //MPI_Reduce(results->iters_time, NULL, results->iter_index, MPI_DOUBLE, MPI_SUM, root, comm);
}
double *aux_all_iters, *aux_id_iters, median;
if(myId == root) {
aux_all_iters = malloc(numP *results->iter_index * sizeof(double));
}
MPI_Gather(results->iters_time, results->iter_index, MPI_DOUBLE, aux_all_iters, results->iter_index, MPI_DOUBLE, root, comm);
if(myId == root) {
aux_id_iters = malloc(numP * sizeof(double));
for(size_t i=0; i<results->iter_index; i++) {
for(int j=0; j<numP; j++) {
aux_id_iters[j] = aux_all_iters[i+(results->iter_index*j)];
}
// Get Median
qsort(aux_id_iters, results->iter_index, sizeof(double), &compare);
median = aux_id_iters[numP/2];
if (numP % 2 == 0) median = (aux_id_iters[numP/2 - 1] + aux_id_iters[numP/2]) / 2;
results->iters_time[i] = median;
}
free(aux_all_iters);
free(aux_id_iters);
} }
} }
......
...@@ -141,6 +141,11 @@ int main(int argc, char *argv[]) { ...@@ -141,6 +141,11 @@ int main(int argc, char *argv[]) {
MPI_Comm_size(comm, &(group->numP)); MPI_Comm_size(comm, &(group->numP));
MPI_Comm_rank(comm, &(group->myId)); MPI_Comm_rank(comm, &(group->myId));
if(res==1) { // Se ha llegado al final de la aplicacion
MPI_Barrier(comm); // TODO Posible error al utilizar SHRINK
results->exec_time = MPI_Wtime() - results->exec_start - results->wasted_time;
}
print_local_results(); print_local_results();
reset_results_index(results); reset_results_index(results);
} while(config_file->n_resizes > group->grp + 1 && config_file->groups[group->grp].sm == MALL_SPAWN_MERGE); } while(config_file->n_resizes > group->grp + 1 && config_file->groups[group->grp].sm == MALL_SPAWN_MERGE);
...@@ -148,12 +153,6 @@ int main(int argc, char *argv[]) { ...@@ -148,12 +153,6 @@ int main(int argc, char *argv[]) {
// //
// TERMINA LA EJECUCION ---------------------------------------------------------- // TERMINA LA EJECUCION ----------------------------------------------------------
// //
if(res==1) { // Se ha llegado al final de la aplicacion
MPI_Barrier(comm); // TODO Posible error al utilizar SHRINK
results->exec_time = MPI_Wtime() - results->exec_start - results->wasted_time;
}
print_final_results(); // Pasado este punto ya no pueden escribir los procesos print_final_results(); // Pasado este punto ya no pueden escribir los procesos
if(comm != MPI_COMM_WORLD && comm != MPI_COMM_NULL) { if(comm != MPI_COMM_WORLD && comm != MPI_COMM_NULL) {
...@@ -337,6 +336,7 @@ int print_local_results() { ...@@ -337,6 +336,7 @@ int print_local_results() {
char *file_name; char *file_name;
compute_results_iter(results, group->myId, group->numP, ROOT, comm); compute_results_iter(results, group->myId, group->numP, ROOT, comm);
compute_results_stages(results, group->myId, group->numP, config_file->n_stages, ROOT, comm);
if(group->myId == ROOT) { if(group->myId == ROOT) {
ptr_out = dup(1); ptr_out = dup(1);
......
...@@ -697,7 +697,7 @@ int shrink_redistribution() { ...@@ -697,7 +697,7 @@ int shrink_redistribution() {
double time_extra = MPI_Wtime(); double time_extra = MPI_Wtime();
//TODO REFACTOR -- Que solo la llamada de collect iters este fuera de los hilos //TODO REFACTOR -- Que solo la llamada de collect iters este fuera de los hilos
zombies_collect_suspended(mall->comm, mall->myId, mall->numP, mall->numC, mall->root, (void *) mall_conf->results); zombies_collect_suspended(mall->comm, mall->myId, mall->numP, mall->numC, mall->root, (void *) mall_conf->results, mall_conf->config_file->n_stages);
if(mall->myId < mall->numC) { if(mall->myId < mall->numC) {
if(mall->thread_comm != MPI_COMM_WORLD) MPI_Comm_free(&(mall->thread_comm)); if(mall->thread_comm != MPI_COMM_WORLD) MPI_Comm_free(&(mall->thread_comm));
......
...@@ -17,7 +17,7 @@ int offset_pids, *pids = NULL; ...@@ -17,7 +17,7 @@ int offset_pids, *pids = NULL;
void gestor_usr2() {} void gestor_usr2() {}
void zombies_collect_suspended(MPI_Comm comm, int myId, int numP, int numC, int root, void *results_void) { void zombies_collect_suspended(MPI_Comm comm, int myId, int numP, int numC, int root, void *results_void, int n_stages) {
int pid = getpid(); int pid = getpid();
int *pids_counts = malloc(numP * sizeof(int)); int *pids_counts = malloc(numP * sizeof(int));
int *pids_displs = malloc(numP * sizeof(int)); int *pids_displs = malloc(numP * sizeof(int));
...@@ -44,6 +44,7 @@ void zombies_collect_suspended(MPI_Comm comm, int myId, int numP, int numC, int ...@@ -44,6 +44,7 @@ void zombies_collect_suspended(MPI_Comm comm, int myId, int numP, int numC, int
// Needed to ensure iteration times are collected before suspending these processes // Needed to ensure iteration times are collected before suspending these processes
results_data *results = (results_data *) results_void; results_data *results = (results_data *) results_void;
compute_results_iter(results, myId, numP,root, comm); compute_results_iter(results, myId, numP,root, comm);
compute_results_stages(results, myId, numP, n_stages, root, comm);
if(myId >= numC) { if(myId >= numC) {
zombies_suspend(); zombies_suspend();
......
...@@ -9,7 +9,7 @@ ...@@ -9,7 +9,7 @@
#include <mpi.h> #include <mpi.h>
#include <signal.h> #include <signal.h>
void zombies_collect_suspended(MPI_Comm comm, int myId, int numP, int numC, int root, void *results_void); void zombies_collect_suspended(MPI_Comm comm, int myId, int numP, int numC, int root, void *results_void, int n_stages);
void zombies_service_init(); void zombies_service_init();
void zombies_service_free(); void zombies_service_free();
void zombies_awake(); void zombies_awake();
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment