Commit 3bbc423c authored by iker_martin

Fixed a bug for AT=0. The use of AT needs to be extended.

parent f0f94137
@@ -182,7 +182,6 @@ int checkpoint(int iter, int state, MPI_Request **comm_req) {
state = check_redistribution(iter, comm_req);
}
printf("P%d/%d Malleability END state=%d\n", group->myId, group->numP, state);
}
return state;
@@ -320,14 +319,19 @@ int check_redistribution(int iter, MPI_Request **comm_req) {
MPI_Abort(MPI_COMM_WORLD, test_err);
}
//MPI_Wait(req_completed, MPI_STATUS_IGNORE); //TODO DELETE??
//int delete_me;
//MPI_Allreduce(&completed, &delete_me, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); //TODO DELETE
//if(group->myId == ROOT) {printf("Sum=%d for %d procs\n", delete_me, group->numP); fflush(stdout);}
MPI_Allreduce(&completed, &all_completed, 1, MPI_INT, MPI_MIN, MPI_COMM_WORLD);
if(!all_completed) return MAL_ASYNC_PENDING; // Continue only if asynchronous send has ended
//MPI_Wait(req_completed, MPI_STATUS_IGNORE); TODO DELETE??
if(config_file->aib == MAL_USE_IBARRIER) {
MPI_Wait(&(*comm_req)[0], MPI_STATUS_IGNORE); // Mark the asynchronous send as completed
//To disconnect both groups of processes, MPI must be told that this
//To disconnect both groups of processes, MPI must be told that this comm
//has finished, even though this point can only be reached once it has finished
}
free(*comm_req);
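The completion check in this hunk follows a standard pattern: every rank tests its own outstanding request locally, then an MPI_Allreduce with MPI_MIN turns the per-rank 0/1 flags into a global verdict, so the function keeps returning MAL_ASYNC_PENDING until the slowest rank has finished. A minimal self-contained sketch of that pattern (the helper name and the MPI_Ibarrier stand-in are illustrative, not part of the repository):

#include <mpi.h>

/* Returns 1 only when every rank's request has completed:
 * MPI_MIN over 0/1 flags is 1 iff all flags are 1. */
static int all_ranks_completed(MPI_Request *req, MPI_Comm comm) {
    int completed = 0, all_completed = 0;
    MPI_Test(req, &completed, MPI_STATUS_IGNORE); /* local, non-blocking */
    MPI_Allreduce(&completed, &all_completed, 1, MPI_INT, MPI_MIN, comm);
    return all_completed;
}

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    MPI_Request req;
    MPI_Ibarrier(MPI_COMM_WORLD, &req); /* stand-in for the asynchronous send */
    while (!all_ranks_completed(&req, MPI_COMM_WORLD))
        ; /* the real code instead returns MAL_ASYNC_PENDING and retries later */
    MPI_Finalize();
    return 0;
}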
@@ -382,7 +386,12 @@ void Sons_init() {
group->compute_comm_array = malloc(config_file->comm_tam * sizeof(char));
}
if(config_file->adr) { // Receive asynchronous data
recv_sync(&(group->async_array), config_file->adr, group->myId, group->numP, ROOT, group->parents, numP_parents);
if(config_file->aib == MAL_USE_NORMAL || config_file->aib == MAL_USE_IBARRIER || config_file->aib == MAL_USE_POINT) {
recv_async(&(group->async_array), config_file->adr, group->myId, group->numP, ROOT, group->parents, numP_parents, config_file->aib);
} else if (config_file->aib == MAL_USE_THREAD) {
recv_sync(&(group->async_array), config_file->adr, group->myId, group->numP, ROOT, group->parents, numP_parents);
}
results->async_time[group->grp] = MPI_Wtime();
MPI_Bcast(&(group->iter_start), 1, MPI_INT, ROOT, group->parents);
}
@@ -435,7 +444,6 @@ void iterate(double *matrix, int n, int async_comm) {
}
if(config_file->comm_tam) {
printf("P%d/%d Bcast\n", group->myId, group->numP);
MPI_Bcast(group->compute_comm_array, config_file->comm_tam, MPI_CHAR, ROOT, MPI_COMM_WORLD);
}
@@ -66,7 +66,7 @@ void malloc_comm_array(char **array, int qty, int myId, int numP) {
//================================================================================
//================================================================================
//========================SINCHRONOUS FUNCTIONS===================================
//========================SYNCHRONOUS FUNCTIONS===================================
//================================================================================
//================================================================================
@@ -228,6 +228,7 @@ int send_async(char *array, int qty, int myId, int numP, int root, MPI_Comm inte
(*comm_req)[i] = MPI_REQUEST_NULL;
}
send_async_point_arrays(dist_data, array, rootBcast, numP_child, idS[0], idS[1], counts, *comm_req);
} else if (parents_wait == MAL_USE_THREAD) { //TODO
}
freeCounts(&counts);
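The MAL_USE_POINT branch pre-fills the request array with MPI_REQUEST_NULL before send_async_point_arrays posts the individual sends, so any slot that ends up unused is still safe to pass to MPI_Waitall. A hedged sketch of that request-array idiom (the helper name is hypothetical, counts/displs are flattened into plain arrays here rather than the counts structure the real code passes, and the tag value is arbitrary):

/* Illustrative only: one MPI_Isend per child rank in [lo, hi).
 * Slots left as MPI_REQUEST_NULL are ignored by MPI_Waitall. */
static void isend_to_children(char *array, int *counts, int *displs,
                              int lo, int hi, int numP_child,
                              MPI_Comm intercomm, MPI_Request *reqs) {
    int i;
    for (i = 0; i < numP_child; i++)
        reqs[i] = MPI_REQUEST_NULL;
    for (i = lo; i < hi; i++)
        MPI_Isend(array + displs[i], counts[i], MPI_CHAR,
                  i, 99, intercomm, &reqs[i]);
}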
@@ -272,18 +273,19 @@ void recv_async(char **array, int qty, int myId, int numP, int root, MPI_Comm in
recv_async_point_arrays(dist_data, *array, root, numP_parents, idS[0], idS[1], counts, comm_req);
wait_err = MPI_Waitall(numP_parents, comm_req, MPI_STATUSES_IGNORE);
} else {
} else if (parents_wait == MAL_USE_NORMAL || parents_wait == MAL_USE_IBARRIER) {
comm_req = (MPI_Request *) malloc(sizeof(MPI_Request));
*comm_req = MPI_REQUEST_NULL;
recv_async_arrays(dist_data, *array, root, numP_parents, idS[0], idS[1], counts, comm_req);
wait_err = MPI_Wait(comm_req, MPI_STATUS_IGNORE);
} else if (parents_wait == MAL_USE_THREAD) { //TODO
}
if(wait_err != MPI_SUCCESS) {
MPI_Abort(MPI_COMM_WORLD, wait_err);
}
if(parents_wait == MAL_USE_IBARRIER) {
if(parents_wait == MAL_USE_IBARRIER) { //MAL USE IBARRIER END
MPI_Ibarrier(intercomm, &aux);
MPI_Wait(&aux, MPI_STATUS_IGNORE); //It is necessary to check that the communication has finished before disconnecting the process groups
}
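The MAL_USE_IBARRIER branch above exists because the two process groups later disconnect: the extra MPI_Ibarrier/MPI_Wait pair lets both sides of the intercommunicator agree that the redistribution traffic is over before tearing the link down. A sketch of that shutdown handshake (the MPI_Comm_disconnect call is an assumption about what the caller does afterwards, not part of this hunk):

/* Sketch: confirm end-of-communication across an intercommunicator,
 * then disconnect the two process groups. */
static void finish_and_disconnect(MPI_Comm *intercomm) {
    MPI_Request aux;
    MPI_Ibarrier(*intercomm, &aux);     /* matched by the peer group's Ibarrier */
    MPI_Wait(&aux, MPI_STATUS_IGNORE);  /* communication confirmed finished */
    MPI_Comm_disconnect(intercomm);     /* assumption: disconnect happens here */
}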