Commit 9d374098 authored by iker_martin

Added a new compute function, PI; iterations are now expressed as a number of operations in both cases.
parent 755b4c39
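
In outline, the change works like this: init_application() measures Top, the average wall-clock cost of a single computePiSerial() call, and broadcasts it from the root, and iterate() then converts its time budget (general_time * factors[grp]) into a fixed operation count, time / Top, instead of spinning on the clock. A minimal single-process sketch of that idea, in plain C without MPI (compute_pi_serial, now_seconds and the 0.5 s budget are illustrative stand-ins, not the project's code):

#include <stdio.h>
#include <time.h>

/* Same numerical kernel the commit adds as computePiSerial(): midpoint-rule pi. */
static double compute_pi_serial(int n) {
    int i;
    double x, h = 1.0 / (double) n, sum = 0.0;
    for (i = 0; i < n; i++) {
        x = h * ((double) i + 0.5);
        sum += 4.0 / (1.0 + x * x);
    }
    return h * sum;
}

/* CPU time in seconds; the actual code uses MPI_Wtime() instead. */
static double now_seconds(void) {
    return (double) clock() / CLOCKS_PER_SEC;
}

int main(void) {
    int i, samples = 1000, operations, n = 100000;
    double sink = 0.0, aux = 0.0, start, top, budget = 0.5;

    /* 1. Calibrate Top: average cost of one operation (one pi computation). */
    start = now_seconds();
    for (i = 0; i < samples; i++) sink += compute_pi_serial(n);
    top = (now_seconds() - start) / samples;

    /* 2. Convert a wall-clock budget for one iteration into an operation count. */
    operations = (int) (budget / top);

    /* 3. Run the iteration as that fixed number of operations. */
    for (i = 0; i < operations; i++) aux += compute_pi_serial(n);

    printf("Top=%e s, operations=%d (sink=%f, aux=%f)\n", top, operations, sink, aux);
    return 0;
}

In the commit itself the timer is MPI_Wtime() and Top is stored in the configuration struct, so every process uses the same value after the broadcast.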
@@ -245,14 +245,15 @@ configuration *recv_config_file(int root, MPI_Comm intercomm) {
* of the configuration structure with a single communication.
*/
void def_struct_config_file(configuration *config_file, MPI_Datatype *config_type) {
int i, counts = 7;
int blocklengths[7] = {1, 1, 1, 1, 1, 1, 1};
int i, counts = 8;
int blocklengths[8] = {1, 1, 1, 1, 1, 1, 1, 1};
MPI_Aint displs[counts], dir;
MPI_Datatype types[counts];
// Fill in the types vector
types[0] = types[1] = types[2] = types[3] = types[4] = types[5] = MPI_INT;
types[6] = MPI_FLOAT;
types[7] = MPI_DOUBLE;
// Fill in the displs vector
MPI_Get_address(config_file, &dir);
@@ -264,6 +265,7 @@ void def_struct_config_file(configuration *config_file, MPI_Datatype *config_typ
MPI_Get_address(&(config_file->adr), &displs[4]);
MPI_Get_address(&(config_file->aib), &displs[5]);
MPI_Get_address(&(config_file->general_time), &displs[6]);
MPI_Get_address(&(config_file->Top), &displs[7]);
for(i=0;i<counts;i++) displs[i] -= dir;
@@ -10,6 +10,7 @@ typedef struct
int matrix_tam, sdr, adr;
int aib;
float general_time;
double Top;
int *iters, *procs, *phy_dist;
float *factors;
@@ -20,6 +20,7 @@ int check_redistribution(int iter, MPI_Request **comm_req);
void iterate(double *matrix, int n, int async_comm);
void computeMatrix(double *matrix, int n);
double computePiSerial(int n);
void initMatrix(double **matrix, int n);
void init_group_struct(char *argv[], int argc, int myId, int numP);
@@ -97,7 +98,7 @@ int work() {
MPI_Request *async_comm;
maxiter = config_file->iters[group->grp];
initMatrix(&matrix, config_file->matrix_tam);
//initMatrix(&matrix, config_file->matrix_tam);
state = MAL_COMM_UNINITIALIZED;
res = 0;
@@ -146,7 +147,7 @@ int checkpoint(int iter, int state, MPI_Request **comm_req) {
state = start_redistribution(numS, comm_req);
} else if(MAL_ASYNC_PENDING) {
} else if(state == MAL_ASYNC_PENDING) {
state = check_redistribution(iter, comm_req);
}
@@ -236,7 +237,6 @@ int check_redistribution(int iter, MPI_Request **comm_req) {
req_completed = &(*comm_req)[1];
}
test_err = MPI_Test(req_completed, &completed, MPI_STATUS_IGNORE);
if (test_err != MPI_SUCCESS && test_err != MPI_ERR_PENDING) {
printf("P%d aborting -- Test Async\n", group->myId);
@@ -244,11 +244,15 @@ int check_redistribution(int iter, MPI_Request **comm_req) {
}
MPI_Allreduce(&completed, &all_completed, 1, MPI_INT, MPI_MIN, MPI_COMM_WORLD);
if(!all_completed) return MAL_ASYNC_PENDING; // Continue only if asynchronous send has ended
//MPI_Wait(req_completed, MPI_STATUS_IGNORE);
if(config_file->aib == MAL_USE_IBARRIER) {
MPI_Wait(&(*comm_req)[0], MPI_STATUS_IGNORE); // Mark the asynchronous send as completed
//To disconnect both process groups, MPI must be told that this send has
//finished, even though this point can only be reached once it has finished
}
iter_send = iter;
@@ -324,39 +328,54 @@ void Sons_init() {
void iterate(double *matrix, int n, int async_comm) {
double start_time, actual_time;
double time = config_file->general_time * config_file->factors[group->grp];
double Top = config_file->Top;
int i, operations = 0;
double aux = 0;
start_time = actual_time = MPI_Wtime();
operations = time / Top;
for(i=0; i < operations; i++) {
aux += computePiSerial(n);
}
actual_time = MPI_Wtime(); // Record times
if(async_comm == MAL_ASYNC_PENDING) { // An asynchronous data redistribution is in progress
operations=0;
}
results->iters_time[results->iter_index] = actual_time - start_time;
results->iters_type[results->iter_index] = operations;
results->iter_index = results->iter_index + 1;
}
/*
if(async_comm == MAL_ASYNC_PENDING) { // An asynchronous data redistribution is in progress
operations = results->iters_type[config_file->iters[group->grp] - 1];
for (i=0; i<operations; i++) {
computeMatrix(matrix, n);
//computeMatrix(matrix, n);
computePi(n);
}
actual_time = MPI_Wtime(); // Record times
operations = 0;
} else { // No data redistribution is in progress at the moment
while (actual_time - start_time < time) {
computeMatrix(matrix, n);
//computeMatrix(matrix, n);
computePi(n);
operations++;
actual_time = MPI_Wtime(); // Record times
}
}
results->iters_time[results->iter_index] = actual_time - start_time;
results->iters_type[results->iter_index] = operations;
results->iter_index = results->iter_index + 1;
}
*/
/*
* Performs a multiplication of matrices of size n
*/
void computeMatrix(double *matrix, int n) {
int row, col, i, aux;
int row, col, i;
double aux;
for(row=0; i<n; row++) {
/* COMPUTE */
for(col=0; col<n; col++) {
for(col=0; col<n; col++) {
for(row=0; row<n; row++) {
aux=0;
for(i=0; i<n; i++) {
aux += matrix[row*n + i] * matrix[i*n + col];
@@ -365,6 +384,23 @@ void computeMatrix(double *matrix, int n) {
}
}
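/*
 * Approximates pi by midpoint-rule numerical integration of 4/(1+x^2) over [0,1]:
 *   pi = integral_0^1 4/(1+x^2) dx  ~=  h * sum_{i=0}^{n-1} 4/(1 + x_i^2),
 *   with x_i = (i + 0.5)*h and h = 1/n.
 * The cost of one call grows linearly with n, so a call serves as a fixed unit of work.
 */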
double computePiSerial(int n) {
int i;
double h, sum, x, pi;
h = 1.0 / (double) n; //width of the rectangle
sum = 0.0;
for (i = 0; i < n; i++) {
x = h * ((double)i + 0.5); //height of the rectangle
sum += 4.0 / (1.0 + x*x);
}
return pi = h * sum;
//MPI_Reduce(&sum, &res, 1, MPI_DOUBLE, MPI_SUM, root, MPI_COMM_WORLD);
}
/*
* Init matrix
*/
@@ -489,6 +525,19 @@ void init_application() {
if(config_file->adr > 0) {
malloc_comm_array(&(group->async_array), config_file->adr , group->myId, group->numP);
}
double result, start_time = MPI_Wtime();
int i;
result = 0;
for(i=0; i<10000; i++) {
result += computePiSerial(config_file->matrix_tam);
}
printf("Creado Top con valor %lf\n", result);
fflush(stdout);
config_file->Top = (MPI_Wtime() - start_time) / 10000; //Time of one iteration
MPI_Bcast(&(config_file->Top), 1, MPI_DOUBLE, ROOT, MPI_COMM_WORLD);
}
}
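
To make the new accounting concrete (with made-up numbers, not values measured in this commit): if the calibration above yields Top = 2.0e-4 s and a group's budget is general_time * factors[grp] = 1.0 s, then iterate() performs operations = 1.0 / 2.0e-4 = 5000 calls to computePiSerial() and stores that count in results->iters_type, where the old code stored however many matrix products happened to fit in the wall-clock window.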