Commit 95f9a9a6 authored by iker_martin

First commit

parent 8dc4bd2b
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <mkl_blas.h>
#include <mkl_spblas.h>
#include "ScalarVectors.h"
#include "SparseMatrices.h"
#include <mpi.h>
#include <string.h>
#include "../malleability/malleabilityManager.h"
//#define ONLY_SYM 0
#define ROOT 0
//#define DEBUG 0
#define MAX_PROCS_SET 16
typedef struct {
double umbral, tol;
int iter, maxiter, n;
double beta, rho, alpha;
double *res, *z, *d, *vec;
SparseMatrix subm;
double *d_full;
int *dist_elem, *displs_elem;
int *dist_rows, *displs_rows;
int *vlen;
} Compute_data;
struct Dist_data {
int ini;
int fin;
int tamBl; // Number of rows
int tot_r; // Total number of rows in the matrix
int myId;
int numP;
int numP_parents;
MPI_Comm comm, comm_children, comm_parents;
MPI_Datatype scalars, arrays;
};
void init_app(Compute_data *computeData, struct Dist_data *dist_data, char* argv[]);
void get_mat_dist(Compute_data *computeData, struct Dist_data dist_data, SparseMatrix mat);
void get_rows_dist(Compute_data *computeData, int numP, int n);
void mat_alloc(Compute_data *computeData, SparseMatrix mat, struct Dist_data dist_data);
void computeSolution(Compute_data computeData, double **subsol, SparseMatrix mat, int myId, double **full_vec);
void pre_compute(Compute_data *computeData, struct Dist_data dist_data, double *subsol, double *full_vec);
int compute(Compute_data *computeData, struct Dist_data *dist_data, char *argv[]);
void free_computeData(Compute_data *computeData);
//===================================MALLEABILITY FUNCTIONS====================================================
int n_check = 30;
int dist_old(struct Dist_data *dist_data, Compute_data *computeData, char *argv[], int type_dist);
void send_matrix(struct Dist_data dist_data, Compute_data computeData, int rootBcast, int numP_child, int idI, int idE,
int *sendcounts, int *recvcounts,int *sdispls, int *rdispls);
void dist_new(struct Dist_data *dist_data, Compute_data *computeData);
void recv_matrix(struct Dist_data *dist_data, Compute_data *computeData, int idI, int idE,
int *sendcounts, int *recvcounts,int *sdispls, int *rdispls);
//----------------------------------------------------------------------------------------------------
void get_dist(int total_r, int id, int numP, struct Dist_data *dist_data);
void set_counts(int id, int numP, struct Dist_data data_dist, int *sendcounts);
void getIds_intercomm(struct Dist_data dist_data, int numP_other, int **idS);
//----------------------------------------------------------------------------------------------------
int main (int argc, char *argv[]) {
int terminate;
int num_nodes = 0, num_cpus = 20;
char *nodelist = NULL;
Compute_data computeData;
computeData.vec = NULL; computeData.res = NULL;
computeData.dist_elem = NULL; computeData.displs_elem = NULL;
computeData.dist_rows = NULL; computeData.displs_rows = NULL;
int numP, myId, num_children = 0;
struct Dist_data dist_data;
if (argc >= 5) {
num_children = atoi(argv[2]);
nodelist = argv[3];
num_nodes = atoi(argv[4]);
num_cpus = num_nodes * num_cpus;
}
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &numP);
MPI_Comm_rank(MPI_COMM_WORLD, &myId);
dist_data.myId = myId;
dist_data.numP = numP;
dist_data.comm = MPI_COMM_WORLD;
int new_group = init_malleability(myId, numP, ROOT, dist_data.comm, argv[0], nodelist, num_cpus, num_nodes);
if( !new_group ) { //First set of processes
init_app(&computeData, &dist_data, argv);
} else {
dist_new(&dist_data, &computeData);
}
// This variable is sent to the children so they know how many parents they have
dist_data.numP_parents = numP;
terminate = compute(&computeData, &dist_data, argv);
if(myId == ROOT && terminate) {
printf ("End(%d) --> (%d,%20.10e)\n", computeData.n, computeData.iter, computeData.tol);
}
// End of CG
free_malleability();
free_computeData(&computeData);
MPI_Finalize();
}
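/*
 * Launch sketch (hypothetical values, following the argv layout parsed in
 * main above: argv[1] = Harwell-Boeing matrix file, argv[2] = number of
 * children to spawn, argv[3] = nodelist, argv[4] = number of nodes):
 *
 *   mpirun -np 4 ./build/a.out bcsstk01.rsa 8 nodelist.txt 2
 */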
/*
 * Initialise the application data before
 * starting the iterative computation
 */
void init_app(Compute_data *computeData, struct Dist_data *dist_data, char* argv[]) {
SparseMatrix mat, sym;
double *full_vec = NULL;
double *subsol = NULL;
if(dist_data->myId == ROOT) {
#ifdef ONLY_SYM
printf ("Working with symmetric format\n");
CreateSparseMatrixHB (argv[1], &mat, 1);
#else
printf ("Working with general format\n");
CreateSparseMatrixHB (argv[1], &sym, 1);
DesymmetrizeSparseMatrices (sym, &mat);
RemoveSparseMatrix (&sym);
#endif
computeData->n = mat.dim1;
}
// Communicate number of rows to distribute and number of elements in the matrix
MPI_Bcast(&computeData->n, 1, MPI_INT, ROOT, MPI_COMM_WORLD);
// Each process calculates its own distribution
get_dist(computeData->n, dist_data->myId, dist_data->numP, dist_data);
if(dist_data->myId == ROOT) { // ROOT gets rows and vpos/vval distribution
get_mat_dist(computeData, *dist_data, mat);
TransformHeadertoLength(mat.vptr, computeData->n); // From vptr to vlen
} else { // Non-ROOT processes get the row distribution
get_rows_dist(computeData, dist_data->numP, computeData->n);
CreateInts (&computeData->dist_elem, dist_data->numP*2);
InitInts (computeData->dist_elem, dist_data->numP * 2, 0, 0);
computeData->displs_elem = computeData->dist_elem + dist_data->numP;
}
// Allocate for each process their submatrix and get their distribution from ROOT
mat_alloc(computeData, mat, *dist_data);
computeSolution(*computeData, &subsol, mat, dist_data->myId, &full_vec);
pre_compute(computeData, *dist_data, subsol, full_vec);
//Free Initial data
RemoveDoubles(&subsol);
RemoveDoubles(&full_vec);
if(dist_data->myId == ROOT) {
RemoveSparseMatrix(&mat);
}
}
/*
* MPI Dist
* Broadcast the vptr array and each process gets the data that corresponds to itself.
*
* mat.vptr must be in vlen format to work correctly
*/
void get_mat_dist(Compute_data *computeData, struct Dist_data dist_data, SparseMatrix mat) {
int i;
struct Dist_data dist_data_aux;
#ifdef DEBUG
if(dist_data.myId == ROOT) printf("Distributing vptr\n");
#endif
CreateInts (&computeData->dist_rows, dist_data.numP);
CreateInts (&computeData->displs_rows, dist_data.numP);
CreateInts (&computeData->dist_elem, dist_data.numP*2);
computeData->displs_elem = computeData->dist_elem + dist_data.numP;
InitInts (computeData->dist_rows, dist_data.numP, 0, 0);
InitInts (computeData->displs_rows, dist_data.numP, 0, 0);
InitInts (computeData->dist_elem, dist_data.numP*2, 0, 0);
// Fill dist_rows and dist_elem so each process can make ScatterV or GatherV calls
for(i=0; i<dist_data.numP; i++) {
get_dist(computeData->n, i, dist_data.numP, &dist_data_aux);
computeData->dist_rows[i] = dist_data_aux.tamBl;
computeData->dist_elem[i] = mat.vptr[dist_data_aux.fin] - mat.vptr[dist_data_aux.ini];
// Fill displacements
if(i!=0) {
computeData->displs_elem[i] = computeData->displs_elem[i-1] + computeData->dist_elem[i-1];
computeData->displs_rows[i] = computeData->displs_rows[i-1] + computeData->dist_rows[i-1];
}
}
#ifdef DEBUG
printf("Proc %d almacena %d filas con %d elementos\n", dist_data.myId, computeData->dist_rows[dist_data.myId], computeData->dist_elem[dist_data.myId]);
fflush(stdout);
#endif
}
/*
* MPI Dist
* Get the rows distribution of n rows in a given number of processes
*/
void get_rows_dist(Compute_data *computeData, int numP, int n) {
int i;
struct Dist_data dist_data;
CreateInts (&(computeData->dist_rows), numP);
CreateInts (&(computeData->displs_rows), numP);
InitInts (computeData->dist_rows, numP, 0, 0);
InitInts (computeData->displs_rows, numP, 0, 0);
// Fill dist_rows and dist_elem so each process can make ScatterV or GatherV calls
for(i=0; i<numP; i++) {
get_dist(n, i, numP, &dist_data);
computeData->dist_rows[i] = dist_data.tamBl;
// Fill displacements
if(i!=0) {
computeData->displs_rows[i] = computeData->displs_rows[i-1] + computeData->dist_rows[i-1];
}
}
}
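/*
 * Worked example (a sketch derived from get_dist below): for n = 10 rows
 * over numP = 3 processes the remainder row goes to rank 0, giving
 *
 *   dist_rows   = { 4, 3, 3 }
 *   displs_rows = { 0, 4, 7 }
 *
 * which is exactly the counts/displacements pair that MPI_Scatterv and
 * MPI_Gatherv expect.
 */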
/*
* Matrix allocation
*
* The matrix that each process will use is allocated and
* their vptr array initialised.
*
* MPI Dist
* Distribute vpos and vvalues data among processes
* Both arrays have the same distribution
*/
void mat_alloc(Compute_data *computeData, SparseMatrix mat, struct Dist_data dist_data) {
int i;
int elems; // Number of elements this process has
#ifdef DEBUG
if(dist_data.myId == ROOT) printf("Distributing vpos and vval\n");
#endif
// dist_rows[myId] is the number of rows, n the number of columns, and dist_elem[myId] is the number of elements this process will have in the matrix
CreateSparseMatrixVptr(&(computeData->subm), dist_data.tamBl, computeData->n, 0);
computeData->subm.vptr[0] = 0;
MPI_Scatterv((mat.vptr)+1, computeData->dist_rows, computeData->displs_rows, MPI_INT, (computeData->subm.vptr)+1, dist_data.tamBl, MPI_INT, ROOT, MPI_COMM_WORLD);
TransformLengthtoHeader(computeData->subm.vptr, computeData->subm.dim1); // The array is converted from vlen to vptr
elems = computeData->subm.vptr[dist_data.tamBl];
CreateSparseMatrixValues(&(computeData->subm), dist_data.tamBl, computeData->n, elems, 0);
MPI_Scatterv(mat.vpos, computeData->dist_elem, computeData->displs_elem, MPI_INT, computeData->subm.vpos, elems, MPI_INT, ROOT, MPI_COMM_WORLD);
MPI_Scatterv(mat.vval, computeData->dist_elem, computeData->displs_elem, MPI_DOUBLE, computeData->subm.vval, elems, MPI_DOUBLE, ROOT, MPI_COMM_WORLD);
// Free elem arrays, as they are not going to be used again
RemoveInts (&computeData->dist_elem);
}
/*
* Compute solution
*/
void computeSolution(Compute_data computeData, double **subsol, SparseMatrix mat, int myId, double **full_vec) {
CreateDoubles (subsol, computeData.dist_rows[myId]);
InitDoubles (*subsol, computeData.dist_rows[myId], 0.0, 0.0);
CreateDoubles(full_vec, computeData.n);
InitDoubles (*full_vec, computeData.n, 1.0, 0.0);
//Compute SOLUTION
#ifdef ONLY_SYM
ProdSymSparseMatrixVector (computeData.subm, *full_vec, *subsol); // sol += A * x
#else
ProdSparseMatrixVector (computeData.subm, *full_vec, *subsol); // sol += A * x
#endif
/*
#ifdef DEBUG
int aux, i;
double *solD = NULL, *sol = NULL;
if(myId == ROOT) {
printf("Computing solution\n");
CreateDoubles (&sol, computeData.n);
CreateDoubles (&solD, computeData.n);
InitDoubles (sol, computeData.n, 0.0, 0.0);
InitDoubles (solD, computeData.n, 0.0, 0.0);
TransformLengthtoHeader(mat.vptr, mat.dim1); // vlen to vptr (At mat_alloc was needed as vlen)
}
MPI_Gatherv(*subsol, computeData.dist_rows[myId], MPI_DOUBLE, sol, computeData.dist_rows, computeData.displs_rows, MPI_DOUBLE, ROOT, MPI_COMM_WORLD);
if(myId == ROOT) {
#ifdef ONLY_SYM
ProdSymSparseMatrixVector (mat, *full_vec, solD); // solD += A * x
#else
ProdSparseMatrixVector (mat, *full_vec, solD); // solD += A * x
#endif // ONLY_SIM
aux = 1;
printf("Checking sol array is ok\n");
for(i=0; i<mat.dim1; i++) {
if(sol[i] != solD[i]) {
printf("[%d]Expected %lf - Result %lf\n", i, solD[i],sol[i]);
aux = 0;
}
}
if(aux) printf("sol array is correct\n");
}
RemoveDoubles (&sol);
RemoveDoubles (&solD);
#endif // DEBUG
*/
}
/*
 * Performs the preparations to enter the main computation loop,
 * initialising the data and performing a first iteration
 */
void pre_compute(Compute_data *computeData, struct Dist_data dist_data, double *subsol, double *full_vec) {
int IZERO = 0, IONE = 1;
double DONE = 1.0, DMONE = -1.0, DZERO = 0.0;
if(dist_data.myId == ROOT) {
printf("Start CG\n");
}
computeData->res = NULL; computeData->z = NULL; computeData->d = NULL;
computeData->umbral = 1.0e-8;
CreateDoubles(&computeData->res, dist_data.tamBl);
CreateDoubles(&computeData->z, dist_data.tamBl);
CreateDoubles(&computeData->d, dist_data.tamBl);
CreateDoubles (&computeData->vec, dist_data.tamBl);
CreateDoubles (&computeData->d_full, computeData->n);
InitDoubles (computeData->vec, dist_data.tamBl, DZERO, DZERO); // x = 0
InitDoubles (full_vec, computeData->n, DZERO, DZERO); // full_x = 0
computeData->iter = 0;
#ifdef ONLY_SYM
ProdSymSparseMatrixVector (computeData->subm, full_vec, computeData->z); // z += A * full_x
// mkl_dcsrsymv ("U", &n, mat.vval, mat.vptr, mat.vpos, vec, z); // z = A * full_x
#else
ProdSparseMatrixVector (computeData->subm, full_vec, computeData->z); // z += A * full_x
#endif
dcopy (&(dist_data.tamBl), subsol, &IONE, computeData->res, &IONE); // res = b
daxpy (&(dist_data.tamBl), &DMONE, computeData->z, &IONE, computeData->res, &IONE); // res -= z
//dcopy (&(computeData.subm.dim1), computeData.res, &IONE, &(computeData.d+computeData.displs_rows[myId]), &IONE); // d_full = res
MPI_Allgatherv(computeData->res, dist_data.tamBl, MPI_DOUBLE, computeData->d_full, computeData->dist_rows, computeData->displs_rows, MPI_DOUBLE, MPI_COMM_WORLD);
dcopy (&(dist_data.tamBl), &(computeData->d_full[dist_data.ini]), &IONE, computeData->d, &IONE); // d = d_full[ini] to d_full[ini+tamBl]
computeData->beta = ddot (&(dist_data.tamBl), computeData->res, &IONE, computeData->res, &IONE); // beta = res' * res
MPI_Allreduce(MPI_IN_PLACE, &computeData->beta, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
computeData->tol = sqrt (computeData->beta); // tol = sqrt(beta) = norm (res)
}
/*
 * Main computation loop
 */
int compute(Compute_data *computeData, struct Dist_data *dist_data, char *argv[]) {
int IZERO = 0, IONE = 1;
double DONE = 1.0, DMONE = -1.0, DZERO = 0.0;
int ended_loop = 1;
int cnt = 0, created = 0;
computeData->maxiter = 1000;
MPI_Barrier(MPI_COMM_WORLD);
printf("PRUEBA %d\n", computeData->iter); fflush(stdout);
while ((computeData->iter < computeData->maxiter) && (computeData->tol > computeData->umbral)) {
//while (computeData->tol > computeData->umbral) {
//malleability_checkpoint();
if (created) { ended_loop = 0; break;}
// if(dist_data->myId == ROOT) printf ("(%d,%20.10e)\n", computeData->iter, computeData->tol);
// COMPUTATION
#ifdef ONLY_SYM
ProdSymSparseMatrixVector (computeData->subm, computeData->d_full, computeData->z); // z += A * d_full
#else
ProdSparseMatrixVector (computeData->subm, computeData->d_full, computeData->z); // z += A * d_full
#endif
MPI_Barrier(MPI_COMM_WORLD);
printf("PRUEBA 1\n"); fflush(stdout);
computeData->rho = ddot (&(dist_data->tamBl), computeData->d, &IONE, computeData->z, &IONE); // rho = (d * z)
MPI_Allreduce(MPI_IN_PLACE, &computeData->rho, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); // Reduce(rho, SUM)
computeData->rho = computeData->beta / computeData->rho; // rho = beta / aux
daxpy (&(dist_data->tamBl), &computeData->rho, computeData->d, &IONE, computeData->vec, &IONE); // x += rho * d
computeData->rho = -computeData->rho;
daxpy (&(dist_data->tamBl), &computeData->rho, computeData->z, &IONE, computeData->res, &IONE); // res -= rho * z
computeData->alpha = computeData->beta; // alpha = beta
computeData->beta = ddot (&(dist_data->tamBl), computeData->res, &IONE, computeData->res, &IONE); // beta = res' * res
MPI_Barrier(MPI_COMM_WORLD);
printf("PRUEBA 2\n"); fflush(stdout);
MPI_Allreduce(MPI_IN_PLACE, &computeData->beta, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); // Reduce(beta, SUM)
computeData->alpha = computeData->beta / computeData->alpha; // alpha = beta / alpha
dscal (&(dist_data->tamBl), &computeData->alpha, computeData->d, &IONE); // d = alpha * d
daxpy (&(dist_data->tamBl), &DONE, computeData->res, &IONE, computeData->d, &IONE); // d += res
MPI_Barrier(MPI_COMM_WORLD);
printf("PRUEBA 3\n"); fflush(stdout);
MPI_Allgatherv(computeData->d, dist_data->tamBl, MPI_DOUBLE, computeData->d_full,
computeData->dist_rows, computeData->displs_rows, MPI_DOUBLE, MPI_COMM_WORLD); // d_full = Gather(d)
computeData->tol = sqrt (computeData->beta); // tol = sqrt(beta) = norm (res)
computeData->iter++;
printf("TEST %d\n", computeData->iter);
}
#ifdef DEBUG
if(dist_data->myId == ROOT) printf ("Ended loop\n");
#endif
return ended_loop;
}
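/*
 * For reference, one pass of the loop above implements the classic
 * unpreconditioned Conjugate Gradient recurrences, with r the residual
 * (res in the code) and beta_k = r_k' * r_k:
 *
 *   z          = A * d                      (ProdSparseMatrixVector)
 *   rho        = beta_k / (d' * z)
 *   x          = x + rho * d
 *   r          = r - rho * z
 *   beta_{k+1} = r' * r
 *   d          = r + (beta_{k+1} / beta_k) * d
 *   tol        = sqrt(beta_{k+1}) = ||r||_2
 *
 * Each dot product is completed across ranks with an MPI_Allreduce, and
 * the updated direction d is re-assembled on all ranks with MPI_Allgatherv.
 */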
void free_computeData(Compute_data *computeData) {
RemoveDoubles (&computeData->res);
RemoveDoubles (&computeData->z);
RemoveDoubles (&computeData->d);
RemoveDoubles (&computeData->vec);
RemoveDoubles (&computeData->d_full);
RemoveSparseMatrix2 (&computeData->subm);
RemoveInts (&computeData->dist_rows);
RemoveInts (&computeData->displs_rows);
RemoveInts (&computeData->vlen);
}
/*
* _____________________________________________________________________________________
* || ||
* || ||
* || DISTRIBUTION FUNCTIONS ||
* || ||
* || ||
* \_____________________________________________________________________________________/
*/
/*
 * The following functions all relate to the distribution of the data
 * or of the processes.
 */
/*
* ========================================================================================
* ========================================================================================
* ========================PARENTS COMMUNICATION FUNCTIONS=================================
* ========================================================================================
* ========================================================================================
*/
/*
 * Registers the parents' data with the malleability manager so it can be
 * redistributed to the child processes.
 */
int dist_old(struct Dist_data *dist_data, Compute_data *computeData, char *argv[], int type_dist) {
int i;
//set_malleability_configuration(sm, ss, phy_dist, rm, rs);
//set_children_number(numC);
malleability_add_data(&(computeData->iter), 1, MAL_INT, 1, 1);
malleability_add_data(&(computeData->tol), 1, MAL_DOUBLE, 1, 1);
malleability_add_data(&(computeData->beta), 1, MAL_DOUBLE, 1, 1);
malleability_add_data(&(computeData->umbral), 1, MAL_DOUBLE, 1, 1);
malleability_add_data(&(computeData->vec), computeData->n, MAL_DOUBLE, 0, 1);
malleability_add_data(&(computeData->res), computeData->n, MAL_DOUBLE, 0, 1);
malleability_add_data(&(computeData->z), computeData->n, MAL_DOUBLE, 0, 1);
malleability_add_data(&(computeData->d_full), computeData->n, MAL_DOUBLE, 1, 1);
CreateInts(&(computeData->vlen), dist_data->tamBl+1);
for(i=0; i<=dist_data->tamBl; i++) {
computeData->vlen[i] = computeData->subm.vptr[i];
}
TransformHeadertoLength(computeData->vlen, computeData->subm.dim1); // From vptr to vlen
malleability_add_data(&(computeData->vlen), computeData->n, MAL_INT, 1, 1); //FIXME The last value could be asynchronous
malleability_add_data(&(computeData->subm.vpos), computeData->n, MAL_INT, 1, 1);
malleability_add_data(&(computeData->subm.vval), computeData->n, MAL_DOUBLE, 1, 1);
return 0;
}
/*
MPI_Bcast(computeData->d_full, computeData->n, MPI_DOUBLE, rootBcast, dist_data.comm_children);
MPI_Alltoallv(computeData->res, sendcounts, sdispls, dist_data.arrays, NULL, recvcounts, rdispls, MPI_INT, dist_data.comm_children);
*/
void send_matrix(struct Dist_data dist_data, Compute_data computeData, int rootBcast, int numP_child, int idI, int idE,
int *sendcounts, int *recvcounts,int *sdispls, int *rdispls) {
int i;
TransformHeadertoLength(computeData.subm.vptr, computeData.subm.dim1); // From vptr to vlen
// Distribute vlen to the children
MPI_Alltoallv(computeData.subm.vptr+1, sendcounts, sdispls, MPI_INT, NULL, recvcounts, rdispls, MPI_INT, dist_data.comm_children);
TransformLengthtoHeader(computeData.subm.vptr, computeData.subm.dim1); // From vlen to vptr
// Compute how many elements will be sent to each child process
if(idI == 0 && sendcounts[0] > 0) {
sendcounts[0] = computeData.subm.vptr[sdispls[0] + sendcounts[0]] - computeData.subm.vptr[sdispls[0]];
idI++;
}
for(i=idI; i<idE; i++) {
if(sendcounts[i] > 0) {
sendcounts[i] = computeData.subm.vptr[sdispls[i] + sendcounts[i]] - computeData.subm.vptr[sdispls[i]];
}
sdispls[i] = sdispls[i-1] + sendcounts[i-1];
}
//print_counts(dist_data, sendcounts, sdispls, numP_child, "Send");
/* DATA COMMUNICATION */
MPI_Alltoallv(computeData.subm.vpos, sendcounts, sdispls, MPI_INT, NULL, recvcounts, rdispls, MPI_INT, dist_data.comm_children);
MPI_Alltoallv(computeData.subm.vval, sendcounts, sdispls, MPI_DOUBLE, NULL, recvcounts, rdispls, MPI_DOUBLE, dist_data.comm_children);
}
/*
* ========================================================================================
* ========================================================================================
* ========================CHILDREN COMMUNICATION FUNCTIONS================================
* ========================================================================================
* ========================================================================================
*/
/*
 * Function called by a set of child processes.
 *
 * First the children obtain from their parents the initial information
 * needed to know the size of their vectors and matrix, as well as
 * how much data they will receive from each parent.
 *
 * After that, they prepare to receive the data from the parents.
 *
 */
void dist_new(struct Dist_data *dist_data, Compute_data *computeData) {
void *value = NULL;
malleability_get_data(&value, 0, 1, 1);
computeData->iter = *((int *)value);
malleability_get_data(&value, 1, 1, 1);
computeData->tol = *((double *)value);
malleability_get_data(&value, 2, 1, 1);
computeData->beta = *((double *)value);
malleability_get_data(&value, 3, 1, 1);
computeData->umbral = *((double *)value);
malleability_get_data(&value, 0, 0, 1);
computeData->vec = ((double *)value);
malleability_get_data(&value, 1, 0, 1);
computeData->res = ((double *)value);
malleability_get_data(&value, 2, 0, 1);
computeData->z = ((double *)value);
malleability_get_data(&value, 4, 1, 1);
computeData->d_full = ((double *)value);
malleability_get_data(&value, 5, 1, 1);
computeData->subm.vptr = ((int *)value);
malleability_get_data(&value, 6, 1, 1);
computeData->subm.vpos = ((int *)value);
malleability_get_data(&value, 7, 1, 1);
computeData->subm.vval = ((double *)value);
TransformLengthtoHeader(computeData->subm.vptr, computeData->subm.dim1); // From vlen to vptr
}
/*
MPI_Bcast(computeData->d_full, computeData->n, MPI_DOUBLE, ROOT, dist_data->comm_parents); // Receive the RES and D_FULL vectors
MPI_Alltoallv(aux, sendcounts, sdispls, MPI_INT, computeData->res, recvcounts, rdispls, dist_data->arrays, dist_data->comm_parents);
dcopy (&(dist_data->tamBl), &(computeData->d_full[dist_data->ini]), &IONE, computeData->d, &IONE); // Partial copy of D_FULL into D
*/
void recv_matrix(struct Dist_data *dist_data, Compute_data *computeData, int idI, int idE,
int *sendcounts, int *recvcounts,int *sdispls, int *rdispls) {
int i;
double *aux = NULL;
int *aux_int = NULL, elems;
Compute_data dist_parents;
/* PREPARE RECEPTION DATA FOR THE MATRIX */
get_rows_dist(&dist_parents, dist_data->numP_parents, computeData->n);
get_rows_dist(computeData, dist_data->numP, computeData->n);
CreateSparseMatrixVptr(&(computeData->subm), dist_data->tamBl, computeData->n, 0);
MPI_Alltoallv(aux_int, sendcounts, sdispls, MPI_INT, (computeData->subm.vptr)+1, recvcounts, rdispls, MPI_INT, dist_data->comm_parents);
TransformLengthtoHeader(computeData->subm.vptr, computeData->subm.dim1); // From vlen to vptr
elems = computeData->subm.vptr[dist_data->tamBl];
CreateSparseMatrixValues(&(computeData->subm), dist_data->tamBl, computeData->n, elems, 0);
// Compute how many elements will be received from each parent process
if(idI == 0 && recvcounts[0] > 0) {
recvcounts[0] = computeData->subm.vptr[rdispls[0] + recvcounts[0]] - computeData->subm.vptr[rdispls[0]];
idI++;
}
for(i=idI; i<idE; i++) {
if(recvcounts[i] > 0) {
recvcounts[i] = computeData->subm.vptr[rdispls[i] + recvcounts[i]] - computeData->subm.vptr[rdispls[i]];
}
rdispls[i] = rdispls[i-1] + recvcounts[i-1];
}
//print_counts(*dist_data, recvcounts, rdispls, numP_parents, "Recv");
/* DATA COMMUNICATION */
MPI_Alltoallv(aux_int, sendcounts, sdispls, MPI_INT, computeData->subm.vpos, recvcounts, rdispls, MPI_INT, dist_data->comm_parents);
MPI_Alltoallv(aux, sendcounts, sdispls, MPI_DOUBLE, computeData->subm.vval, recvcounts, rdispls, MPI_DOUBLE, dist_data->comm_parents);
free(dist_parents.dist_rows);
free(dist_parents.displs_rows);
}
/*
* ========================================================================================
* ========================================================================================
* ================================DISTRIBUTION FUNCTIONS==================================
* ========================================================================================
* ========================================================================================
*/
/*
 * Given a process Id and the total number of processes,
 * computes the number of rows (tamBl) and the range of
 * rows [ini, fin) that the process will work with.
 */
void get_dist(int total_r, int id, int numP, struct Dist_data *dist_data) {
int rem;
dist_data->tot_r = total_r;
dist_data->tamBl = total_r / numP;
rem = total_r % numP;
if(id < rem) { // First subgroup
dist_data->ini = id * dist_data->tamBl + id;
dist_data->fin = (id+1) * dist_data->tamBl + (id+1);
} else { // Second subgroup
dist_data->ini = id * dist_data->tamBl + rem;
dist_data->fin = (id+1) * dist_data->tamBl + rem;
}
if(dist_data->fin > total_r) {
dist_data->fin = total_r;
}
if(dist_data->ini > dist_data->fin) {
dist_data->ini = dist_data->fin;
}
dist_data->tamBl = dist_data->fin - dist_data->ini;
}
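/*
 * Worked example: get_dist(10, id, 3, &dd) spreads the remainder over the
 * lowest ranks, yielding
 *
 *   id 0: ini = 0, fin = 4,  tamBl = 4
 *   id 1: ini = 4, fin = 7,  tamBl = 3
 *   id 2: ini = 7, fin = 10, tamBl = 3
 */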
/*
 * For a given process Id, computes how many elements
 * process myId will send to / receive from it.
 */
void set_counts(int id, int numP, struct Dist_data data_dist, int *sendcounts) {
struct Dist_data other;
int biggest_ini, smallest_end, tot_rows;
get_dist(data_dist.tot_r, id, numP, &other);
// If the row ranges do not overlap, move on to the next process
if(data_dist.ini >= other.fin || data_dist.fin <= other.ini) {
return;
}
// Take the larger ini of the two processes
if(data_dist.ini > other.ini) {
biggest_ini = data_dist.ini;
} else {
biggest_ini = other.ini;
}
// Take the smaller fin of the two processes
if(data_dist.fin < other.fin) {
smallest_end = data_dist.fin;
} else {
smallest_end = other.fin;
}
sendcounts[id] = smallest_end - biggest_ini; // Number of elements to send to / receive from process Id
}
/*
 * For a process of one group, obtains the range of processes of the
 * other group it has to send data to or receive data from.
 *
 * Returns the first identifier and the last one (excluded) to
 * communicate with.
 */
void getIds_intercomm(struct Dist_data dist_data, int numP_other, int **idS) {
int idI, idE;
int tamOther = dist_data.tot_r / numP_other;
int remOther = dist_data.tot_r % numP_other;
int middle = (tamOther + 1) * remOther;
if(middle > dist_data.ini) { // First subgroup
idI = dist_data.ini / (tamOther + 1);
} else { // Second subgroup
idI = ((dist_data.ini - middle) / tamOther) + remOther;
}
if(middle >= dist_data.fin) { // First subgroup
idE = dist_data.fin / (tamOther + 1);
idE = (dist_data.fin % (tamOther + 1) > 0 && idE+1 <= numP_other) ? idE+1 : idE;
} else { // Second subgroup
idE = ((dist_data.fin - middle) / tamOther) + remOther;
idE = ((dist_data.fin - middle) % tamOther > 0 && idE+1 <= numP_other) ? idE+1 : idE;
}
//free(*idS);
CreateInts(idS, 2);
(*idS)[0] = idI;
(*idS)[1] = idE;
}
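/*
 * Worked example: a process owning rows [5, 10) of tot_r = 10 facing a
 * group of numP_other = 3 (which, by the rule of get_dist, owns rows
 * [0,4), [4,7) and [7,10)) obtains idS = { 1, 3 }, i.e. it communicates
 * with ranks 1 and 2 of the other group.
 */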
/*
double starttime, endtime, total, res;
MPI_Barrier(MPI_COMM_WORLD);
starttime = MPI_Wtime();
endtime = MPI_Wtime();
total = endtime - starttime;
MPI_Reduce(&total, &res, 1, MPI_DOUBLE, MPI_MAX, ROOT, MPI_COMM_WORLD);
if(dist_data.myId == ROOT) {printf("Parent BCAST time %f\n", total); fflush(stdout);}
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "ScalarVectors.h"
int CreateInts (int **vint, int num)
{
if ((*vint = (int *) malloc (sizeof(int)*num)) == NULL)
{ printf ("Memory Error (CreateInts(%d))\n", num); exit (1); }
return 0;
}
int InitInts (int *vint, int n, int frst, int incr)
{
int i, *p1 = vint, num = frst;
for (i=0; i<n; i++)
{ *(p1++) = num; num += incr; }
return 0;
}
int CopyInts (int *src, int *dst, int n)
{ memmove (dst, src, sizeof(int) * n); return 0; }
int CopyShiftInts (int *src, int *dst, int n, int shft)
{
int i, *p1 = src, *p2 = dst;
for (i=0; i<n; i++)
*(p2++) = *(p1++) + shft;
return 0;
}
int TransformLengthtoHeader (int *vec, int n)
{
int i, *pi = vec;
for (i=0; i<n; i++) { *(pi+1) += *pi; pi++; }
return 0;
}
int TransformHeadertoLength (int *vec, int n)
{
int i, *pi = &(vec[n]);
for (i=n; i>=1; i--) { *pi -= *(pi-1); pi--; }
return 0;
}
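/*
 * Worked example of the two transforms above, for n = 3 rows:
 *
 *   vlen (with leading 0): { 0, 2, 3, 1 }  -- TransformLengthtoHeader -->
 *   vptr (CSR headers)   : { 0, 2, 5, 6 }  -- TransformHeadertoLength -->
 *   vlen again           : { 0, 2, 3, 1 }
 *
 * so that vptr[i+1] - vptr[i] is the number of nonzeros of row i.
 */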
void GetIntFromString (char *string, int *pnum, int numC, int shft)
{
int j = 0, num = 0, neg = 0;
char *pchar = string;
while ((j < numC) && ((*pchar < '0') || (*pchar > '9')) &&
(*pchar != '+') && (*pchar != '-')) { j++; pchar++; }
if (j < numC)
{
if ((*pchar == '+') || (*pchar == '-'))
{ neg = (*pchar == '-'); j++; pchar++; }
while ((j < numC) && (*pchar >= '0') && (*pchar <= '9'))
{ num = num * 10 + (*pchar - 48); j++; pchar++; }
}
if (neg) num = -num;
*pnum = num + shft;
}
void GetIntsFromString (char *string, int *vec, int numN, int numC, int shft)
{
int i, *pint = vec;
char *pchar = string;
for (i=0; i<numN; i++)
{ GetIntFromString (pchar, (pint++), numC, shft); pchar += numC; }
}
void GetFormatsFromString (char *string, int *vec, int numN, int numC)
{
int i, k = 0;
int *pint = vec;
char *pchar = string, *pch = NULL, c = ' ', c2;
for (i=0; i<numN; i++)
{
pch = pchar;
while (*pch == ' ') pch++;
sscanf (pch, "(%i%c", pint, &c);
if ((c == 'P') || (c == 'p'))
{
sscanf (pch, "(%i%c%i%c%i.%i)", &k, &c2, pint, &c, pint+1, pint+2);
pint += 3;
}
else if ((c == 'E') || (c == 'e') || (c == 'D') || (c == 'd') ||
(c == 'F') || (c == 'f') || (c == 'G') || (c == 'g'))
{
sscanf (pch, "(%i%c%i.%i)", pint, &c, pint+1, pint+2);
pint += 3;
}
else
{ sscanf (pch, "(%i%c%i)", pint, &c, pint+1); pint += 2; }
pchar += numC;
}
}
int PrintInts (int *vint, int num)
{
int i, *pi = vint;
for (i=0; i<num; i++) printf ("%d ", *(pi++));
printf ("\n");
return 0;
}
int RemoveInts (int **vint)
{ free (*vint); *vint = NULL; return 0; }
int CreateDoubles (double **vdouble, int num)
{
if ((*vdouble = (double *) malloc (sizeof(double)*num)) == NULL)
{ printf ("Memory Error (CreateDoubles(%d))\n", num); exit (1); }
return 0;
}
int InitDoubles (double *vdouble, int n, double frst, double incr)
{
int i;
double *p1 = vdouble, num = frst;
for (i=0; i<n; i++)
{ *(p1++) = num; num += incr; }
return 0;
}
void GetDoubleFromString (char *string, double *pdbl, int numC)
{
int j, k, exp, neg;
double num, frac;
char *pchar = string;
j = 0; exp = 0; neg = 0; num = 0.0; frac = 1.0;
while ((j < numC) && ((*pchar < '0') || (*pchar > '9')) &&
(*pchar != '+') && (*pchar != '-') && (*pchar != '.')) { j++; pchar++; }
if (j < numC)
{
if ((*pchar == '+') || (*pchar == '-'))
{ neg = (*pchar == '-'); j++; pchar++; }
if (j < numC)
{
if (*pchar != '.')
while ((j < numC) && (*pchar >= '0') && (*pchar <= '9'))
{ num = num * 10 + (*pchar - 48); j++; pchar++; }
if (j < numC)
{
if (*pchar == '.')
{
j++; pchar++;
while ((j < numC) && (*pchar >= '0') && (*pchar <= '9'))
{ frac /= 10; num += (*pchar-48) * frac; j++; pchar++; }
}
if (neg) num = -num;
if (j < numC)
{
if ((*pchar == 'e') || (*pchar == 'E') || (*pchar == 'd') || (*pchar == 'D'))
{
neg = 0; j++; pchar++;
if (j < numC)
{
if ((*pchar == '+') || (*pchar == '-'))
{ neg = (*pchar == '-'); j++; pchar++; }
if (j < numC)
{
while ((j < numC) && (*pchar >= '0') &&
(*pchar <= '9'))
{ exp = exp*10 + (*pchar-48); j++; pchar++; }
if (neg) exp = -exp;
for (k=0; k<exp; k++) num *= 10;
for (k=0; k>exp; k--) num /= 10;
}
}
}
}
}
else
if (neg) num = -num;
}
}
*pdbl = num;
}
void GetDoublesFromString (char *string, double *vec, int numN, int numC)
{
int i;
double *paux = vec;
char *pchar = string;
for (i=0; i<numN; i++)
{ GetDoubleFromString (pchar, (paux++), numC); pchar += numC; }
}
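/*
 * Example: the value lines of the BCSSTK01 file further below are written
 * with the Fortran format (4E20.12), so a call such as
 *
 *   GetDoublesFromString(line, vals, 4, 20);
 *
 * (line and vals being a caller's input buffer and output array) parses
 * four 20-character fields like " .283226851852E+07" into 2832268.51852
 * and its three neighbours.
 */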
int PrintDoubles (double *vdouble, int num)
{
int i;
double *pd = vdouble;
for (i=0; i<num; i++) printf ("%e ", *(pd++));
printf ("\n");
return 0;
}
int RemoveDoubles (double **vdouble)
{ free (*vdouble); *vdouble = NULL; return 0; }
#ifndef ScalarVector
#define ScalarVector 1
#include <stdio.h>
extern int CreateInts (int **vint, int num);
extern int InitInts (int *vint, int n, int frst, int incr);
extern int CopyInts (int *src, int *dst, int n);
extern int CopyShiftInts (int *src, int *dst, int n, int shft);
extern int TransformLengthtoHeader (int *vec, int n);
extern int TransformHeadertoLength (int *vec, int n);
extern void GetIntFromString (char *string, int *pnum, int numC, int shft);
extern void GetIntsFromString (char *string, int *vec, int numN, int numC, int shft);
extern void GetFormatsFromString (char *string, int *vec, int numN, int numC);
extern int PrintInts (int *vint, int num);
extern int RemoveInts (int **vint);
extern int CreateDoubles (double **vdouble, int num);
extern int InitDoubles (double *vdouble, int n, double frst, double incr);
extern void GetDoubleFromString (char *string, double *pdbl, int numC);
extern void GetDoublesFromString (char *string, double *vec, int numN, int numC);
extern int PrintDoubles (double *vdouble, int num);
extern int RemoveDoubles (double **vdouble);
#endif
#include <stdio.h>
#include <stdlib.h>
#include "ScalarVectors.h"
#include "SparseMatrices.h"
#define length1 82
#define length2 82
FILE *OpenFile (char *name, char *attr)
{
FILE *fich;
if ((fich = fopen (name, attr)) == NULL)
{ printf ("File %s not exists \n", name); exit(1); }
return fich;
}
void ReadStringFile (FILE *file, char *string, int length)
{
char *s = NULL;
if ((s = fgets (string, length, file)) == NULL)
{ printf ("Error en lectura \n"); exit (1); }
}
int CreateSparseMatrix (ptr_SparseMatrix spr, int numR, int numC, int numE,
int msr)
{
spr->dim1 = numR; spr->dim2 = numC;
CreateInts (&(spr->vptr), numE+numR+1);
*(spr->vptr) = ((msr)? (numR+1): 0);
spr->vpos = spr->vptr + ((msr)? 0: (numR+1));
CreateDoubles (&(spr->vval), numE+(numR+1)*msr);
return 0;
}
int CreateSparseMatrixVptr (ptr_SparseMatrix spr, int numR, int numC,
int msr)
{
spr->dim1 = numR; spr->dim2 = numC;
CreateInts (&(spr->vptr), numR+1);
*(spr->vptr) = ((msr)? (numR+1): 0);
return 0;
}
int CreateSparseMatrixValues (ptr_SparseMatrix spr, int numR, int numC, int numE,
int msr)
{
CreateInts (&(spr->vpos), numE+(numR+1)*msr);
CreateDoubles (&(spr->vval), numE+(numR+1)*msr);
return 0;
}
int PrintSparseMatrix (SparseMatrix spr, int CorF)
{
int i, j;
if (spr.vptr == spr.vpos)
{
printf ("Diagonals : \n ");
for (i=0; i<spr.dim1; i++) printf ("%f ", spr.vval[i]); printf ("\n");
}
printf ("Pointers: \n ");
if (spr.dim1 > 0)
for (i=0; i<spr.dim1; i++) printf ("%d ", spr.vptr[i]); printf ("\n");
printf ("Values: \n");
for (i=0; i<spr.dim1; i++)
{ printf (" Row %d --> ", i+CorF);
for (j=(spr.vptr[i]-CorF); j<(spr.vptr[i+1]-CorF); j++)
printf ("(%d,%f) ", spr.vpos[j], spr.vval[j]);
printf ("\n"); }
printf ("\n");
return 0;
}
int RemoveSparseMatrix (ptr_SparseMatrix spr)
{
spr->dim1 = -1; spr->dim2 = -1;
RemoveInts (&(spr->vptr));
RemoveDoubles (&(spr->vval));
return 0;
}
int RemoveSparseMatrix2 (ptr_SparseMatrix spr)
{
spr->dim1 = -1; spr->dim2 = -1;
RemoveInts (&(spr->vptr));
RemoveInts (&(spr->vpos));
RemoveDoubles (&(spr->vval));
return 0;
}
int ProdSparseMatrixVector (SparseMatrix spr, double *vec, double *res)
{
int i, j;
double aux;
for (i=0; i<spr.dim1; i++)
{
aux = 0.0;
for (j=spr.vptr[i]; j<spr.vptr[i+1]; j++) {
aux += spr.vval[j] * vec[spr.vpos[j]];
}
res[i] = aux;
}
return 0;
}
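/*
 * Worked example (CSR, 0-based): the 2x2 matrix {{4,1},{0,3}} is stored as
 *
 *   vptr = { 0, 2, 3 }   vpos = { 0, 1, 1 }   vval = { 4.0, 1.0, 3.0 }
 *
 * and ProdSparseMatrixVector with vec = { 1, 2 } produces res = { 6, 6 }.
 */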
int DesymmetrizeSparseMatrices (SparseMatrix src, ptr_SparseMatrix dst)
{
int i, j, row, col, pos1, pos2;
int n = src.dim1, nnz = 0;
int *sizes = NULL;
CreateInts (&sizes, n);
InitInts (sizes, n, 0, 0);
for (i=0; i<n; i++) {
for (j=src.vptr[i]; j<src.vptr[i+1]; j++) {
sizes[i]++; nnz++;
if (src.vpos[j] != i) {
sizes[src.vpos[j]]++; nnz++;
}
}
}
CreateSparseMatrix (dst, n, n, nnz, 0);
CopyInts (sizes, (dst->vptr)+1, n);
dst->vptr[0] = 0; TransformLengthtoHeader (dst->vptr, n);
CopyInts (dst->vptr, sizes, n);
for (i=0; i<n; i++) {
for (j=src.vptr[i]; j<src.vptr[i+1]; j++) {
row = i; pos1 = sizes[row];
dst->vpos[pos1] = src.vpos[j];
dst->vval[pos1] = src.vval[j];
sizes[row]++;
if (src.vpos[j] != i) {
col = src.vpos[j]; pos2 = sizes[col];
dst->vpos[pos2] = row;
dst->vval[pos2] = src.vval[j];
sizes[col]++;
}
}
}
RemoveInts (&sizes);
return 0;
}
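/*
 * Worked example: given only the upper triangle of the symmetric matrix
 * {{2,1},{1,3}} (vptr = {0,2,3}, vpos = {0,1,1}, vval = {2,1,3}, 3 stored
 * entries), DesymmetrizeSparseMatrices mirrors the off-diagonal entry and
 * builds the full pattern with nnz = 4: vptr = {0,2,4}, row 0 holding
 * (0,2),(1,1) and row 1 holding (0,1),(1,3).
 */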
int ProdSymSparseMatrixVector (SparseMatrix spr, double *vec, double *res)
{
int i, j, k;
double aux, val;
for (i=0; i<spr.dim1; i++) res[i] = 0.0;
for (i=0; i<spr.dim1; i++)
{
aux = 0.0;
for (j=spr.vptr[i]; j<spr.vptr[i+1]; j++) {
k = spr.vpos[j]; val = spr.vval[j];
aux += val * vec[k];
if (k != i) res[k] += (val * vec[i]);
}
res[i] += aux;
}
return 0;
}
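/*
 * Worked example: with the same upper-triangle storage as above
 * (vptr = {0,2,3}, vpos = {0,1,1}, vval = {2,1,3}),
 * ProdSymSparseMatrixVector with vec = { 1, 1 } also accumulates the
 * transposed contribution of each off-diagonal entry, returning the full
 * symmetric product res = { 3, 4 }.
 */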
void CreateSparseMatrixHB (char *nameFile, ptr_SparseMatrix spr, int FtoC)
{
FILE *file;
char string[length1], *s = NULL;
int i, j, k = 0, shft = (FtoC)?-1:0;
int *vptr = NULL, *vpos = NULL;
double *vval = NULL;
int lines[5], dim[4], formats[10];
file = OpenFile (nameFile, "r");
ReadStringFile (file, string, length1);
ReadStringFile (file, string, length1);
GetIntsFromString (string, lines, 5, 14, 0);
ReadStringFile (file, string, length1);
GetIntsFromString ((string+14), dim, 4, 14, 0);
CreateSparseMatrix (spr, dim[0], dim[1], dim[2], 0);
vptr = spr->vptr; vpos = spr->vpos; vval = spr->vval;
ReadStringFile (file, string, length1);
GetFormatsFromString (string, formats, 2, 16);
GetFormatsFromString ((string+32), (formats+4), 1+(lines[4] > 0), 20);
if (lines[4] > 0) ReadStringFile (file, string, length1);
j = 0;
for (i = 0; i < lines[1]; i++)
{
ReadStringFile (file, string, length2);
k = ((dim[0] + 1) - j);
if (k > formats[0]) k = formats[0];
GetIntsFromString (string, (vptr+j), k, formats[1], shft);
j+=formats[0];
}
j = 0;
for (i = 0; i < lines[2]; i++)
{
ReadStringFile (file, string, length2);
k = (dim[2] - j);
if (k > formats[2]) k = formats[2];
GetIntsFromString (string, (vpos+j), k, formats[3], shft);
j+=formats[2];
}
j = 0;
for (i = 0; i < lines[3]; i++)
{
ReadStringFile (file, string, length2);
k = (dim[2] - j);
if (k > formats[4]) k = formats[4];
GetDoublesFromString (string, (vval+j), k, formats[5]);
j+=formats[4];
}
fclose (file);
}
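/*
 * The header fields read above map onto the BCSSTK01 file further below:
 * its second line carries the line counts (74 total, 4 pointer, 14 index
 * and 56 value lines), the third the type and sizes (RSA, 48 x 48, 224
 * stored entries) and the fourth the Fortran formats ((16I5) (16I5)
 * (4E20.12)), which is where formats[0..5] come from.
 */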
#ifndef SparseMatrixTip
#define SparseMatrixTip 1
typedef struct
{
int dim1, dim2;
int *vptr;
int *vpos;
double *vval;
} SparseMatrix, *ptr_SparseMatrix;
extern FILE *OpenFile (char *name, char *attr);
extern void ReadStringFile (FILE *file, char *string, int length);
extern int CreateSparseMatrix (ptr_SparseMatrix spr, int numR, int numC, int numE, int msr);
extern int CreateSparseMatrixVptr (ptr_SparseMatrix spr, int numR, int numC, int msr);
extern int CreateSparseMatrixValues (ptr_SparseMatrix spr, int numR, int numC, int numE, int msr);
extern int RemoveSparseMatrix (ptr_SparseMatrix spr);
extern int RemoveSparseMatrix2 (ptr_SparseMatrix spr);
extern int PrintSparseMatrix (SparseMatrix spr, int CorF);
extern int ProdSparseMatrixVector (SparseMatrix spr, double *vec, double *res);
extern int DesymmetrizeSparseMatrices (SparseMatrix src, ptr_SparseMatrix dst);
extern int ProdSymSparseMatrixVector (SparseMatrix spr, double *vec, double *res);
extern void CreateSparseMatrixHB (char *nameFile, ptr_SparseMatrix spr, int FtoC);
#endif
CC = gcc
MCC = mpicc
#C_FLAGS_ALL = -Wall -Wextra -Wshadow -Wfatal-errors -Wconversion -Wpedantic
C_FLAGS =
LD_FLAGS = -lm -pthread
DEF =
.PHONY : clean clear install install_slurm
# MKL MonoHebra
MKL_FSINGLE = -lmkl_blas95_lp64 -lmkl_lapack95_lp64 -lmkl_gf_lp64 -lmkl_sequential -lmkl_core -lpthread -lm
LIBMONO = -L$(DIR_MKL) $(MKL_FSINGLE)
# Final binary
BIN = a.out
# Put all auto generated stuff to this build dir.
BUILD_DIR = ./build
# List of all directories where source files are located
SRCDIRS = IOcodes Main malleability malleability/spawn_methods malleability/distribution_methods
# List of all .c source files.
C_FILES = $(foreach dire, $(SRCDIRS), $(wildcard $(dire)/*.c))
# All .o files go to build dir.
OBJ = $(C_FILES:%.c=$(BUILD_DIR)/%.o)
# Gcc will create these .d files containing dependencies.
DEP = $(OBJ:%.o=%.d)
# Default target named after the binary.
$(BIN) : $(BUILD_DIR)/$(BIN)
# Actual target of the binary - depends on all .o files.
$(BUILD_DIR)/$(BIN) : $(OBJ)
$(MCC) $(C_FLAGS) $^ -o $@ $(LD_FLAGS) $(LIBMONO)
# Include all .d files
# .d files are used for knowing the dependencies of each source file
-include $(DEP)
# Build target for every single object file.
# The potential dependency on header files is covered
# by calling `-include $(DEP)`.
# The -MMD flag additionally creates a .d file with
# the same name as the .o file.
$(BUILD_DIR)/%.o : %.c
mkdir -p $(@D)
$(MCC) $(C_FLAGS) $(DEF) -MMD -c $< -o $@
clean:
-rm $(BUILD_DIR)/$(BIN) $(OBJ) $(DEP)
clear:
-rm -rf $(BUILD_DIR)
install: $(BIN)
echo "Done"
# Builds target with slurm
install_slurm: LD_FLAGS += -lslurm
install_slurm: DEF += -DUSE_SLURM
install_slurm: install
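# Usage sketch (assuming mpicc and the MKL libraries are available): a plain
# `make` compiles every .c file found under SRCDIRS into ./build and links
# ./build/a.out; `make install_slurm` rebuilds with -DUSE_SLURM and links
# against -lslurm.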
CC = mpicc
CFLAGS = $(MKL_COPTIONS)
CLINKER = mpicc
F77 = mpif77
FFLAGS =
FLINKER = mpif77
LDFLAGS =
OPTFLAGS =
LIBLIST = $(LIBDIRS_4) $(LIBLIST_4)
LDLIBS += -lslurm
INCDIRS = -I/home/ulc/cursos/curso355/TFM/Compact_CG_2
EXECS = ConjugateGradient ConjugateGradient_mt
all: $(EXECS)
DIR_MKL=$(MKLROOT)/lib/intel64
# MKL MonoHebra
MKL_FSINGLE = -lmkl_blas95_lp64 -lmkl_lapack95_lp64 -lmkl_gf_lp64 -lmkl_sequential -lmkl_core -lpthread -lm
LIBMONO = -L$(DIR_MKL) $(MKL_FSINGLE)
# MKL MultiHebra_INTEL
MKL_FMULTIS_INTEL= -lmkl_blas95_lp64 -lmkl_lapack95_lp64 -lmkl_gf_lp64 -lmkl_intel_thread -lmkl_core -liomp5 -lpthread -lm
LIBMULTI = -L$(DIR_MKL) $(MKL_FMULTIS_INTEL)
#Test
ConjugateGradient: ConjugateGradient.o SparseMatrices.o ScalarVectors.o
$(CLINKER) $(LDFLAGS) $(OPTFLAGS) $(INCDIRS) -o ConjugateGradient ConjugateGradient.o SparseMatrices.o ScalarVectors.o $(LIBMONO) $(LDLIBS)
ConjugateGradient_mt: ConjugateGradient.o SparseMatrices.o ScalarVectors.o
$(CLINKER) $(LDFLAGS) $(OPTFLAGS) $(INCDIRS) -o ConjugateGradient_mt ConjugateGradient.o SparseMatrices.o ScalarVectors.o $(LIBMULTI) $(LDLIBS)
clean:
/bin/rm -rf core *.o $(EXECS)
.c.o:
echo compiling
$(CC) $(CFLAGS) $(LDLIBS) $(INCDIRS) -c $*.c
.f.o:
$(F77) $(FFLAGS) -c $*.f
.F.o:
$(F77) $(FFLAGS) -c $*.F
.h.h:
echo compiling
1SYMMETRIC STIFFNESS MATRIX SMALL GENERALIZED EIGENVALUE PROBLEM BCSSTK01
74 4 14 56 0
RSA 48 48 224 0
(16I5) (16I5) (4E20.12)
1 9 17 25 31 37 43 49 55 62 66 70 75 85 95 104
112 120 127 132 136 141 144 146 149 154 158 161 164 167 169 173
178 183 185 188 191 196 201 205 208 211 213 216 219 221 222 224
225
1 5 6 7 11 19 25 30 2 4 6 8 10 20 24 26
3 4 5 9 21 23 27 28 4 8 10 22 27 28 5 7
11 21 23 29 6 12 20 24 25 30 7 11 12 13 31 36
8 10 12 14 18 32 9 10 11 15 17 33 34 10 16 33
34 11 15 17 35 12 14 18 31 36 13 17 18 19 23 37
42 43 47 48 14 15 16 18 20 22 38 44 45 46 15 16
17 21 39 40 44 45 46 16 20 22 39 40 44 45 46 17
18 19 23 41 43 47 48 18 24 37 42 43 47 48 19 23
24 43 48 20 22 24 44 21 22 23 45 46 22 45 46 23
47 24 43 48 25 29 30 31 35 26 28 32 34 27 28 33
28 32 34 29 31 35 30 36 31 35 36 37 32 34 36 38
42 33 34 35 39 41 34 40 35 39 41 36 38 42 37 41
42 43 47 38 40 42 44 46 39 40 41 45 40 44 46 41
43 47 42 48 43 47 48 44 45 46 45 46 46 47 48 48
.283226851852E+07 .100000000000E+07 .208333333333E+07 -.333333333333E+04
.100000000000E+07 -.280000000000E+07 -.289351851852E+05 .208333333333E+07
.163544753086E+07 -.200000000000E+07 .555555555555E+07 -.666666666667E+04
-.200000000000E+07 -.308641975309E+05 .555555555555E+07 -.159791666667E+07
.172436728395E+07 -.208333333333E+07 -.277777777778E+07 -.168000000000E+07
-.154320987654E+05 -.277777777778E+07 -.289351851852E+05 -.208333333333E+07
.100333333333E+10 .200000000000E+07 .400000000000E+09 -.333333333333E+07
.208333333333E+07 .100000000000E+09 .106750000000E+10 -.100000000000E+07
.200000000000E+09 .277777777778E+07 .333333333333E+09 -.833333333333E+06
.153533333333E+10 -.200000000000E+07 -.555555555555E+07 .666666666667E+09
-.208333333333E+07 .100000000000E+09 .283226851852E+07 -.100000000000E+07
.208333333333E+07 -.280000000000E+07 -.289351851852E+05 .208333333333E+07
.163544753086E+07 .200000000000E+07 .555555555555E+07 -.308641975309E+05
.555555555555E+07 -.159791666667E+07 .172436728395E+07 -.208333333333E+07
-.277777777778E+07 -.154320987654E+05 -.277777777778E+07 -.289351851852E+05
-.208333333333E+07 .100333333333E+10 -.333333333333E+07 .208333333333E+07
.100000000000E+09 .106750000000E+10 .277777777778E+07 .333333333333E+09
-.833333333333E+06 .153533333333E+10 -.555555555555E+07 .666666666667E+09
-.208333333333E+07 .100000000000E+09 .283609946950E+07 -.214928529451E+07
.235916180402E+07 -.333333333333E+04 -.100000000000E+07 -.289351851852E+05
.208333333333E+07 -.383095098171E+04 -.114928529451E+07 .275828470683E+06
.176741074446E+07 .517922131816E+06 .429857058902E+07 -.555555555555E+07
-.666666666667E+04 .200000000000E+07 -.159791666667E+07 -.131963213599E+06
-.517922131816E+06 .229857058902E+07 .389003806848E+07 -.263499027470E+07
.277777777778E+07 -.168000000000E+07 -.289351851852E+05 -.208333333333E+07
-.517922131816E+06 -.216567078453E+07 -.551656941367E+06 .197572063531E+10
-.200000000000E+07 .400000000000E+09 .208333333333E+07 .100000000000E+09
-.229857058902E+07 .551656941366E+06 .486193650990E+09 .152734651547E+10
-.109779731332E+09 .100000000000E+07 .200000000000E+09 -.833333333333E+06
.114928529451E+07 .229724661236E+09 -.557173510779E+08 .156411143711E+10
-.200000000000E+07 -.208333333333E+07 .100000000000E+09 -.275828470683E+06
-.557173510779E+08 .109411960038E+08 .283226851852E+07 .100000000000E+07
.208333333333E+07 -.289351851852E+05 .208333333333E+07 .163544753086E+07
-.200000000000E+07 -.555555555555E+07 -.159791666667E+07 .172436728395E+07
-.208333333333E+07 .277777777778E+07 -.289351851852E+05 -.208333333333E+07
.100333333333E+10 .208333333333E+07 .100000000000E+09 .106750000000E+10
-.833333333333E+06 .153533333333E+10 -.208333333333E+07 .100000000000E+09
.608796296296E+05 .125000000000E+07 .416666666667E+06 -.416666666667E+04
.125000000000E+07 .337291666667E+07 -.250000000000E+07 -.833333333333E+04
-.250000000000E+07 .241171296296E+07 -.416666666667E+06 -.235500000000E+07
.150000000000E+10 .250000000000E+07 .500000000000E+09 .501833333333E+09
-.125000000000E+07 .250000000000E+09 .502500000000E+09 -.250000000000E+07
.398587962963E+07 -.125000000000E+07 .416666666667E+06 -.392500000000E+07
.341149691358E+07 .250000000000E+07 .694444444444E+07 -.385802469136E+05
.694444444445E+07 .243100308642E+07 -.416666666667E+06 -.347222222222E+07
-.192901234568E+05 -.347222222222E+07 .150416666667E+10 -.416666666667E+07
.133516666667E+10 .347222222222E+07 .416666666667E+09 .216916666667E+10
-.694444444444E+07 .833333333333E+09 .398587962963E+07 -.125000000000E+07
.416666666667E+06 -.416666666667E+04 -.125000000000E+07 .341149691358E+07
.250000000000E+07 -.694444444445E+07 -.833333333333E+04 .250000000000E+07
.243100308642E+07 -.416666666667E+06 .347222222222E+07 -.235500000000E+07
.150416666667E+10 -.250000000000E+07 .500000000000E+09 .133516666667E+10
.125000000000E+07 .250000000000E+09 .216916666667E+10 -.250000000000E+07
.647105806113E+05 .239928529451E+07 .140838195984E+06 .350487988027E+07
.517922131816E+06 -.479857058902E+07 .457738374749E+07 .134990274700E+06
.247238730198E+10 .961679848804E+09 -.109779731332E+09 .531278103775E+09
1SYMMETRIC MATRIX FROM NINE POINT START ON A 30 X 30 GRID. GR 30 30
696 46 217 433 0
RSA 900 900 4322 0
(20I4) (20I4) (10F8.1)
1 5 10 15 20 25 30 35 40 45 50 55 60 65 70 75 80 85 90 95
100 105 110 115 120 125 130 135 140 145 148 152 157 162 167 172 177 182 187 192
197 202 207 212 217 222 227 232 237 242 247 252 257 262 267 272 277 282 287 292
295 299 304 309 314 319 324 329 334 339 344 349 354 359 364 369 374 379 384 389
394 399 404 409 414 419 424 429 434 439 442 446 451 456 461 466 471 476 481 486
491 496 501 506 511 516 521 526 531 536 541 546 551 556 561 566 571 576 581 586
589 593 598 603 608 613 618 623 628 633 638 643 648 653 658 663 668 673 678 683
688 693 698 703 708 713 718 723 728 733 736 740 745 750 755 760 765 770 775 780
785 790 795 800 805 810 815 820 825 830 835 840 845 850 855 860 865 870 875 880
883 887 892 897 902 907 912 917 922 927 932 937 942 947 952 957 962 967 972 977
982 987 992 9971002100710121017102210271030103410391044104910541059106410691074
10791084108910941099110411091114111911241129113411391144114911541159116411691174
11771181118611911196120112061211121612211226123112361241124612511256126112661271
12761281128612911296130113061311131613211324132813331338134313481353135813631368
13731378138313881393139814031408141314181423142814331438144314481453145814631468
14711475148014851490149515001505151015151520152515301535154015451550155515601565
15701575158015851590159516001605161016151618162216271632163716421647165216571662
16671672167716821687169216971702170717121717172217271732173717421747175217571762
17651769177417791784178917941799180418091814181918241829183418391844184918541859
18641869187418791884188918941899190419091912191619211926193119361941194619511956
19611966197119761981198619911996200120062011201620212026203120362041204620512056
20592063206820732078208320882093209821032108211321182123212821332138214321482153
21582163216821732178218321882193219822032206221022152220222522302235224022452250
22552260226522702275228022852290229523002305231023152320232523302335234023452350
23532357236223672372237723822387239223972402240724122417242224272432243724422447
24522457246224672472247724822487249224972500250425092514251925242529253425392544
25492554255925642569257425792584258925942599260426092614261926242629263426392644
26472651265626612666267126762681268626912696270127062711271627212726273127362741
27462751275627612766277127762781278627912794279828032808281328182823282828332838
28432848285328582863286828732878288328882893289829032908291329182923292829332938
29412945295029552960296529702975298029852990299530003005301030153020302530303035
30403045305030553060306530703075308030853088309230973102310731123117312231273132
31373142314731523157316231673172317731823187319231973202320732123217322232273232
32353239324432493254325932643269327432793284328932943299330433093314331933243329
33343339334433493354335933643369337433793382338633913396340134063411341634213426
34313436344134463451345634613466347134763481348634913496350135063511351635213526
35293533353835433548355335583563356835733578358335883593359836033608361336183623
36283633363836433648365336583663366836733676368036853690369537003705371037153720
37253730373537403745375037553760376537703775378037853790379538003805381038153820
38233827383238373842384738523857386238673872387738823887389238973902390739123917
39223927393239373942394739523957396239673970397439793984398939943999400440094014
40194024402940344039404440494054405940644069407440794084408940944099410441094114
41174121412641314136414141464151415641614166417141764181418641914196420142064211
42164221422642314236424142464251425642614264426642684270427242744276427842804282
42844286428842904292429442964298430043024304430643084310431243144316431843204322
4323
1 2 31 32 2 3 31 32 33 3 4 32 33 34 4 5 33 34 35 5
6 34 35 36 6 7 35 36 37 7 8 36 37 38 8 9 37 38 39 9
10 38 39 40 10 11 39 40 41 11 12 40 41 42 12 13 41 42 43 13
14 42 43 44 14 15 43 44 45 15 16 44 45 46 16 17 45 46 47 17
18 46 47 48 18 19 47 48 49 19 20 48 49 50 20 21 49 50 51 21
22 50 51 52 22 23 51 52 53 23 24 52 53 54 24 25 53 54 55 25
26 54 55 56 26 27 55 56 57 27 28 56 57 58 28 29 57 58 59 29
30 58 59 60 30 59 60 31 32 61 62 32 33 61 62 63 33 34 62 63
64 34 35 63 64 65 35 36 64 65 66 36 37 65 66 67 37 38 66 67
68 38 39 67 68 69 39 40 68 69 70 40 41 69 70 71 41 42 70 71
72 42 43 71 72 73 43 44 72 73 74 44 45 73 74 75 45 46 74 75
76 46 47 75 76 77 47 48 76 77 78 48 49 77 78 79 49 50 78 79
80 50 51 79 80 81 51 52 80 81 82 52 53 81 82 83 53 54 82 83
84 54 55 83 84 85 55 56 84 85 86 56 57 85 86 87 57 58 86 87
88 58 59 87 88 89 59 60 88 89 90 60 89 90 61 62 91 92 62 63
91 92 93 63 64 92 93 94 64 65 93 94 95 65 66 94 95 96 66 67
95 96 97 67 68 96 97 98 68 69 97 98 99 69 70 98 99 100 70 71
99 100 101 71 72 100 101 102 72 73 101 102 103 73 74 102 103 104 74 75
103 104 105 75 76 104 105 106 76 77 105 106 107 77 78 106 107 108 78 79
107 108 109 79 80 108 109 110 80 81 109 110 111 81 82 110 111 112 82 83
111 112 113 83 84 112 113 114 84 85 113 114 115 85 86 114 115 116 86 87
115 116 117 87 88 116 117 118 88 89 117 118 119 89 90 118 119 120 90 119
120 91 92 121 122 92 93 121 122 123 93 94 122 123 124 94 95 123 124 125
95 96 124 125 126 96 97 125 126 127 97 98 126 127 128 98 99 127 128 129
99 100 128 129 130 100 101 129 130 131 101 102 130 131 132 102 103 131 132 133
103 104 132 133 134 104 105 133 134 135 105 106 134 135 136 106 107 135 136 137
107 108 136 137 138 108 109 137 138 139 109 110 138 139 140 110 111 139 140 141
111 112 140 141 142 112 113 141 142 143 113 114 142 143 144 114 115 143 144 145
115 116 144 145 146 116 117 145 146 147 117 118 146 147 148 118 119 147 148 149
119 120 148 149 150 120 149 150 121 122 151 152 122 123 151 152 153 123 124 152
153 154 124 125 153 154 155 125 126 154 155 156 126 127 155 156 157 127 128 156
157 158 128 129 157 158 159 129 130 158 159 160 130 131 159 160 161 131 132 160
161 162 132 133 161 162 163 133 134 162 163 164 134 135 163 164 165 135 136 164
165 166 136 137 165 166 167 137 138 166 167 168 138 139 167 168 169 139 140 168
169 170 140 141 169 170 171 141 142 170 171 172 142 143 171 172 173 143 144 172
173 174 144 145 173 174 175 145 146 174 175 176 146 147 175 176 177 147 148 176
177 178 148 149 177 178 179 149 150 178 179 180 150 179 180 151 152 181 182 152
153 181 182 183 153 154 182 183 184 154 155 183 184 185 155 156 184 185 186 156
157 185 186 187 157 158 186 187 188 158 159 187 188 189 159 160 188 189 190 160
161 189 190 191 161 162 190 191 192 162 163 191 192 193 163 164 192 193 194 164
165 193 194 195 165 166 194 195 196 166 167 195 196 197 167 168 196 197 198 168
169 197 198 199 169 170 198 199 200 170 171 199 200 201 171 172 200 201 202 172
173 201 202 203 173 174 202 203 204 174 175 203 204 205 175 176 204 205 206 176
177 205 206 207 177 178 206 207 208 178 179 207 208 209 179 180 208 209 210 180
[Test matrix data file (raw dump omitted): the remainder of the column-index list for a sparse matrix with indices up to n = 900, followed by its nonzero values, which follow a diagonally dominant pattern of 8.0 on the diagonal and -1.0 on the off-diagonals.]
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <string.h>
#include "distribution_methods/block_distribution.h"
#include "CommDist.h"
void prepare_redistribution(int qty, int myId, int numP, int numO, int is_children_group, int is_intercomm, char **recv, struct Counts *s_counts, struct Counts *r_counts);
void check_requests(struct Counts s_counts, struct Counts r_counts, MPI_Request **requests, size_t *request_qty);
void sync_point2point(char *send, char *recv, int is_intercomm, int myId, struct Counts s_counts, struct Counts r_counts, MPI_Comm comm);
void sync_rma(char *send, char *recv, struct Counts r_counts, int tamBl, MPI_Comm comm, int red_method);
void sync_rma_lock(char *recv, struct Counts r_counts, MPI_Win win);
void sync_rma_lockall(char *recv, struct Counts r_counts, MPI_Win win);
void async_point2point(char *send, char *recv, struct Counts s_counts, struct Counts r_counts, MPI_Comm comm, MPI_Request *requests);
void perform_manual_communication(char *send, char *recv, int myId, struct Counts s_counts, struct Counts r_counts);
/*
 * Allocates memory for a vector of up to "qty" elements.
 * The "qty" elements are distributed among the "numP" processes
 * that call this function.
 */
void malloc_comm_array(char **array, int qty, int myId, int numP) {
struct Dist_data dist_data;
get_block_dist(qty, myId, numP, &dist_data);
if( (*array = calloc(dist_data.tamBl, sizeof(char))) == NULL) {
printf("Memory Error (Malloc Arrays(%d))\n", dist_data.tamBl);
exit(1);
}
/*
int i;
for(i=0; i<dist_data.tamBl; i++) {
(*array)[i] = '!' + i + dist_data.ini;
}
printf("P%d Tam %d String: %s\n", myId, dist_data.tamBl, *array);
*/
}
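/*
 * Usage sketch (illustrative only; assumes MPI is initialized and myId/numP
 * were obtained from the communicator): allocates the local block of a
 * 100-element array for this rank.
 *
 *   char *array = NULL;
 *   malloc_comm_array(&array, 100, myId, numP);
 *   // ... work on array[0 .. tamBl-1] ...
 *   free(array);
 */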
//================================================================================
//================================================================================
//========================SYNCHRONOUS FUNCTIONS===================================
//================================================================================
//================================================================================
/*
 * Performs a communication to redistribute an array in a block distribution.
 * The redistribution distinguishes the parent group from the children group, and the values each group
 * indicates can differ.
 *
 * - send (IN): Array with the data to send. This data cannot be NULL for parents.
 * - recv (OUT): Array where data will be written. A NULL value is allowed if the process is not going to receive data.
 * If the process receives data and it is NULL, the behaviour is undefined.
 * - qty (IN): Sum of elements shared by all processes that will send data.
 * - myId (IN): Rank of the MPI process in the local communicator. For parents it is not necessarily the rank obtained from "comm".
 * - numP (IN): Size of the local group. If it is a children group, this parameter must match the result of
 * "MPI_Comm_size(comm)". For parents it is not always the size obtained from "comm".
 * - numO (IN): Amount of processes in the remote group. For parents it is the target quantity of processes after the
 * resize, while for children it is the amount of parents.
 * - is_children_group (IN): Indicates whether this MPI rank is a child (TRUE) or a parent (FALSE).
 * - red_method (IN): Redistribution method to use (baseline Alltoallv, point-to-point or RMA).
 * - comm (IN): Communicator to use to perform the redistribution.
 *
 * returns: An integer indicating whether the operation has been completed (TRUE) or not (FALSE). //FIXME In this case it is always true...
 */
int sync_communication(char *send, char **recv, int qty, int myId, int numP, int numO, int is_children_group, int red_method, MPI_Comm comm) {
int is_intercomm, aux_comm_used = 0;
struct Counts s_counts, r_counts;
struct Dist_data dist_data;
MPI_Comm aux_comm = MPI_COMM_NULL;
/* PREPARE COMMUNICATION */
MPI_Comm_test_inter(comm, &is_intercomm);
prepare_redistribution(qty, myId, numP, numO, is_children_group, is_intercomm, recv, &s_counts, &r_counts);
if(is_intercomm) {
MPI_Intercomm_merge(comm, is_children_group, &aux_comm);
aux_comm_used = 1;
} else { aux_comm = comm; } // Use the communicator directly in the intracommunicator case
/* PERFORM COMMUNICATION */
switch(red_method) {
case MALL_RED_RMA_LOCKALL:
case MALL_RED_RMA_LOCK:
if(is_children_group) {
get_block_dist(qty, myId, numP, &dist_data);
} else {
get_block_dist(qty, myId, numO, &dist_data);
}
sync_rma(send, *recv, r_counts, dist_data.tamBl, aux_comm, red_method);
break;
case MALL_RED_POINT:
sync_point2point(send, *recv, is_intercomm, myId, s_counts, r_counts, aux_comm);
break;
case MALL_RED_BASELINE:
default:
MPI_Alltoallv(send, s_counts.counts, s_counts.displs, MPI_CHAR, *recv, r_counts.counts, r_counts.displs, MPI_CHAR, aux_comm);
break;
}
if(aux_comm_used) {
MPI_Comm_free(&aux_comm);
}
freeCounts(&s_counts);
freeCounts(&r_counts);
return 1; //FIXME In this case is always true...
}
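/*
 * Usage sketch (illustrative only, not part of the library): a parent
 * redistributing "qty" chars towards a group of numO processes with the
 * baseline Alltoallv method. "send" must already hold this rank's block;
 * children instead pass send = NULL and is_children_group = 1, and "recv"
 * is allocated internally by prepare_redistribution.
 *
 *   char *recv = NULL;
 *   sync_communication(send, &recv, qty, myId, numP, numO,
 *                      0, MALL_RED_BASELINE, comm); // 0 = parent group
 */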
/*
 * Performs a series of blocking point-to-point communications to redistribute an array in a block distribution.
 * It should be called after calculating how the data should be redistributed.
 *
 * - send (IN): Array with the data to send. This value cannot be NULL for parents.
 * - recv (OUT): Array where data will be written. A NULL value is allowed if the process is not going to
 * receive data. If the process receives data and it is NULL, the behaviour is undefined.
 * - is_intercomm (IN): Indicates whether the communicator is an intercommunicator (TRUE) or an
 * intracommunicator (FALSE).
 * - myId (IN): Rank of the MPI process in the local communicator. For parents it is not necessarily the rank obtained from "comm".
 * - s_counts (IN): Struct which describes how many elements this process will send to each child and
 * the displacements.
 * - r_counts (IN): Struct which describes how many elements this process will receive from each parent
 * and the displacements.
 * - comm (IN): Communicator to use to perform the redistribution.
 *
 */
void sync_point2point(char *send, char *recv, int is_intercomm, int myId, struct Counts s_counts, struct Counts r_counts, MPI_Comm comm) {
int i, j, init, end, total_sends;
MPI_Request *sends = NULL;
init = s_counts.idI;
end = s_counts.idE;
if(!is_intercomm && (s_counts.idI == myId || s_counts.idE == myId + 1)) {
perform_manual_communication(send, recv, myId, s_counts, r_counts);
if(s_counts.idI == myId) init = s_counts.idI+1;
else end = s_counts.idE-1;
}
total_sends = end - init;
j = 0;
if(total_sends > 0) {
sends = (MPI_Request *) malloc(total_sends * sizeof(MPI_Request));
}
for(i=init; i<end; i++) {
sends[j] = MPI_REQUEST_NULL;
MPI_Isend(send+s_counts.displs[i], s_counts.counts[i], MPI_CHAR, i, 99, comm, &(sends[j]));
j++;
}
init = r_counts.idI;
end = r_counts.idE;
if(!is_intercomm) {
if(r_counts.idI == myId) init = r_counts.idI+1;
else if(r_counts.idE == myId + 1) end = r_counts.idE-1;
}
for(i=init; i<end; i++) {
MPI_Recv(recv+r_counts.displs[i], r_counts.counts[i], MPI_CHAR, i, 99, comm, MPI_STATUS_IGNORE);
}
if(total_sends > 0) {
MPI_Waitall(total_sends, sends, MPI_STATUSES_IGNORE);
free(sends); // Release the requests array once all sends have completed
}
}
/*
 * Performs synchronous MPI-RMA operations to redistribute an array in a block distribution. It should be called after
 * calculating how the data should be redistributed.
 *
 * - send (IN): Array with the data to send. This value cannot be NULL for parents.
 * - recv (OUT): Array where data will be written. A NULL value is allowed if the process is not going to receive data.
 * If the process receives data and it is NULL, the behaviour is undefined.
 * - r_counts (IN): Struct which describes how many elements this process will receive from each parent and the
 * displacements.
 * - tamBl (IN): How many elements are stored in the parameter "send".
 * - comm (IN): Communicator to use to perform the redistribution. Must be an intracommunicator, as required by MPI-RMA.
 * - red_method (IN): Type of data redistribution to use. In this case it indicates the RMA operation (Lock or LockAll).
 *
 */
void sync_rma(char *send, char *recv, struct Counts r_counts, int tamBl, MPI_Comm comm, int red_method) {
int aux_array_used;
MPI_Win win;
aux_array_used = 0;
if(send == NULL) {
tamBl = 1;
send = malloc(tamBl*sizeof(char)); //TODO Check if the value can be NULL at WIN_create
aux_array_used = 1;
}
MPI_Win_create(send, (MPI_Aint)tamBl, sizeof(char), MPI_INFO_NULL, comm, &win);
switch(red_method) {
case MALL_RED_RMA_LOCKALL:
sync_rma_lockall(recv, r_counts, win);
break;
case MALL_RED_RMA_LOCK:
sync_rma_lock(recv, r_counts, win);
break;
}
MPI_Win_free(&win);
if(aux_array_used) {
free(send);
send = NULL;
}
}
/*
* Performs a passive MPI-RMA data redistribution for a single array using the passive epochs Lock/Unlock.
* - recv (OUT): Array where data will be written. A NULL value is allowed if the process is not going to receive data.
* If the process receives data and is NULL, the behaviour is undefined.
* - r_counts (IN): Structure which describes how many elements will receive this process from each parent and the
* displacements.
* - win (IN): Window to use to perform the redistribution.
*
*/
void sync_rma_lock(char *recv, struct Counts r_counts, MPI_Win win) {
int i, target_displs;
target_displs = r_counts.first_target_displs;
for(i=r_counts.idI; i<r_counts.idE; i++) {
MPI_Win_lock(MPI_LOCK_SHARED, i, MPI_MODE_NOCHECK, win);
MPI_Get(recv+r_counts.displs[i], r_counts.counts[i], MPI_CHAR, i, target_displs, r_counts.counts[i], MPI_CHAR, win);
MPI_Win_unlock(i, win);
target_displs=0;
}
}
/*
* Performs a passive MPI-RMA data redistribution for a single array using the passive epochs Lockall/Unlockall.
* - recv (OUT): Array where data will be written. A NULL value is allowed if the process is not going to receive data.
* If the process receives data and is NULL, the behaviour is undefined.
* - r_counts (IN): Structure which describes how many elements will receive this process from each parent and the
* displacements.
* - win (IN): Window to use to perform the redistribution.
*
*/
void sync_rma_lockall(char *recv, struct Counts r_counts, MPI_Win win) {
int i, target_displs;
target_displs = r_counts.first_target_displs;
MPI_Win_lock_all(MPI_MODE_NOCHECK, win);
for(i=r_counts.idI; i<r_counts.idE; i++) {
MPI_Get(recv+r_counts.displs[i], r_counts.counts[i], MPI_CHAR, i, target_displs, r_counts.counts[i], MPI_CHAR, win);
target_displs=0;
}
MPI_Win_unlock_all(win);
}
//================================================================================
//================================================================================
//========================ASYNCHRONOUS FUNCTIONS==================================
//================================================================================
//================================================================================
/*
 * //TODO Add the IBARRIER strategy
 * Performs a communication to redistribute an array in a block distribution with non-blocking MPI functions.
 * The redistribution distinguishes the parent group from the children group, and the values each group
 * indicates can differ.
 *
 * - send (IN): Array with the data to send. This data cannot be NULL for parents.
 * - recv (OUT): Array where data will be written. A NULL value is allowed if the process is not going to receive data.
 * If the process receives data and it is NULL, the behaviour is undefined.
 * - qty (IN): Sum of elements shared by all processes that will send data.
 * - myId (IN): Rank of the MPI process in the local communicator. For parents it is not necessarily the rank obtained from "comm".
 * - numP (IN): Size of the local group. If it is a children group, this parameter must match the result of
 * "MPI_Comm_size(comm)". For parents it is not always the size obtained from "comm".
 * - numO (IN): Amount of processes in the remote group. For parents it is the target quantity of processes after the
 * resize, while for children it is the amount of parents.
 * - is_children_group (IN): Indicates whether this MPI rank is a child (TRUE) or a parent (FALSE).
 * - comm (IN): Communicator to use to perform the redistribution.
 * - requests (OUT): Pointer to an array of requests used to determine whether the communication has ended. If the pointer
 * is NULL or not enough space has been reserved, the array is allocated/reallocated.
 * - request_qty (OUT): Quantity of requests to be used. If a process sends and receives data, this value is
 * modified to the expected value.
 *
 * returns: An integer indicating whether the operation has been completed (TRUE) or not (FALSE). //FIXME In this case it is always false...
 */
int async_communication(char *send, char **recv, int qty, int myId, int numP, int numO, int is_children_group, int red_method, int red_strategies, MPI_Comm comm, MPI_Request **requests, size_t *request_qty) {
int is_intercomm, aux_comm_used = 0;
struct Counts s_counts, r_counts;
MPI_Comm aux_comm = MPI_COMM_NULL;
/* PREPARE COMMUNICATION */
MPI_Comm_test_inter(comm, &is_intercomm);
prepare_redistribution(qty, myId, numP, numO, is_children_group, is_intercomm, recv, &s_counts, &r_counts);
check_requests(s_counts, r_counts, requests, request_qty);
/* PERFORM COMMUNICATION */
switch(red_method) {
case MALL_RED_RMA_LOCKALL:
case MALL_RED_RMA_LOCK:
freeCounts(&s_counts);
freeCounts(&r_counts);
return MALL_DENIED; //TODO Implement asynchronous versions
case MALL_RED_POINT:
async_point2point(send, *recv, s_counts, r_counts, comm, *requests);
break;
case MALL_RED_BASELINE:
default:
MPI_Ialltoallv(send, s_counts.counts, s_counts.displs, MPI_CHAR, *recv, r_counts.counts, r_counts.displs, MPI_CHAR, comm, &((*requests)[0]));
break;
}
/* POST REQUESTS CHECKS */
if(is_children_group) {
MPI_Waitall(*request_qty, *requests, MPI_STATUSES_IGNORE);
}
if(malleability_red_contains_strat(red_strategies, MALL_RED_IBARRIER, NULL)) { //FIXME Strategy not fully implemented
MPI_Ibarrier(comm, &((*requests)[*request_qty-1]) ); //FIXME Not easy to read...
if(is_children_group) {
MPI_Wait(&((*requests)[*request_qty-1]), MPI_STATUS_IGNORE); //FIXME Not easy to read...
}
}
if(aux_comm_used) {
MPI_Comm_free(&aux_comm);
}
freeCounts(&s_counts);
freeCounts(&r_counts);
return 0; //FIXME In this case is always false...
}
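/*
 * Usage sketch (illustrative only): a parent starts the non-blocking
 * redistribution and overlaps it with computation before waiting. The
 * requests array is allocated/resized by check_requests inside the call.
 *
 *   MPI_Request *reqs = NULL;
 *   size_t req_qty = 0;
 *   char *recv = NULL;
 *   async_communication(send, &recv, qty, myId, numP, numO, 0,
 *                       MALL_RED_POINT, red_strategies, comm, &reqs, &req_qty);
 *   // ... computation overlapped with the communication ...
 *   MPI_Waitall((int)req_qty, reqs, MPI_STATUSES_IGNORE);
 *   free(reqs);
 */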
/*
 * Performs a series of non-blocking point-to-point communications to redistribute an array in a block distribution.
 * It should be called after calculating how the data should be redistributed.
 *
 * - send (IN): Array with the data to send. This value cannot be NULL for parents.
 * - recv (OUT): Array where data will be written. A NULL value is allowed if the process is not going to
 * receive data. If the process receives data and it is NULL, the behaviour is undefined.
 * - s_counts (IN): Struct which describes how many elements this process will send to each child and
 * the displacements.
 * - r_counts (IN): Struct which describes how many elements this process will receive from each parent
 * and the displacements.
 * - comm (IN): Communicator to use to perform the redistribution.
 * - requests (OUT): Pointer to an array of requests used to determine whether the communication has ended.
 *
 */
void async_point2point(char *send, char *recv, struct Counts s_counts, struct Counts r_counts, MPI_Comm comm, MPI_Request *requests) {
int i, j = 0;
for(i=s_counts.idI; i<s_counts.idE; i++) {
MPI_Isend(send+s_counts.displs[i], s_counts.counts[i], MPI_CHAR, i, 99, comm, &(requests[j]));
j++;
}
for(i=r_counts.idI; i<r_counts.idE; i++) {
MPI_Irecv(recv+r_counts.displs[i], r_counts.counts[i], MPI_CHAR, i, 99, comm, &(requests[j]));
j++;
}
}
/*
* ========================================================================================
* ========================================================================================
* ================================DISTRIBUTION FUNCTIONS==================================
* ========================================================================================
* ========================================================================================
*/
/*
 * Prepares a communication to redistribute an array in a block distribution. For each process it calculates
 * how many elements it sends to/receives from the other processes of the new group.
 *
 * - qty (IN): Sum of elements shared by all processes that will send data.
 * - myId (IN): Rank of the MPI process in the local communicator. For parents it is not necessarily the rank obtained from "comm".
 * - numP (IN): Size of the local group. If it is a children group, this parameter must match the result of
 * "MPI_Comm_size(comm)". For parents it is not always the size obtained from "comm".
 * - numO (IN): Amount of processes in the remote group. For parents it is the target quantity of processes after the
 * resize, while for children it is the amount of parents.
 * - is_children_group (IN): Indicates whether this MPI rank is a child (TRUE) or a parent (FALSE).
 * - is_intercomm (IN): Indicates whether the used communicator is an intercommunicator (TRUE) or an intracommunicator (FALSE).
 * - recv (OUT): Array where data will be written. A NULL value is allowed if the process is not going to receive data.
 * If the process receives data and it is NULL, the behaviour is undefined.
 * - s_counts (OUT): Struct indicating how many elements this process sends to each process in the new group.
 * - r_counts (OUT): Struct indicating how many elements this process receives from each process in the previous group.
 *
 */
void prepare_redistribution(int qty, int myId, int numP, int numO, int is_children_group, int is_intercomm, char **recv, struct Counts *s_counts, struct Counts *r_counts) {
int array_size = numO;
int offset_ids = 0;
struct Dist_data dist_data;
if(is_intercomm) {
offset_ids = numP; //FIXME Modify only if active?
} else {
array_size = numP > numO ? numP : numO;
}
mallocCounts(s_counts, array_size+offset_ids);
mallocCounts(r_counts, array_size+offset_ids);
if(is_children_group) {
offset_ids = 0;
prepare_comm_alltoall(myId, numP, numO, qty, offset_ids, r_counts);
// Obtain the block distribution for this child
get_block_dist(qty, myId, numP, &dist_data);
*recv = malloc(dist_data.tamBl * sizeof(char));
//get_block_dist(qty, myId, numP, &dist_data);
//print_counts(dist_data, r_counts->counts, r_counts->displs, numO+offset_ids, 0, "Children C ");
} else {
//get_block_dist(qty, myId, numP, &dist_data);
prepare_comm_alltoall(myId, numP, numO, qty, offset_ids, s_counts);
if(!is_intercomm && myId < numO) {
prepare_comm_alltoall(myId, numO, numP, qty, offset_ids, r_counts);
// Obtain the block distribution for this process and allocate the receive buffer
get_block_dist(qty, myId, numO, &dist_data);
*recv = malloc(dist_data.tamBl * sizeof(char));
//print_counts(dist_data, r_counts->counts, r_counts->displs, array_size, 0, "Children P ");
}
//print_counts(dist_data, s_counts->counts, s_counts->displs, numO+offset_ids, 0, "Parents ");
}
}
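/*
 * Worked example (values derived from get_block_dist; shown only for
 * clarity): redistributing qty = 10 elements from 2 source processes to 3
 * target processes. Source blocks are [0,5) and [5,10); target blocks are
 * [0,4), [4,7) and [7,10). Source 0 therefore sends s_counts = {4, 1, 0}
 * (4 elements to target 0, 1 to target 1) and source 1 sends
 * s_counts = {0, 2, 3}.
 */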
/*
 * Ensures that the requests array of a process has as many elements as the number of communication
 * calls the process will perform. If the array is not initialized or does not have enough space, it is
 * allocated/reallocated to the minimum amount of space needed.
 *
 * - s_counts (IN): Struct indicating how many elements this process sends to each process in the new group.
 * - r_counts (IN): Struct indicating how many elements this process receives from each process in the previous group.
 * - requests (OUT): Pointer to an array of requests used to determine whether the communication has ended. If the pointer
 * is NULL or not enough space has been reserved, the array is allocated/reallocated.
 * - request_qty (OUT): Quantity of requests to be used. If the value is smaller than the number of communication
 * calls to perform, it is modified to the minimum value.
 */
void check_requests(struct Counts s_counts, struct Counts r_counts, MPI_Request **requests, size_t *request_qty) {
size_t i, sum;
MPI_Request *aux;
sum = (size_t) s_counts.idE - s_counts.idI;
sum += (size_t) r_counts.idE - r_counts.idI;
if (*requests != NULL && sum <= *request_qty) return; // Expected amount of requests
// FIXME If the Ibarrier strategy is used, how is it accounted for in the total??
if (*requests == NULL) {
*requests = (MPI_Request *) malloc(sum * sizeof(MPI_Request));
} else { // Array exists, but is too small
aux = (MPI_Request *) realloc(*requests, sum * sizeof(MPI_Request));
*requests = aux;
}
if (*requests == NULL) {
fprintf(stderr, "Fatal error - It was not possible to allocate/reallocate memory for the MPI_Requests before the redistribution\n");
MPI_Abort(MPI_COMM_WORLD, 1);
}
for(i=0; i < sum; i++) {
(*requests)[i] = MPI_REQUEST_NULL;
}
*request_qty = sum;
}
/*
 * Special case that performs a manual copy of data when a process has to send data to itself. Only used
 * when the MPI communication cannot handle this situation. An example is when using point-to-point
 * communications and the process would have to perform a Send and a Recv to itself.
 * - send (IN): Array with the data to send. This value cannot be NULL.
 * - recv (OUT): Array where data will be written. This value cannot be NULL.
 * - myId (IN): Rank of the MPI process in the local communicator. For parents it is not necessarily the rank obtained from "comm".
 * - s_counts (IN): Struct indicating how many elements this process sends to each process in the new group.
 * - r_counts (IN): Struct indicating how many elements this process receives from each process in the previous group.
 */
void perform_manual_communication(char *send, char *recv, int myId, struct Counts s_counts, struct Counts r_counts) {
int i;
for(i=0; i<s_counts.counts[myId];i++) {
recv[i+r_counts.displs[myId]] = send[i+s_counts.displs[myId]];
}
}
/*
 * Checks whether the strategy passed as the second argument is among
 * the chosen strategies.
 *
 * Returns 1 (true) in "result" if the strategy is used, and 0 (false)
 * otherwise.
 */
int malleability_red_contains_strat(int comm_strategies, int strategy, int *result) {
int value = comm_strategies % strategy ? 0 : 1;
if(result != NULL) *result = value;
return value;
}
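/*
 * Worked example (illustrative; assumes, as the modulo test implies, that
 * each strategy is identified by a distinct prime and "comm_strategies" is
 * their product): with comm_strategies = 6 = 2*3 the function returns 1 for
 * strategy 2 and for strategy 3 (both divide 6), and 0 for strategy 5
 * (6 % 5 != 0).
 */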
#ifndef COMMDIST_H
#define COMMDIST_H
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <string.h>
#include "malleabilityStates.h"
//#define MAL_COMM_COMPLETED 0
//#define MAL_COMM_UNINITIALIZED 2
//#define MAL_ASYNC_PENDING 1
//#define MAL_USE_NORMAL 0
//#define MAL_USE_IBARRIER 1
//#define MAL_USE_POINT 2
//#define MAL_USE_THREAD 3
int sync_communication(char *send, char **recv, int qty, int myId, int numP, int numO, int is_children_group, int comm_type, MPI_Comm comm);
int async_communication(char *send, char **recv, int qty, int myId, int numP, int numO, int is_children_group, int red_method, int red_strategies, MPI_Comm comm, MPI_Request **requests, size_t *request_qty);
int send_async(char *array, int qty, int myId, int numP, MPI_Comm intercomm, int numP_child, MPI_Request **comm_req, int red_method, int red_strategies);
void recv_async(char **array, int qty, int myId, int numP, MPI_Comm intercomm, int numP_parents, int red_method, int red_strategies);
void malloc_comm_array(char **array, int qty, int myId, int numP);
int malleability_red_contains_strat(int comm_strategies, int strategy, int *result);
#endif
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include "block_distribution.h"
void set_interblock_counts(int id, int numP, struct Dist_data data_dist, int offset_ids, int *sendcounts);
void get_util_ids(struct Dist_data dist_data, int numP_other, int **idS);
/*
 * Prepares a communication from "numP" processes to "numP_other" processes
 * of "n" elements and returns a Counts struct with 3 arrays to perform the
 * communications.
 *
 * The struct should be freed with freeCounts.
 */
void prepare_comm_alltoall(int myId, int numP, int numP_other, int n, int offset_ids, struct Counts *counts) {
int i, *idS, first_id = 0;
struct Dist_data dist_data, dist_target;
if(counts == NULL) {
fprintf(stderr, "Counts is NULL for rank %d/%d ", myId, numP);
MPI_Abort(MPI_COMM_WORLD, -3);
}
get_block_dist(n, myId, numP, &dist_data);
get_util_ids(dist_data, numP_other, &idS);
counts->idI = idS[0] + offset_ids;
counts->idE = idS[1] + offset_ids;
get_block_dist(n, idS[0], numP_other, &dist_target); // RMA Specific operation -- uses idS[0], not idI
counts->first_target_displs = dist_data.ini - dist_target.ini; // RMA Specific operation
if(idS[0] == 0) { // Uses idS[0], not idI
set_interblock_counts(counts->idI, numP_other, dist_data, offset_ids, counts->counts);
first_id++;
}
for(i=counts->idI + first_id; i<counts->idE; i++) {
set_interblock_counts(i, numP_other, dist_data, offset_ids, counts->counts);
counts->displs[i] = counts->displs[i-1] + counts->counts[i-1];
}
free(idS);
for(i=0; i<numP_other; i++) {
if(counts->counts[i] < 0) {
fprintf(stderr, "Counts value [i=%d/%d] is negative for rank %d/%d ", i, numP_other, myId, numP);
MPI_Abort(MPI_COMM_WORLD, -3);
}
if(counts->displs[i] < 0) {
fprintf(stderr, "Displs value [i=%d/%d] is negative for rank %d/%d ", i, numP_other, myId, numP);
MPI_Abort(MPI_COMM_WORLD, -3);
}
}
}
/*
 * Prepares a communication among "numP" processes of "n" elements and
 * returns a Counts struct with 3 arrays to perform the
 * communications.
 *
 * The struct should be freed with freeCounts.
 */
void prepare_comm_allgatherv(int numP, int n, struct Counts *counts) {
int i;
struct Dist_data dist_data;
mallocCounts(counts, numP);
get_block_dist(n, 0, numP, &dist_data);
counts->counts[0] = dist_data.tamBl;
for(i=1; i<numP; i++){
get_block_dist(n, i, numP, &dist_data);
counts->counts[i] = dist_data.tamBl;
counts->displs[i] = counts->displs[i-1] + counts->counts[i-1];
}
}
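/*
 * Worked example (shown only for clarity): n = 10 elements over numP = 3
 * processes yields blocks of sizes 4, 3 and 3, so counts = {4, 3, 3} and
 * displs = {0, 4, 7} (displs[0] keeps the zero set by mallocCounts).
 */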
/*
* ========================================================================================
* ========================================================================================
* ================================DISTRIBUTION FUNCTIONS==================================
* ========================================================================================
* ========================================================================================
*/
/*
 * Obtains, for a given "id" and "numP", how many
 * elements process "id" will hold, and fills
 * the results in a Dist_data struct.
 */
void get_block_dist(int qty, int id, int numP, struct Dist_data *dist_data) {
int rem;
dist_data->myId = id;
dist_data->numP = numP;
dist_data->qty = qty;
dist_data->tamBl = qty / numP;
rem = qty % numP;
if(id < rem) { // First subgroup
dist_data->ini = id * dist_data->tamBl + id;
dist_data->fin = (id+1) * dist_data->tamBl + (id+1);
} else { // Second subgroup
dist_data->ini = id * dist_data->tamBl + rem;
dist_data->fin = (id+1) * dist_data->tamBl + rem;
}
if(dist_data->fin > qty) {
dist_data->fin = qty;
}
if(dist_data->ini > dist_data->fin) {
dist_data->ini = dist_data->fin;
}
dist_data->tamBl = dist_data->fin - dist_data->ini;
}
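/*
 * Worked example (added for illustration): for qty=10 and numP=3 the blocks
 * are [0,4), [4,7) and [7,10), i.e. the remainder qty % numP = 1 extra
 * element goes to the first process. The helper below is a hypothetical
 * self-check, not part of the original API.
 */
void example_print_block_dist(void) {
  struct Dist_data d;
  int id;
  for(id = 0; id < 3; id++) {
    get_block_dist(10, id, 3, &d);
    printf("P%d owns [%d, %d) -> %d elements\n", id, d.ini, d.fin, d.tamBl);
  }
}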
/*
 * Obtains, for a given process id, how many elements it will send to
 * or receive from the process described in Dist_data.
 */
void set_interblock_counts(int id, int numP, struct Dist_data data_dist, int offset_ids, int *sendcounts) {
struct Dist_data other;
int biggest_ini, smallest_end;
get_block_dist(data_dist.qty, id - offset_ids, numP, &other);
// If the ranges do not overlap, skip to the next process
if(data_dist.ini >= other.fin || data_dist.fin <= other.ini) {
return;
}
// Take the larger of the two starting indices
if(data_dist.ini > other.ini) {
biggest_ini = data_dist.ini;
} else {
biggest_ini = other.ini;
}
// Take the smaller of the two ending indices
if(data_dist.fin < other.fin) {
smallest_end = data_dist.fin;
} else {
smallest_end = other.fin;
}
sendcounts[id] = smallest_end - biggest_ini; // Number of elements to send to / receive from process id
}
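/*
 * Worked example (added for illustration): with qty=10, a local block [4,7)
 * and an external group of 3 processes owning [0,4), [4,7) and [7,10),
 * set_interblock_counts writes sendcounts[1] = 3 (the only overlap) and
 * returns without touching the other entries, so the counts vector must be
 * zero-initialized beforehand (mallocCounts uses calloc for this reason).
 */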
/*
 * Obtains, for a process in one group, the range of ranks in the other
 * group it has to send data to or receive data from.
 *
 * Returns the first identifier and the last one (excluded) to
 * communicate with.
 */
void get_util_ids(struct Dist_data dist_data, int numP_other, int **idS) {
int idI, idE;
int tamOther = dist_data.qty / numP_other;
int remOther = dist_data.qty % numP_other;
// Cut-off point of the external process group that
// separates the processes of size tamOther + 1
// from those of size tamOther
int middle = (tamOther + 1) * remOther;
// Compute idI taking into account whether it communicates with a
// process of size tamOther or tamOther+1
if(middle > dist_data.ini) { // First subgroup (tamOther+1)
idI = dist_data.ini / (tamOther + 1);
} else { // Second subgroup (tamOther)
idI = ((dist_data.ini - middle) / tamOther) + remOther;
}
// Compute idE taking into account whether it communicates with a
// process of size tamOther or tamOther+1
if(middle >= dist_data.fin) { // First subgroup (tamOther +1)
idE = dist_data.fin / (tamOther + 1);
idE = (dist_data.fin % (tamOther + 1) > 0 && idE+1 <= numP_other) ? idE+1 : idE;
} else { // Second subgroup (tamOther)
idE = ((dist_data.fin - middle) / tamOther) + remOther;
idE = ((dist_data.fin - middle) % tamOther > 0 && idE+1 <= numP_other) ? idE+1 : idE;
}
*idS = malloc(2 * sizeof(int));
(*idS)[0] = idI;
(*idS)[1] = idE;
}
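/*
 * Worked example (added for illustration; hypothetical helper): a process
 * owning [0,5) of qty=10 against an external group of 3 processes gets
 * idS = {0, 2}, since its block overlaps the external blocks [0,4) and
 * [4,7) but not [7,10).
 */
void example_util_ids(void) {
  struct Dist_data d;
  int *idS;
  get_block_dist(10, 0, 2, &d); // Local block [0,5), first of two processes
  get_util_ids(d, 3, &idS);     // External group of 3 processes
  printf("communicate with ranks [%d, %d)\n", idS[0], idS[1]); // Prints [0, 2)
  free(idS);
}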
/*
* ========================================================================================
* ========================================================================================
* ==============================INIT/FREE/PRINT FUNCTIONS=================================
* ========================================================================================
* ========================================================================================
*/
/*
 * Allocates memory for the counts/displs vectors used by the
 * MPI_Alltoallv function. All vectors have a size of numP, the
 * number of processes in the other process group.
 *
 * The counts vector indicates how many elements this process
 * communicates with process "i" of the other group.
 *
 * The displs vector indicates the displacements needed for each
 * communication with process "i" of the other group.
 *
 */
void mallocCounts(struct Counts *counts, size_t numP) {
counts->counts = calloc(numP, sizeof(int));
if(counts->counts == NULL) { MPI_Abort(MPI_COMM_WORLD, -2);}
counts->displs = calloc(numP, sizeof(int));
if(counts->displs == NULL) { MPI_Abort(MPI_COMM_WORLD, -2);}
counts->len = numP;
counts->idI = -1;
counts->idE = -1;
counts->first_target_displs = -1;
}
/*
 * Frees the internal memory of a Counts structure.
 *
 * It does not free the counts structure itself if it was
 * allocated dynamically.
 */
void freeCounts(struct Counts *counts) {
if(counts == NULL) {
return;
}
if(counts->counts != NULL) {
free(counts->counts);
counts->counts = NULL;
}
if(counts->displs != NULL) {
free(counts->displs);
counts->displs = NULL;
}
}
/*
 * Prints the communication information of a process.
 * If the "include_zero" flag is set to true, the zero values of the
 * xcounts vector are printed as well.
 *
 * A string can be passed in "name" to better identify which vectors
 * the call refers to.
 */
void print_counts(struct Dist_data data_dist, int *xcounts, int *xdispls, int size, int include_zero, const char* name) {
int i;
for(i=0; i < size; i++) {
if(xcounts[i] != 0 || include_zero) {
printf("P%d of %d | %scounts[%d]=%d disp=%d\n", data_dist.myId, data_dist.numP, name, i, xcounts[i], xdispls[i]);
}
}
}
#ifndef mall_block_distribution
#define mall_block_distribution
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
struct Dist_data {
int ini; // First element to send
int fin; // Last element to send (excluded)
int tamBl; // Total number of local elements
int qty; // Total number of rows of the full sparse matrix
int myId;
int numP;
MPI_Comm intercomm;
};
struct Counts {
int len, idI, idE;
int first_target_displs; // RMA. Indicates displacement for first target when performing a Get.
int *counts;
int *displs;
};
void prepare_comm_alltoall(int myId, int numP, int numP_other, int n, int offset_ids, struct Counts *counts);
void prepare_comm_allgatherv(int numP, int n, struct Counts *counts);
void get_block_dist(int qty, int id, int numP, struct Dist_data *dist_data);
void mallocCounts(struct Counts *counts, size_t numP);
void freeCounts(struct Counts *counts);
void print_counts(struct Dist_data data_dist, int *xcounts, int *xdispls, int size, int include_zero, const char* name);
#endif
#ifndef MALLEABILITY_DATA_STRUCTURES_H
#define MALLEABILITY_DATA_STRUCTURES_H
/*
 * Data structures available for internal use.
 */
#include <mpi.h>
/* --- SPAWN STRUCTURES --- */
struct physical_dist {
int num_cpus, num_nodes;
char *nodelist;
int target_qty, already_created;
int dist_type, info_type;
};
typedef struct {
int myId, root, root_parents;
int spawn_qty, initial_qty, target_qty;
int already_created;
int spawn_method, spawn_is_single, spawn_is_async;
char *cmd; //Executable name
MPI_Info mapping;
MPI_Datatype dtype;
struct physical_dist dist; // Used to create mapping var
MPI_Comm comm, returned_comm;
} Spawn_data;
#endif
#include <pthread.h>
#include <string.h>
#include "malleabilityManager.h"
#include "malleabilityStates.h"
#include "malleabilityDataStructures.h"
#include "malleabilityTypes.h"
#include "malleabilityZombies.h"
#include "spawn_methods/GenericSpawn.h"
#include "CommDist.h"
#define MALLEABILITY_USE_SYNCHRONOUS 0
#define MALLEABILITY_USE_ASYNCHRONOUS 1
void send_data(int numP_children, malleability_data_t *data_struct, int is_asynchronous);
void recv_data(int numP_parents, malleability_data_t *data_struct, int is_asynchronous);
void Children_init();
int spawn_step();
int start_redistribution();
int check_redistribution();
int end_redistribution();
int shrink_redistribution();
void comm_node_data(int rootBcast, int is_child_group);
void def_nodeinfo_type(MPI_Datatype *node_type);
int thread_creation();
int thread_check();
void* thread_async_work();
void print_comms_state();
void malleability_comms_update(MPI_Comm comm);
typedef struct {
int spawn_method;
int spawn_dist;
int spawn_strategies;
int red_method;
int red_strategies;
int grp;
} malleability_config_t;
typedef struct { //FIXME numC_spawned is not being used
int myId, numP, numC, numC_spawned, root, root_parents;
pthread_t async_thread;
MPI_Comm comm, thread_comm;
MPI_Comm intercomm;
MPI_Comm user_comm;
int dup_user_comm;
char *name_exec, *nodelist;
int num_cpus, num_nodes, nodelist_len;
} malleability_t;
int state = MALL_UNRESERVED; //FIXME Move this somewhere else
malleability_config_t *mall_conf;
malleability_t *mall;
malleability_data_t *rep_s_data;
malleability_data_t *dist_s_data;
malleability_data_t *rep_a_data;
malleability_data_t *dist_a_data;
/*
 * Initializes the memory reservations for the malleability module,
 * creating all the required structures and communicator copies so as
 * not to interfere with the application.
 *
 * If called by a group of dynamically spawned processes, they
 * initialize the communication with their parents. In that case, once
 * the communication finishes the child processes are ready to run
 * the application.
 */
int init_malleability(int myId, int numP, int root, MPI_Comm comm, char *name_exec, char *nodelist, int num_cpus, int num_nodes) {
MPI_Comm dup_comm, thread_comm;
mall_conf = (malleability_config_t *) malloc(sizeof(malleability_config_t));
mall = (malleability_t *) malloc(sizeof(malleability_t));
rep_s_data = (malleability_data_t *) malloc(sizeof(malleability_data_t));
dist_s_data = (malleability_data_t *) malloc(sizeof(malleability_data_t));
rep_a_data = (malleability_data_t *) malloc(sizeof(malleability_data_t));
dist_a_data = (malleability_data_t *) malloc(sizeof(malleability_data_t));
mall->dup_user_comm = 0;
MPI_Comm_dup(comm, &dup_comm);
MPI_Comm_dup(comm, &thread_comm);
MPI_Comm_set_name(dup_comm, "MPI_COMM_MALL");
MPI_Comm_set_name(thread_comm, "MPI_COMM_MALL_THREAD");
mall->myId = myId;
mall->numP = numP;
mall->root = root;
mall->comm = dup_comm;
mall->thread_comm = thread_comm;
mall->user_comm = comm;
mall->name_exec = name_exec;
mall->nodelist = nodelist;
mall->num_cpus = num_cpus;
mall->num_nodes = num_nodes;
rep_s_data->entries = 0;
rep_a_data->entries = 0;
dist_s_data->entries = 0;
dist_a_data->entries = 0;
state = MALL_NOT_STARTED;
zombies_service_init();
// If these processes were spawned dynamically, they obtain the data from their parents
MPI_Comm_get_parent(&(mall->intercomm));
if(mall->intercomm != MPI_COMM_NULL ) {
Children_init();
return MALLEABILITY_CHILDREN;
}
if(nodelist != NULL) { //TODO To be deprecated in favour of Slurm or the else branch
mall->nodelist_len = strlen(nodelist);
} else { // If no nodelist is detected, get it from the current run
mall->nodelist = malloc(MPI_MAX_PROCESSOR_NAME * sizeof(char));
MPI_Get_processor_name(mall->nodelist, &mall->nodelist_len);
//TODO Get name of each process and create real nodelist
}
return MALLEABILITY_NOT_CHILDREN;
}
/*
 * Frees all the memory reserved by the malleability
 * module and makes sure any zombie processes
 * wake up, if there are any.
 */
void free_malleability() {
free_malleability_data_struct(rep_s_data);
free_malleability_data_struct(rep_a_data);
free_malleability_data_struct(dist_s_data);
free_malleability_data_struct(dist_a_data);
free(rep_s_data);
free(rep_a_data);
free(dist_s_data);
free(dist_a_data);
if(mall->comm != MPI_COMM_WORLD) MPI_Comm_free(&(mall->comm));
if(mall->thread_comm != MPI_COMM_WORLD) MPI_Comm_free(&(mall->thread_comm));
free(mall);
free(mall_conf);
zombies_awake();
zombies_service_free();
state = MALL_UNRESERVED;
}
/*
 * TODO Rewrite
 * The parents perform the resizing of the process group.
 *
 * The new processes are created with the chosen physical distribution
 * and the information is then transmitted to them.
 *
 * If there is asynchronous data to transmit, its transmission is
 * started first and the function returns. The function has to be
 * called again to check that the sends have finished.
 *
 * If there is also synchronous data to send, it is not sent yet.
 *
 * If there is only synchronous data, it is sent after the processes
 * have been created and the two process groups finally disconnect.
 */
int malleability_checkpoint() {
double end_real_time;
switch(state) {
case MALL_UNRESERVED:
break;
case MALL_NOT_STARTED:
// Check whether a resize has to be performed
//mall_conf->results->malleability_time[mall_conf->grp] = MPI_Wtime();
state = spawn_step();
if (state == MALL_SPAWN_COMPLETED || state == MALL_SPAWN_ADAPT_POSTPONE){
malleability_checkpoint();
}
break;
case MALL_SPAWN_PENDING: // Checks whether the spawn has finished and starts the redistribution
case MALL_SPAWN_SINGLE_PENDING:
state = check_spawn_state(&(mall->intercomm), mall->comm, &end_real_time);
if (state == MALL_SPAWN_COMPLETED || state == MALL_SPAWN_ADAPTED) {
//mall_conf->results->spawn_time[mall_conf->grp] = MPI_Wtime() - mall_conf->results->spawn_start;
//mall_conf->results->spawn_real_time[mall_conf->grp] = end_real_time - mall_conf->results->spawn_start;
malleability_checkpoint();
}
break;
case MALL_SPAWN_ADAPT_POSTPONE:
case MALL_SPAWN_COMPLETED:
state = start_redistribution();
malleability_checkpoint();
break;
case MALL_DIST_PENDING:
if(malleability_red_contains_strat(mall_conf->red_strategies, MALL_RED_THREAD, NULL)) {
state = thread_check();
} else {
state = check_redistribution();
}
if(state != MALL_DIST_PENDING) {
malleability_checkpoint();
}
break;
case MALL_SPAWN_ADAPT_PENDING:
//mall_conf->results->spawn_start = MPI_Wtime();
unset_spawn_postpone_flag(state);
state = check_spawn_state(&(mall->intercomm), mall->comm, &end_real_time);
if(!malleability_spawn_contains_strat(mall_conf->spawn_strategies, MALL_SPAWN_PTHREAD, NULL)) {
//mall_conf->results->spawn_time[mall_conf->grp] = MPI_Wtime() - mall_conf->results->spawn_start;
malleability_checkpoint();
}
break;
case MALL_SPAWN_ADAPTED:
state = shrink_redistribution();
malleability_checkpoint();
break;
case MALL_DIST_COMPLETED: //TODO Isn't this quite ugly?
//mall_conf->results->malleability_end = MPI_Wtime();
state = MALL_COMPLETED;
break;
}
return state;
}
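/*
 * Illustrative sketch (added; not part of the original sources): how an
 * application could drive the state machine above. After configuring the
 * resize with set_malleability_configuration() and set_children_number(),
 * malleability_checkpoint() is polled between iterations until it reports
 * MALL_COMPLETED, or MALL_ZOMBIE for processes that must retire after a
 * shrink. Function and parameter names are hypothetical.
 */
void example_drive_reconfiguration(MPI_Comm *app_comm) {
  int mall_state = malleability_checkpoint();
  while(mall_state != MALL_COMPLETED && mall_state != MALL_ZOMBIE) {
    // ... perform application iterations here ...
    mall_state = malleability_checkpoint();
  }
  if(mall_state == MALL_COMPLETED) {
    get_malleability_user_comm(app_comm); // Obtain the refreshed communicator
  }
}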
// Functions only needed by the benchmark
//-------------------------------------------------------------------------------------------------------------
void set_benchmark_grp(int grp) {
mall_conf->grp = grp;
}
//-------------------------------------------------------------------------------------------------------------
void set_malleability_configuration(int spawn_method, int spawn_strategies, int spawn_dist, int red_method, int red_strategies) {
mall_conf->spawn_method = spawn_method;
mall_conf->spawn_strategies = spawn_strategies;
mall_conf->spawn_dist = spawn_dist;
mall_conf->red_method = red_method;
mall_conf->red_strategies = red_strategies;
}
/*
 * To be deprecated
 * Has to be called after setting the configuration
 */
void set_children_number(int numC){
if((mall_conf->spawn_method == MALL_SPAWN_MERGE) && (numC >= mall->numP)) {
mall->numC = numC;
mall->numC_spawned = numC - mall->numP;
if(numC == mall->numP) { // Migrate
mall->numC_spawned = numC;
mall_conf->spawn_method = MALL_SPAWN_BASELINE;
}
} else {
mall->numC = numC;
mall->numC_spawned = numC;
}
}
/*
 * Returns the communicator the application should use, duplicating it
 * first if a reconfiguration has made the previous copy stale.
 */
void get_malleability_user_comm(MPI_Comm *comm) {
if(mall->dup_user_comm) {
if(mall->user_comm != MPI_COMM_WORLD) MPI_Comm_free(&(mall->user_comm));
MPI_Comm_dup(mall->comm, &(mall->user_comm));
MPI_Comm_set_name(mall->user_comm, "MPI_COMM_MALL_USER");
mall->dup_user_comm = 0;
}
*comm = mall->user_comm;
}
/*
 * Adds the new data set "data", with a total of "total_qty" elements,
 * to the chosen data structure.
 *
 * Variable data has to be added when it is about to be sent, not before
 *
 * More information in the "add_data" function.
 *
 * //FIXME If it is constant it should go to asynchronous, not synchronous
 */
void malleability_add_data(void *data, size_t total_qty, int type, int is_replicated, int is_constant) {
size_t total_reqs = 0;
if(is_constant) {
if(is_replicated) {
add_data(data, total_qty, type, total_reqs, rep_s_data);
} else {
add_data(data, total_qty, type, total_reqs, dist_s_data);
}
} else {
if(is_replicated) {
add_data(data, total_qty, type, total_reqs, rep_a_data); //FIXME total_reqs==0 ???
} else {
if(mall_conf->red_method == MALL_RED_BASELINE) {
total_reqs = 1;
} else if(mall_conf->red_method == MALL_RED_IBARRIER) { //TODO This is a strategy, not a method
total_reqs = 2;
} else if(mall_conf->red_method == MALL_RED_POINT) {
total_reqs = mall->numC;
}
add_data(data, total_qty, type, total_reqs, dist_a_data);
}
}
}
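/*
 * Illustrative sketch (added; not part of the original sources): registering
 * data before a resize. A replicated scalar goes through the constant
 * (synchronous) path, while a block distributed vector uses the variable
 * (asynchronous) path; MAL_INT and MAL_DOUBLE are the type tags from
 * malleabilityStates.h. Names are hypothetical.
 */
void example_register_data(int *iter, double *sub_vec, size_t total_n) {
  malleability_add_data(iter, 1, MAL_INT, 1, 1);             // Replicated, constant
  malleability_add_data(sub_vec, total_n, MAL_DOUBLE, 0, 0); // Distributed, variable
}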
/*
 * Modifies, at index "index" of the chosen data structure, the data
 * set "data" with a total of "total_qty" elements.
 *
 * Variable data has to be modified when it is about to be sent, not before
 *
 * More information in the "modify_data" function.
 * //FIXME If it is constant it should go to asynchronous, not synchronous
 */
void malleability_modify_data(void *data, size_t index, size_t total_qty, int type, int is_replicated, int is_constant) {
size_t total_reqs = 0;
if(is_constant) {
if(is_replicated) {
modify_data(data, index, total_qty, type, total_reqs, rep_s_data);
} else {
modify_data(data, index, total_qty, type, total_reqs, dist_s_data);
}
} else {
if(is_replicated) {
modify_data(data, index, total_qty, type, total_reqs, rep_a_data); //FIXME total_reqs==0 ???
} else {
if(mall_conf->red_method == MALL_RED_BASELINE) {
total_reqs = 1;
} else if(mall_conf->red_method == MALL_RED_IBARRIER) { //TODO This is a strategy, not a method
total_reqs = 2;
} else if(mall_conf->red_method == MALL_RED_POINT) {
total_reqs = mall->numC;
}
modify_data(data, index, total_qty, type, total_reqs, dist_a_data);
}
}
}
/*
 * Returns the number of entries of the chosen data
 * description structure.
 * //FIXME If it is constant it should go to asynchronous, not synchronous
 */
void malleability_get_entries(size_t *entries, int is_replicated, int is_constant){
if(is_constant) {
if(is_replicated) {
*entries = rep_s_data->entries;
} else {
*entries = dist_s_data->entries;
}
} else {
if(is_replicated) {
*entries = rep_a_data->entries;
} else {
*entries = dist_a_data->entries;
}
}
}
/*
 * Returns element "index" of the list to the user.
 * Elements are returned in the same order the parents added them
 * with the "malleability_add_data()" function.
 * It is the user's task to know the type of that data.
 * TODO Refactor so this is automatic
 * //FIXME If it is constant it should go to asynchronous, not synchronous
 */
void malleability_get_data(void **data, size_t index, int is_replicated, int is_constant) {
malleability_data_t *data_struct;
if(is_constant) {
if(is_replicated) {
data_struct = rep_s_data;
} else {
data_struct = dist_s_data;
}
} else {
if(is_replicated) {
data_struct = rep_a_data;
} else {
data_struct = dist_a_data;
}
}
*data = data_struct->arrays[index];
}
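/*
 * Illustrative sketch (added; not part of the original sources): how spawned
 * children could recover the data registered by the parents, in the same
 * order it was added. The void* results have to be cast by the caller, as
 * the comment above notes. Names are hypothetical.
 */
void example_recover_data(int **iter, double **sub_vec) {
  size_t entries;
  malleability_get_entries(&entries, 1, 1); // Replicated, constant
  if(entries > 0) malleability_get_data((void **) iter, 0, 1, 1);
  malleability_get_entries(&entries, 0, 0); // Distributed, variable
  if(entries > 0) malleability_get_data((void **) sub_vec, 0, 0, 0);
}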
//======================================================||
//================PRIVATE FUNCTIONS=====================||
//================DATA COMMUNICATION====================||
//======================================================||
//======================================================||
/*
 * Generalized function to send data to the children.
 * Asynchronicity refers to whether the parent and child processes do
 * it in a blocking way or not. The parent can use several threads.
 */
void send_data(int numP_children, malleability_data_t *data_struct, int is_asynchronous) {
size_t i;
char *aux_send, *aux_recv;
if(is_asynchronous) {
for(i=0; i < data_struct->entries; i++) {
aux_send = (char *) data_struct->arrays[i]; //TODO Check that it really is a char
aux_recv = NULL;
async_communication(aux_send, &aux_recv, data_struct->qty[i], mall->myId, mall->numP, numP_children, MALLEABILITY_NOT_CHILDREN, mall_conf->red_method, mall_conf->red_strategies, mall->intercomm,
&(data_struct->requests[i]), &(data_struct->request_qty[i]));
if(aux_recv != NULL) data_struct->arrays[i] = (void *) aux_recv;
}
} else {
for(i=0; i < data_struct->entries; i++) {
aux_send = (char *) data_struct->arrays[i]; //TODO Check that it really is a char
aux_recv = NULL;
sync_communication(aux_send, &aux_recv, data_struct->qty[i], mall->myId, mall->numP, numP_children, MALLEABILITY_NOT_CHILDREN, mall_conf->red_method, mall->intercomm);
if(aux_recv != NULL) data_struct->arrays[i] = (void *) aux_recv;
}
}
}
/*
 * Generalized function to receive data from the parents.
 * Asynchronicity refers to whether the parent and child processes do
 * it in a blocking way or not. The parent can use several threads.
 */
void recv_data(int numP_parents, malleability_data_t *data_struct, int is_asynchronous) {
size_t i;
char *aux, aux_s;
if(is_asynchronous) {
for(i=0; i < data_struct->entries; i++) {
aux = (char *) data_struct->arrays[i]; //TODO Check that it really is a char
async_communication(&aux_s, &aux, data_struct->qty[i], mall->myId, mall->numP, numP_parents, MALLEABILITY_CHILDREN, mall_conf->red_method, mall_conf->red_strategies, mall->intercomm,
&(data_struct->requests[i]), &(data_struct->request_qty[i]));
data_struct->arrays[i] = (void *) aux;
}
} else {
for(i=0; i < data_struct->entries; i++) {
aux = (char *) data_struct->arrays[i]; //TODO Check that it really is a char
sync_communication(&aux_s, &aux, data_struct->qty[i], mall->myId, mall->numP, numP_parents, MALLEABILITY_CHILDREN, mall_conf->red_method, mall->intercomm);
data_struct->arrays[i] = (void *) aux;
}
}
}
//======================================================||
//================PRIVATE FUNCTIONS=====================||
//=====================CHILDREN=========================||
//======================================================||
//======================================================||
/*
 * Initialization of the children's data.
 * Within it, data is received from the parents: the configuration of
 * the execution to perform, and the data to receive from the parents,
 * whether synchronously, asynchronously, or both.
 */
void Children_init() {
size_t i;
int numP_parents, root_parents;
int is_intercomm;
malleability_connect_children(mall->myId, mall->numP, mall->root, mall->comm, &numP_parents, &root_parents, &(mall->intercomm));
MPI_Comm_test_inter(mall->intercomm, &is_intercomm);
if(!is_intercomm) { // For intracommunicators, these processes will be added
MPI_Comm_rank(mall->intercomm, &(mall->myId));
MPI_Comm_size(mall->intercomm, &(mall->numP));
}
comm_node_data(root_parents, MALLEABILITY_CHILDREN);
MPI_Bcast(&(mall_conf->red_method), 1, MPI_INT, root_parents, mall->intercomm);
MPI_Bcast(&(mall_conf->red_strategies), 1, MPI_INT, root_parents, mall->intercomm);
comm_data_info(rep_a_data, dist_a_data, MALLEABILITY_CHILDREN, mall->myId, root_parents, mall->intercomm);
if(dist_a_data->entries || rep_a_data->entries) { // Receive asynchronous data
if(malleability_red_contains_strat(mall_conf->red_strategies, MALL_RED_THREAD, NULL)) {
recv_data(numP_parents, dist_a_data, MALLEABILITY_USE_SYNCHRONOUS);
} else {
recv_data(numP_parents, dist_a_data, MALLEABILITY_USE_ASYNCHRONOUS);
}
//mall_conf->results->async_end= MPI_Wtime(); // Timestamp of when the asynchronous comm ends
}
comm_data_info(rep_s_data, dist_s_data, MALLEABILITY_CHILDREN, mall->myId, root_parents, mall->intercomm);
if(dist_s_data->entries || rep_s_data->entries) { // Receive synchronous data
recv_data(numP_parents, dist_s_data, MALLEABILITY_USE_SYNCHRONOUS);
//mall_conf->results->sync_end = MPI_Wtime(); // Timestamp of when the synchronous comm ends
// TODO Create a specific function and add it for the async case
// TODO Take the type and qty into account
for(i=0; i<rep_s_data->entries; i++) {
MPI_Datatype datatype;
if(rep_s_data->types[i] == MAL_INT) {
datatype = MPI_INT;
} else {
datatype = MPI_CHAR;
}
MPI_Bcast(rep_s_data->arrays[i], rep_s_data->qty[i], datatype, root_parents, mall->intercomm);
}
}
//mall_conf->results->malleability_end = MPI_Wtime(); // Timestamp of when malleability ends
if(!is_intercomm) {
malleability_comms_update(mall->intercomm);
}
MPI_Comm_disconnect(&(mall->intercomm)); //FIXME Error with OpenMPI + Merge
}
//======================================================||
//================PRIVATE FUNCTIONS=====================||
//=====================PARENTS==========================||
//======================================================||
//======================================================||
/*
 * Performs the creation of the child processes.
 * If requested in the background, it returns the current state.
 */
int spawn_step(){
//mall_conf->results->spawn_start = MPI_Wtime();
state = init_spawn(mall->name_exec, mall->num_cpus, mall->num_nodes, mall->nodelist, mall->myId, mall->numP, mall->numC, mall->root, mall_conf->spawn_dist, mall_conf->spawn_method, mall_conf->spawn_strategies, mall->thread_comm, &(mall->intercomm));
if(!malleability_spawn_contains_strat(mall_conf->spawn_strategies, MALL_SPAWN_PTHREAD, NULL)) {
//mall_conf->results->spawn_time[mall_conf->grp] = MPI_Wtime() - mall_conf->results->spawn_start;
}
return state;
}
/*
 * Starts the data redistribution with the new group of processes.
 *
 * First the configuration to use is sent to the new group of processes,
 * and then the asynchronous and/or synchronous sends are performed, if any.
 *
 * If there is asynchronous communication, it is started and the function
 * ends indicating that an asynchronous send has begun.
 *
 * If there is no asynchronous communication, the synchronous one is
 * performed, if there is one.
 *
 * Finally, data about the results is sent to the children and both
 * process groups disconnect.
 */
int start_redistribution() {
int rootBcast, is_intercomm;
is_intercomm = 0;
if(mall->intercomm != MPI_COMM_NULL) {
MPI_Comm_test_inter(mall->intercomm, &is_intercomm);
} else {
// If no communicator has been created, it is because the Spawn was
// postponed and this is the Merge Shrink spawn
MPI_Comm_dup(mall->comm, &(mall->intercomm));
}
if(is_intercomm) {
rootBcast = mall->myId == mall->root ? MPI_ROOT : MPI_PROC_NULL;
} else {
rootBcast = mall->root;
}
comm_node_data(rootBcast, MALLEABILITY_NOT_CHILDREN);
MPI_Bcast(&(mall_conf->red_method), 1, MPI_INT, rootBcast, mall->intercomm);
MPI_Bcast(&(mall_conf->red_strategies), 1, MPI_INT, rootBcast, mall->intercomm);
comm_data_info(rep_a_data, dist_a_data, MALLEABILITY_NOT_CHILDREN, mall->myId, mall->root, mall->intercomm);
if(dist_a_data->entries || rep_a_data->entries) { // Send asynchronous data
//FIXME Replicated data (rep_a_data) is not being sent
//mall_conf->results->async_time[mall_conf->grp] = MPI_Wtime();
if(malleability_red_contains_strat(mall_conf->red_strategies, MALL_RED_THREAD, NULL)) {
return thread_creation();
} else {
send_data(mall->numC, dist_a_data, MALLEABILITY_USE_ASYNCHRONOUS);
return MALL_DIST_PENDING;
}
}
return end_redistribution();
}
/*
 * Checks whether the asynchronous redistribution has finished.
 * If it has not finished, the function returns indicating so; otherwise
 * it continues with the synchronous communication, the sending of
 * results, and the disconnection of the process groups.
 *
 * This function allows two modes of operation when checking whether
 * the asynchronous communication has finished.
 * With the "MAL_USE_NORMAL" or "MAL_USE_POINT" modes, it is considered
 * finished when the parents finish sending.
 * With the "MAL_USE_IBARRIER" mode, it is considered finished when the
 * children have finished receiving.
 */
int check_redistribution() {
int is_intercomm, completed, local_completed, all_completed, test_err;
size_t i, j, req_qty;
MPI_Request *req_completed;
local_completed = 1;
test_err = 0;
//FIXME Modify so that rep_a_data is taken into account
for(i=0; i<dist_a_data->entries; i++) {
req_completed = dist_a_data->requests[i];
req_qty = dist_a_data->request_qty[i];
if(malleability_red_contains_strat(mall_conf->red_strategies, MALL_RED_IBARRIER, NULL)) { //FIXME Strategy not fully implemented
test_err = MPI_Test(&(req_completed[req_qty-1]), &completed, MPI_STATUS_IGNORE);
local_completed = local_completed && completed;
} else {
for(j=0; j<req_qty; j++) {
test_err = MPI_Test(&(req_completed[j]), &completed, MPI_STATUS_IGNORE);
local_completed = local_completed && completed;
}
// test_err = MPI_Testall(req_qty, req_completed, &completed, MPI_STATUSES_IGNORE);
}
}
if (test_err != MPI_SUCCESS && test_err != MPI_ERR_PENDING) {
printf("P%d aborting -- Test Async\n", mall->myId);
MPI_Abort(MPI_COMM_WORLD, test_err);
}
MPI_Allreduce(&local_completed, &all_completed, 1, MPI_INT, MPI_MIN, mall->comm);
if(!all_completed) return MALL_DIST_PENDING; // Continue only if asynchronous send has ended
if(malleability_red_contains_strat(mall_conf->red_strategies, MALL_RED_IBARRIER, NULL)) { //FIXME Strategy not fully implemented
MPI_Waitall(req_qty, req_completed, MPI_STATUSES_IGNORE);
//To disconnect both process groups, MPI has to be told that this comm
//has finished, even though this point can only be reached once it has
}
MPI_Comm_test_inter(mall->intercomm, &is_intercomm);
//if(!is_intercomm) mall_conf->results->async_end = MPI_Wtime(); // Merge method only
return end_redistribution();
}
/*
 * Ends the data redistribution with the children, checking whether
 * iterations with background communications have been performed and
 * sending the children how many iterations were carried out.
 *
 * The synchronous communications are also performed, if there are any.
 * It finishes by sending the temporary data to the children.
 */
int end_redistribution() {
size_t i;
int is_intercomm, rootBcast, local_state;
MPI_Comm_test_inter(mall->intercomm, &is_intercomm);
if(is_intercomm) {
rootBcast = mall->myId == mall->root ? MPI_ROOT : MPI_PROC_NULL;
} else {
rootBcast = mall->root;
}
comm_data_info(rep_s_data, dist_s_data, MALLEABILITY_NOT_CHILDREN, mall->myId, mall->root, mall->intercomm);
if(dist_s_data->entries || rep_s_data->entries) { // Send synchronous data
//mall_conf->results->sync_time[mall_conf->grp] = MPI_Wtime();
send_data(mall->numC, dist_s_data, MALLEABILITY_USE_SYNCHRONOUS);
//if(!is_intercomm) mall_conf->results->sync_end = MPI_Wtime(); // Merge method only
// TODO Create a specific function and add it for the async case
// TODO Take the type into account
for(i=0; i<rep_s_data->entries; i++) {
MPI_Datatype datatype;
if(rep_s_data->types[i] == MAL_INT) {
datatype = MPI_INT;
} else {
datatype = MPI_CHAR;
}
MPI_Bcast(rep_s_data->arrays[i], rep_s_data->qty[i], datatype, rootBcast, mall->intercomm);
}
}
local_state = MALL_DIST_COMPLETED;
if(!is_intercomm) { // Merge Spawn
if(mall->numP < mall->numC) { // Expand
malleability_comms_update(mall->intercomm);
} else { // Shrink || Merge Shrink requires more tasks
local_state = MALL_SPAWN_ADAPT_PENDING;
}
}
if(mall->intercomm != MPI_COMM_NULL && mall->intercomm != MPI_COMM_WORLD) {
MPI_Comm_disconnect(&(mall->intercomm)); //FIXME Error with OpenMPI + Merge
}
return local_state;
}
///=============================================
///=============================================
///=============================================
/*
 * Performs the shrink redistribution: suspended processes are parked as
 * zombies and the surviving ones rebuild their communicators from the
 * intercommunicator.
 */
int shrink_redistribution() {
double time_extra = MPI_Wtime();
//TODO Create new state before collecting zombies. Processes can perform tasks before that. Then call again Malleability to commit the change
zombies_collect_suspended(mall->user_comm, mall->myId, mall->numP, mall->numC, mall->root);
if(mall->myId < mall->numC) {
if(mall->thread_comm != MPI_COMM_WORLD) MPI_Comm_free(&(mall->thread_comm)); //FIXME Change this so the user requests the update and comms_update is called
if(mall->comm != MPI_COMM_WORLD) MPI_Comm_free(&(mall->comm));
mall->dup_user_comm = 1;
MPI_Comm_dup(mall->intercomm, &(mall->thread_comm));
MPI_Comm_dup(mall->intercomm, &(mall->comm));
MPI_Comm_set_name(mall->thread_comm, "MPI_COMM_MALL_THREAD");
MPI_Comm_set_name(mall->comm, "MPI_COMM_MALL");
MPI_Comm_free(&(mall->intercomm));
//mall_conf->results->spawn_time[mall_conf->grp] += MPI_Wtime() - time_extra;
if(malleability_spawn_contains_strat(mall_conf->spawn_strategies,MALL_SPAWN_PTHREAD, NULL)) {
// mall_conf->results->spawn_real_time[mall_conf->grp] += MPI_Wtime() - time_extra;
}
return MALL_DIST_COMPLETED;
} else {
return MALL_ZOMBIE;
}
}
//======================================================||
//================PRIVATE FUNCTIONS=====================||
//=================COMM NODE INFO ======================||
//======================================================||
//======================================================||
/*
 * Broadcasts the node information (num_cpus, num_nodes and the nodelist)
 * from the root of the parents to every process of both groups.
 */
void comm_node_data(int rootBcast, int is_child_group) {
MPI_Datatype node_type;
def_nodeinfo_type(&node_type);
MPI_Bcast(mall, 1, node_type, rootBcast, mall->intercomm);
if(is_child_group) {
mall->nodelist = malloc((mall->nodelist_len+1) * sizeof(char));
mall->nodelist[mall->nodelist_len] = '\0';
}
MPI_Bcast(mall->nodelist, mall->nodelist_len, MPI_CHAR, rootBcast, mall->intercomm);
MPI_Type_free(&node_type);
}
/*
 * Defines an MPI derived datatype covering the num_cpus, num_nodes and
 * nodelist_len fields of the malleability_t structure.
 */
void def_nodeinfo_type(MPI_Datatype *node_type) {
int i, counts = 3;
int blocklengths[3] = {1, 1, 1};
MPI_Aint displs[counts], dir;
MPI_Datatype types[counts];
// Fill the types vector
types[0] = types[1] = types[2] = MPI_INT;
// Fill the displacements vector
MPI_Get_address(mall, &dir);
MPI_Get_address(&(mall->num_cpus), &displs[0]);
MPI_Get_address(&(mall->num_nodes), &displs[1]);
MPI_Get_address(&(mall->nodelist_len), &displs[2]);
for(i=0;i<counts;i++) displs[i] -= dir;
MPI_Type_create_struct(counts, blocklengths, displs, types, node_type);
MPI_Type_commit(node_type);
}
// TODO Move this somewhere else??
//======================================================||
//================PRIVATE FUNCTIONS=====================||
//===============COMM PARENTS THREADS===================||
//======================================================||
//======================================================||
int comm_state; //FIXME Use a handler
/*
 * Creates a thread to run a communication in the background.
 */
int thread_creation() {
comm_state = MALL_DIST_PENDING;
if(pthread_create(&(mall->async_thread), NULL, thread_async_work, NULL)) {
printf("Error al crear el hilo\n");
MPI_Abort(MPI_COMM_WORLD, -1);
return -1;
}
return comm_state;
}
/*
 * Check performed by a master thread to find out whether a worker
 * thread has finished its background communication.
 *
 * The state of the communication is returned when the function ends.
 */
int thread_check() {
int all_completed = 0, is_intercomm;
// Check that all threads have finished the distribution (same value in comm_state)
MPI_Allreduce(&comm_state, &all_completed, 1, MPI_INT, MPI_MAX, mall->comm);
if(all_completed != MALL_DIST_COMPLETED) return MALL_DIST_PENDING; // Continue only if asynchronous send has ended
//FIXME The MALL_APP_ENDED state is not taken into account
if(pthread_join(mall->async_thread, NULL)) {
printf("Error al esperar al hilo\n");
MPI_Abort(MPI_COMM_WORLD, -1);
return -2;
}
MPI_Comm_test_inter(mall->intercomm, &is_intercomm);
//if(!is_intercomm) mall_conf->results->async_end = MPI_Wtime(); // Merge method only
return end_redistribution();
}
/*
 * Function executed by a thread.
 * It performs a synchronous communication with the children which,
 * from the user's point of view, can be considered to run in the
 * background.
 *
 * When the communication ends, the master thread can check it
 * through the "comm_state" value.
 */
void* thread_async_work() {
send_data(mall->numC, dist_a_data, MALLEABILITY_USE_SYNCHRONOUS);
comm_state = MALL_DIST_COMPLETED;
pthread_exit(NULL);
}
//==============================================================================
/*
 * Prints the current state of all the communicators.
 */
void print_comms_state() {
int tester;
char *test = malloc(MPI_MAX_OBJECT_NAME * sizeof(char));
MPI_Comm_get_name(mall->comm, test, &tester);
printf("P%d Comm=%d Name=%s\n", mall->myId, mall->comm, test);
MPI_Comm_get_name(mall->user_comm, test, &tester);
printf("P%d Comm=%d Name=%s\n", mall->myId, mall->user_comm, test);
if(mall->intercomm != MPI_COMM_NULL) {
MPI_Comm_get_name(mall->intercomm, test, &tester);
printf("P%d Comm=%d Name=%s\n", mall->myId, mall->intercomm, test);
}
free(test);
}
void malleability_comms_update(MPI_Comm comm) {
if(mall->thread_comm != MPI_COMM_WORLD) MPI_Comm_free(&(mall->thread_comm));
if(mall->comm != MPI_COMM_WORLD) MPI_Comm_free(&(mall->comm));
if(mall->user_comm != MPI_COMM_WORLD) MPI_Comm_free(&(mall->user_comm)); //TODO Isn't this dangerous?
MPI_Comm_dup(comm, &(mall->thread_comm));
MPI_Comm_dup(comm, &(mall->comm));
MPI_Comm_dup(comm, &(mall->user_comm));
MPI_Comm_set_name(mall->thread_comm, "MPI_COMM_MALL_THREAD");
MPI_Comm_set_name(mall->comm, "MPI_COMM_MALL");
MPI_Comm_set_name(mall->user_comm, "MPI_COMM_MALL_USER");
}
#ifndef MALLEABILITY_MANAGER_H
#define MALLEABILITY_MANAGER_H
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <mpi.h>
#include "malleabilityStates.h"
int init_malleability(int myId, int numP, int root, MPI_Comm comm, char *name_exec, char *nodelist, int num_cpus, int num_nodes);
void free_malleability();
int malleability_checkpoint();
void set_benchmark_grp(int grp);
void set_malleability_configuration(int spawn_method, int spawn_strategies, int spawn_dist, int red_method, int red_strategies);
void set_children_number(int numC); // TODO TO BE DEPRECATED
void get_malleability_user_comm(MPI_Comm *comm);
void malleability_add_data(void *data, size_t total_qty, int type, int is_replicated, int is_constant);
void malleability_modify_data(void *data, size_t index, size_t total_qty, int type, int is_replicated, int is_constant);
void malleability_get_entries(size_t *entries, int is_replicated, int is_constant);
void malleability_get_data(void **data, size_t index, int is_replicated, int is_constant);
#endif
#ifndef MALLEABILITY_STATES_H
#define MALLEABILITY_STATES_H
#include <stdio.h>
#include <stdlib.h>
//States
#define MALL_DENIED -1
enum mall_states{MALL_UNRESERVED, MALL_NOT_STARTED, MALL_ZOMBIE, MALL_SPAWN_PENDING, MALL_SPAWN_SINGLE_PENDING,
MALL_SPAWN_SINGLE_COMPLETED, MALL_SPAWN_ADAPT_POSTPONE, MALL_SPAWN_COMPLETED, MALL_DIST_PENDING, MALL_DIST_COMPLETED,
MALL_SPAWN_ADAPT_PENDING, MALL_SPAWN_ADAPTED, MALL_COMPLETED};
enum mall_spawn_methods{MALL_SPAWN_BASELINE, MALL_SPAWN_MERGE};
#define MALL_SPAWN_PTHREAD 2
#define MALL_SPAWN_SINGLE 3
enum mall_redistribution_methods{MALL_RED_BASELINE, MALL_RED_POINT, MALL_RED_RMA_LOCK, MALL_RED_RMA_LOCKALL, MALL_RED_IBARRIER};
#define MALL_RED_THREAD 2
//#define MALL_RED_IBARRIER 3 Add as a strategy and remove it as a method
#define MALLEABILITY_ROOT 0
#define MAL_APP_EXECUTING 0
#define MAL_APP_ENDED 1
//TODO DEPRECATE
#define MAL_INT 0
#define MAL_CHAR 1
#define MAL_DOUBLE 2
////////////////
#define MALLEABILITY_CHILDREN 1
#define MALLEABILITY_NOT_CHILDREN 0
#endif
#include "malleabilityTypes.h"
void init_malleability_data_struct(malleability_data_t *data_struct, size_t size);
void realloc_malleability_data_struct(malleability_data_t *data_struct, size_t qty_to_add);
void def_malleability_entries(malleability_data_t *data_struct_rep, malleability_data_t *data_struct_dist, MPI_Datatype *new_type);
void def_malleability_qty_type(malleability_data_t *data_struct_rep, malleability_data_t *data_struct_dist, MPI_Datatype *new_type);
//======================================================||
//======================================================||
//===================PUBLIC FUNCTIONS===================||
//======================================================||
//======================================================||
/*
 * Adds to the data structure to communicate with the children a new
 * data set with a total of "total_qty" elements distributed among all
 * the parents. The new series "data" only represents the data held by
 * this parent.
 */
void add_data(void *data, size_t total_qty, int type, size_t request_qty, malleability_data_t *data_struct) {
size_t i;
if(data_struct->entries == 0) {
init_malleability_data_struct(data_struct, MALLEABILITY_INIT_DATA_QTY);
} else if(data_struct->entries == data_struct->max_entries) {
realloc_malleability_data_struct(data_struct, MALLEABILITY_INIT_DATA_QTY);
}
data_struct->qty[data_struct->entries] = total_qty;
data_struct->types[data_struct->entries] = type;
data_struct->arrays[data_struct->entries] = data;
data_struct->request_qty[data_struct->entries] = request_qty;
if(request_qty) {
data_struct->requests[data_struct->entries] = (MPI_Request *) malloc(request_qty * sizeof(MPI_Request));
for(i=0; i < request_qty; i++) {
data_struct->requests[data_struct->entries][i] = MPI_REQUEST_NULL;
}
}
data_struct->entries+=1;
}
/*
 * Modifies, in the data structure to communicate with the children, a
 * data set with a total of "total_qty" elements distributed among all
 * the parents. The new series "data" only represents the data held by
 * this parent.
 */
void modify_data(void *data, size_t index, size_t total_qty, int type, size_t request_qty, malleability_data_t *data_struct) {
size_t i;
if(index >= data_struct->entries) { // Index does not exist
return;
}
if(data_struct->requests[index] != NULL) {
//free(data_struct->requests[index]); TODO Error when trying to free
data_struct->requests[index] = NULL;
}
data_struct->qty[index] = total_qty;
data_struct->types[index] = type;
data_struct->arrays[index] = data;
data_struct->request_qty[index] = request_qty;
if(request_qty) {
data_struct->requests[index] = (MPI_Request *) malloc(request_qty * sizeof(MPI_Request));
for(i=0; i < request_qty; i++) {
data_struct->requests[index][i] = MPI_REQUEST_NULL;
}
}
}
/*
 * Communicates the synchronous or asynchronous data structures from
 * the parents to the children. The structures do not need to be
 * initialized beforehand to work correctly.
 *
 * In the "root" argument, every process has to indicate who the root
 * process of the parents is, exclusively.
 */
void comm_data_info(malleability_data_t *data_struct_rep, malleability_data_t *data_struct_dist, int is_children_group, int myId, int root, MPI_Comm intercomm) {
int is_intercomm, rootBcast = MPI_PROC_NULL;
size_t i, j;
MPI_Datatype entries_type, struct_type;
MPI_Comm_test_inter(intercomm, &is_intercomm);
if(is_intercomm && !is_children_group) {
rootBcast = myId == root ? MPI_ROOT : MPI_PROC_NULL;
} else {
rootBcast = root;
}
// Send the number of entries first
def_malleability_entries(data_struct_dist, data_struct_rep, &entries_type);
MPI_Bcast(MPI_BOTTOM, 1, entries_type, rootBcast, intercomm);
if(is_children_group && ( data_struct_rep->entries != 0 || data_struct_dist->entries != 0 )) {
init_malleability_data_struct(data_struct_rep, data_struct_rep->entries);
init_malleability_data_struct(data_struct_dist, data_struct_dist->entries);
}
def_malleability_qty_type(data_struct_dist, data_struct_rep, &struct_type);
MPI_Bcast(MPI_BOTTOM, 1, struct_type, rootBcast, intercomm);
if(is_children_group) {
for(i=0; i < data_struct_rep->entries; i++) {
data_struct_rep->arrays[i] = (void *) malloc(data_struct_rep->qty[i] * sizeof(int)); //TODO Take into account that it is not always an int
data_struct_rep->requests[i] = (MPI_Request *) malloc(data_struct_rep->request_qty[i] * sizeof(MPI_Request));
for(j=0; j < data_struct_rep->request_qty[i]; j++) {
data_struct_rep->requests[i][j] = MPI_REQUEST_NULL;
}
}
for(i=0; i < data_struct_dist->entries; i++) {
data_struct_dist->arrays[i] = (void *) NULL;
data_struct_dist->requests[i] = (MPI_Request *) malloc(data_struct_dist->request_qty[i] * sizeof(MPI_Request));
for(j=0; j < data_struct_dist->request_qty[i]; j++) {
data_struct_dist->requests[i][j] = MPI_REQUEST_NULL;
}
}
}
MPI_Type_free(&entries_type);
MPI_Type_free(&struct_type);
}
//======================================================||
//======================================================||
//=========INIT/REALLOC/FREE RESULTS FUNCTIONS==========||
//======================================================||
//======================================================||
/*
 * Initializes the structure that describes a series of data with the
 * same location and usage characteristics. It is initialized to hold
 * up to "size" entries.
 */
void init_malleability_data_struct(malleability_data_t *data_struct, size_t size) {
size_t i;
data_struct->max_entries = size;
data_struct->qty = (size_t *) malloc(size * sizeof(size_t));
data_struct->types = (int *) malloc(size * sizeof(int));
data_struct->request_qty = (size_t *) malloc(size * sizeof(size_t));
data_struct->requests = (MPI_Request **) malloc(size * sizeof(MPI_Request *));
data_struct->arrays = (void **) malloc(size * sizeof(void *));
for(i=0; i<size; i++) { //calloc and memset does not ensure a NULL value
data_struct->requests[i] = NULL;
data_struct->arrays[i] = NULL;
}
}
/*
 * Reallocates the structure that describes a series of data with the
 * same location and usage characteristics. "qty_to_add" new entries
 * are added to the existing ones.
 */
void realloc_malleability_data_struct(malleability_data_t *data_struct, size_t qty_to_add) {
size_t i, needed, *qty_aux, *request_qty_aux;
int *types_aux;
MPI_Request **requests_aux;
void **arrays_aux;
needed = data_struct->max_entries + qty_to_add;
qty_aux = (size_t *) realloc(data_struct->qty, needed * sizeof(size_t));
types_aux = (int *) realloc(data_struct->types, needed * sizeof(int));
request_qty_aux = (size_t *) realloc(data_struct->request_qty, needed * sizeof(size_t));
requests_aux = (MPI_Request **) realloc(data_struct->requests, needed * sizeof(MPI_Request *));
arrays_aux = (void **) realloc(data_struct->arrays, needed * sizeof(void *));
if(qty_aux == NULL || arrays_aux == NULL || requests_aux == NULL || types_aux == NULL || request_qty_aux == NULL) {
fprintf(stderr, "Fatal error - No se ha podido realojar la memoria constante de datos a redistribuir/comunicar\n");
MPI_Abort(MPI_COMM_WORLD, 1);
}
for(i=data_struct->max_entries; i<needed; i++) { // calloc and memset do not guarantee a NULL pointer value
requests_aux[i] = NULL;
arrays_aux[i] = NULL;
}
data_struct->qty = qty_aux;
data_struct->types = types_aux;
data_struct->request_qty = request_qty_aux;
data_struct->requests = requests_aux;
data_struct->arrays = arrays_aux;
data_struct->max_entries = needed;
}
void free_malleability_data_struct(malleability_data_t *data_struct) {
size_t i, j, max;
max = data_struct->entries;
if(max != 0) {
for(i=0; i<max; i++) {
//free(data_struct->arrays[i]); //FIXME Are values allocated with a single element not being freed?
}
if(data_struct->qty != NULL) {
free(data_struct->qty);
}
if(data_struct->types != NULL) {
free(data_struct->types);
}
if(data_struct->requests != NULL && data_struct->request_qty != NULL) {
for(i=0; i<max; i++) {
if(data_struct->requests[i] != NULL) {
for(j=0; j<data_struct->request_qty[i]; j++) {
if(data_struct->requests[i][j] != MPI_REQUEST_NULL) {
MPI_Request_free(&(data_struct->requests[i][j]));
data_struct->requests[i][j] = MPI_REQUEST_NULL;
}
}
free(data_struct->requests[i]);
}
}
free(data_struct->request_qty);
free(data_struct->requests);
}
if(data_struct->arrays != NULL) {
free(data_struct->arrays);
}
}
}
//======================================================||
//======================================================||
//================MPI DERIVED DATATYPES=================||
//======================================================||
//======================================================||
/*
 * Creates a derived datatype to send the number of entries
 * of two data description structures.
 */
void def_malleability_entries(malleability_data_t *data_struct_rep, malleability_data_t *data_struct_dist, MPI_Datatype *new_type) {
int counts = 2;
int blocklengths[counts];
MPI_Aint displs[counts];
MPI_Datatype types[counts];
blocklengths[0] = blocklengths[1] = 1;
types[0] = types[1] = MPI_UNSIGNED_LONG;
// Obtain absolute addresses (the type is later used with MPI_BOTTOM)
MPI_Get_address(&(data_struct_rep->entries), &displs[0]);
MPI_Get_address(&(data_struct_dist->entries), &displs[1]);
MPI_Type_create_struct(counts, blocklengths, displs, types, new_type);
MPI_Type_commit(new_type);
}
/*
 * Creates a derived datatype to send the quantities and data types of
 * two data description structures.
 * The "requests" vector is not sent, since it is only needed
 * by the parents.
 * TODO Refactor?
 */
void def_malleability_qty_type(malleability_data_t *data_struct_rep, malleability_data_t *data_struct_dist, MPI_Datatype *new_type) {
int counts = 6;
int blocklengths[counts];
MPI_Aint displs[counts];
MPI_Datatype types[counts];
types[0] = types[1] = types[3] = types[4] = MPI_UNSIGNED_LONG;
types[2] = types[5] = MPI_INT;
blocklengths[0] = blocklengths[1] = blocklengths[2] = data_struct_rep->entries;
blocklengths[3] = blocklengths[4] = blocklengths[5] = data_struct_dist->entries;
MPI_Get_address((data_struct_rep->qty), &displs[0]);
MPI_Get_address((data_struct_rep->request_qty), &displs[1]);
MPI_Get_address((data_struct_rep->types), &displs[2]);
MPI_Get_address((data_struct_dist->qty), &displs[3]);
MPI_Get_address((data_struct_dist->request_qty), &displs[4]);
MPI_Get_address((data_struct_dist->types), &displs[5]);
MPI_Type_create_struct(counts, blocklengths, displs, types, new_type);
MPI_Type_commit(new_type);
}
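/*
 * Note (added): both derived types above store absolute addresses from
 * MPI_Get_address without subtracting a base, so the matching communication
 * has to pass MPI_BOTTOM as the buffer, as comm_data_info does:
 *
 *   MPI_Bcast(MPI_BOTTOM, 1, entries_type, rootBcast, intercomm);
 *
 * By contrast, def_nodeinfo_type subtracts the base address of "mall" and
 * is therefore broadcast with "mall" itself as the buffer.
 */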
#ifndef MALLEABILITY_TYPES_H
#define MALLEABILITY_TYPES_H
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <fcntl.h>
#include <sys/stat.h>
#include "malleabilityStates.h"
#define MALLEABILITY_INIT_DATA_QTY 100
typedef struct {
size_t entries; // Number of data sets to communicate
size_t max_entries;
size_t *qty; // Number of elements in each subvector of "arrays"
int *types;
// Vector of request vectors. Each top-level element holds the requests to check
// in order to consider the communication of that data set finished
size_t *request_qty;
MPI_Request **requests;
void **arrays; // Each subvector is a series of data to communicate
} malleability_data_t;
void add_data(void *data, size_t total_qty, int type, size_t request_qty, malleability_data_t *data_struct);
void modify_data(void *data, size_t index, size_t total_qty, int type, size_t request_qty, malleability_data_t *data_struct);
void comm_data_info(malleability_data_t *data_struct_rep, malleability_data_t *data_struct_dist, int is_children_group, int myId, int root, MPI_Comm intercomm);
void free_malleability_data_struct(malleability_data_t *data_struct);
#endif