#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <mpi.h>
#include "computing_func.h"
#include "comunication_func.h"
#include "linear_reg.h"
#include "Main_datatypes.h"
#include "process_stage.h"
//#include "../malleability/malleabilityManager.h" //FIXME Refactor
#include "../malleability/distribution_methods/block_distribution.h"

void linear_regression_stage(iter_stage_t *stage, group_data group, MPI_Comm comm);

double init_matrix_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm, int compute);
double init_pi_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm, int compute);
void init_comm_ptop_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm);
double init_comm_bcast_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm);
double init_comm_allgatherv_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm);
double init_comm_reduce_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm);

/*
 * Computes the time per operation, or the total number of bytes to send,
 * for each iteration stage so that the iterations can later be executed
 * correctly.
 *
 * Only the ROOT process performs the computation and then sends the result
 * to the remaining processes.
 *
 * If the "compute" flag is set, the operations are performed to recompute
 * the timings from scratch. If it is false, only the required memory is
 * allocated and the values obtained in previous calls are reused. All
 * processes must pass the same value for this flag.
 *
 * TODO Split the work among the processes.
 * TODO Does not take into account changes between heterogeneous machines.
 */
double init_stage(configuration *config_file, int stage_i, group_data group, MPI_Comm comm, int compute) {
  double result = 0;
  int qty = 20000;

  iter_stage_t *stage = &(config_file->stages[stage_i]);
  stage->operations = qty;

  switch(stage->pt) {
    //Computation
    case COMP_MATRIX:
      result = init_matrix_pt(group, config_file, stage, comm, compute);
      break;
    case COMP_PI:
      result = init_pi_pt(group, config_file, stage, comm, compute);
      break;

    //Communication
    case COMP_POINT:
      init_comm_ptop_pt(group, config_file, stage, comm);
      break;
    case COMP_BCAST:
      result = init_comm_bcast_pt(group, config_file, stage, comm);
      break;
    case COMP_ALLGATHER:
      result = init_comm_allgatherv_pt(group, config_file, stage, comm);
      break;
    case COMP_REDUCE:
    case COMP_ALLREDUCE:
      result = init_comm_reduce_pt(group, config_file, stage, comm);
      break;
  }
  return result;
}
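
/*
 * Sketch of the "compute" flag semantics described above (hypothetical call
 * sites, for illustration only; stage_i is an assumed loop variable):
 *
 *   init_stage(config_file, stage_i, group, comm, 1); // measure t_op / bytes from scratch
 *   ...
 *   init_stage(config_file, stage_i, group, comm, 0); // reuse previous values, only allocate
 */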

/*
 * Processes one stage of the iteration, determining the type of
 * operation to perform and calling the function that carries it out.
 */
double process_stage(configuration config_file, iter_stage_t stage, group_data group, MPI_Comm comm) {
  int i;
  double result = 0;

  switch(stage.pt) {
    //Computation
    case COMP_PI:
      for(i=0; i < stage.operations; i++) {
        result += computePiSerial(config_file.granularity);
      }
      break;
    case COMP_MATRIX:
      for(i=0; i < stage.operations; i++) {
        result += computeMatrix(stage.double_array, config_file.granularity); //FIXME Timings are not repeatable
      }
      break;
    //Communications
    case COMP_POINT:
      point_to_point(group.myId, group.numP, ROOT, comm, stage.array, stage.real_bytes);
      break;
    case COMP_BCAST:
      MPI_Bcast(stage.array, stage.real_bytes, MPI_CHAR, ROOT, comm);
      break;
    case COMP_ALLGATHER:
      MPI_Allgatherv(stage.array, stage.my_bytes, MPI_CHAR, stage.full_array, stage.counts.counts, stage.counts.displs, MPI_CHAR, comm);
      break;
    case COMP_REDUCE:
      MPI_Reduce(stage.array, stage.full_array, stage.real_bytes, MPI_CHAR, MPI_MAX, ROOT, comm);
      break;
    case COMP_ALLREDUCE:
      MPI_Allreduce(stage.array, stage.full_array, stage.real_bytes, MPI_CHAR, MPI_MAX, comm);
      break;
  }
  return result;
}
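
/*
 * Minimal usage sketch (hypothetical driver, not part of this file): once every
 * stage has been prepared with init_stage(), each iteration walks the stage
 * list. The names iters, n_stages and result are assumptions for illustration.
 *
 *   for(i = 0; i < iters; i++)
 *     for(s = 0; s < config_file->n_stages; s++)
 *       result += process_stage(*config_file, config_file->stages[s], group, comm);
 */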




// Several latency tests are performed by sending empty MPI_CHAR messages
// from process 0 to the last process (numP-1), which then sends one
// message back to process 0.
//
// Returns the latency of the system.
double latency(int myId, int numP, MPI_Comm comm) {
  int i, loop_count = 100;
  double start_time, stop_time, time;
  char aux;

  aux = '0';

  MPI_Barrier(comm);
  start_time = MPI_Wtime();
  if(myId == 0) {
    for(i=0; i<loop_count; i++){
      MPI_Send(&aux, 0, MPI_CHAR, numP-1, 99, comm);
    }
    MPI_Recv(&aux, 0, MPI_CHAR, numP-1, 99, comm, MPI_STATUS_IGNORE);
  } else if(myId+1 == numP) {
    for(i=0; i<loop_count; i++){
      MPI_Recv(&aux, 0, MPI_CHAR, 0, 99, comm, MPI_STATUS_IGNORE);
    }
    MPI_Send(&aux, 0, MPI_CHAR, 0, 99, comm);
  }
  MPI_Barrier(comm);
  stop_time = MPI_Wtime();
  time = (stop_time - start_time) / loop_count;

  MPI_Bcast(&time, 1, MPI_DOUBLE, ROOT, comm);
  return time;
}
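
/*
 * The value returned here is later subtracted from the per-message timing in
 * bandwidth() below (bw = n_bytes / (time - latency)), so only the
 * size-dependent part of each transfer is attributed to bandwidth.
 */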


// Several bandwidth tests are performed by sending N MPI_CHAR elements
// from process 0 to the last process (numP-1), which then sends a
// message back to process 0.
//
// Returns the measured bandwidth of the system.
double bandwidth(int myId, int numP, MPI_Comm comm, double latency, int n) {
  int i, loop_count = 100, n_bytes;
  double start_time, stop_time, bw, time;
  char *aux;

  n_bytes = n * sizeof(char);
  aux = malloc(n_bytes);
  time = 0;

  MPI_Barrier(comm);
  start_time = MPI_Wtime();
  if(myId == 0) {
    for(i=0; i<loop_count; i++){
      MPI_Send(aux, n, MPI_CHAR, numP-1, 99, comm);
    }
    MPI_Recv(aux, 0, MPI_CHAR, numP-1, 99, comm, MPI_STATUS_IGNORE);
  } else if(myId+1 == numP) {
    for(i=0; i<loop_count; i++){
      MPI_Recv(aux, n, MPI_CHAR, 0, 99, comm, MPI_STATUS_IGNORE);
    }
    MPI_Send(aux, 0, MPI_CHAR, 0, 99, comm);
  }
  MPI_Barrier(comm);
  stop_time = MPI_Wtime();
  time = (stop_time - start_time) / loop_count;
  bw = ((double)n_bytes) / (time - latency);

  MPI_Bcast(&bw, 1, MPI_DOUBLE, ROOT, comm);
  free(aux);
  return bw;
}
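
/*
 * Sketch of how the two probes above fit together (hypothetical call site; the
 * real one lives outside this file). The results are the values consumed later
 * by init_comm_ptop_pt() through config_file->latency_m and config_file->bw_m;
 * the 1 MB message size is an arbitrary example.
 *
 *   config_file->latency_m = latency(group.myId, group.numP, comm);
 *   config_file->bw_m      = bandwidth(group.myId, group.numP, comm,
 *                                      config_file->latency_m, 1000000);
 */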

/*
 * Creates a linear regression model to predict
 * the number of bytes needed to perform a collective
 * communication.
 */
void linear_regression_stage(iter_stage_t *stage, group_data group, MPI_Comm comm) {
  int i, j, tam, loop_iters = 100;

  tam = LR_ARRAY_TAM * loop_iters;
  double *bytes = malloc(tam * sizeof(double));
  double *times = malloc(tam * sizeof(double));
  
  for(i=0; i<LR_ARRAY_TAM; i++) {
    for(j=0; j<loop_iters; j++) {
      bytes[i*loop_iters + j] = LR_bytes_array[i];
    }
  }

  switch(stage->pt) {
    //Communications
    case COMP_BCAST:
      lr_times_bcast(group.myId, group.numP, ROOT, comm, loop_iters, times);
      break;
    case COMP_ALLGATHER:
      lr_times_allgatherv(group.myId, group.numP, ROOT, comm, loop_iters, times);
      break;
    case COMP_REDUCE:
      lr_times_reduce(group.myId, group.numP, ROOT, comm, loop_iters, times);
      break;
    case COMP_ALLREDUCE:
      lr_times_allreduce(group.myId, group.numP, ROOT, comm, loop_iters, times);
      break;
    default:
      free(times);
      free(bytes);
      return;
  }

  if(group.myId == ROOT) {
    MPI_Reduce(MPI_IN_PLACE, times, LR_ARRAY_TAM * loop_iters, MPI_DOUBLE, MPI_MAX, ROOT, comm);
    /*
    printf("PT=%d ", stage->pt);
    for(i=0; i<tam; i++) {
      printf("%lf, ", times[i]);
    }
    printf("\n");
    printf("BYTES ");
    for(i=0; i<tam; i++) {
      printf("%lf, ", bytes[i]);
    }
    printf("\n");
    */
    lr_compute(tam, bytes, times, &(stage->slope), &(stage->intercept));
  } else {
    MPI_Reduce(times, NULL, LR_ARRAY_TAM * loop_iters, MPI_DOUBLE, MPI_MAX, ROOT, comm);
  }

  MPI_Bcast(&(stage->slope), 1, MPI_DOUBLE, ROOT, comm);
  MPI_Bcast(&(stage->intercept), 1, MPI_DOUBLE, ROOT, comm);

  free(times);
  free(bytes);
}
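
/*
 * The fitted model is used later through lr_calc_Y() (see the init_comm_*_pt
 * functions below) to choose a byte count for a target stage time. Assuming the
 * usual linear model time = slope * bytes + intercept, that inversion is
 * roughly bytes = (t_stage - intercept) / slope; the exact handling is up to
 * lr_calc_Y() in linear_reg.h.
 */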

/*
 * ========================================================================================
 * ========================================================================================
 * =================================INIT STAGE FUNCTIONS===================================
 * ========================================================================================
 * ========================================================================================
*/

double init_matrix_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm, int compute) {
  double result, t_stage;

  result = 0;
  t_stage = stage->t_stage * config_file->factors[group.grp];
  initMatrix(&(stage->double_array), config_file->granularity);

  double start_time = MPI_Wtime();
  if(group.myId == ROOT && compute) {
    result+= process_stage(*config_file, *stage, group, comm);
  }

  if(compute) {
    stage->t_op = (MPI_Wtime() - start_time) / stage->operations; //Time per operation
    MPI_Bcast(&(stage->t_op), 1, MPI_DOUBLE, ROOT, comm);
  }
  stage->operations = t_stage / stage->t_op;

  return result;
}
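
/*
 * Worked example of the calibration above (illustrative numbers only): with
 * t_stage = 0.5 s and a measured t_op of 0.002 s per matrix operation,
 * stage->operations becomes 0.5 / 0.002 = 250 operations per iteration.
 */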

double init_pi_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm, int compute) {
  double result, t_stage, start_time;

  result = 0;
  t_stage = stage->t_stage * config_file->factors[group.grp];	 
  start_time = MPI_Wtime();
  if(group.myId == ROOT && compute) {
    result+= process_stage(*config_file, *stage, group, comm);
  }

  if(compute) {
    stage->t_op = (MPI_Wtime() - start_time) / stage->operations; //Time per operation
    MPI_Bcast(&(stage->t_op), 1, MPI_DOUBLE, ROOT, comm);
  }
  stage->operations = t_stage / stage->t_op;

  return result;
}

void init_comm_ptop_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm) {
  struct Dist_data dist_data;

  if(stage->array != NULL)
    free(stage->array);
  if(stage->bytes == 0) {
    stage->bytes = (stage->t_stage - config_file->latency_m) * config_file->bw_m;
  }
  get_block_dist(stage->bytes, group.myId, group.numP, &dist_data);
  stage->real_bytes = dist_data.tamBl;
  stage->array = malloc(sizeof(char) * stage->real_bytes);
}
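
/*
 * Worked example of the sizing above (illustrative numbers only): with
 * t_stage = 0.1 s, latency_m = 0.001 s and bw_m = 1e8 B/s, the stage gets
 * (0.1 - 0.001) * 1e8 = 9.9e6 bytes, which get_block_dist() then splits into
 * per-process blocks.
 */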

double init_comm_bcast_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm) {
  double start_time, time = 0;
  stage->real_bytes = stage->bytes;
  if(stage->bytes == 0) {
    start_time = MPI_Wtime();
    linear_regression_stage(stage, group, comm);
    lr_calc_Y(stage->slope, stage->intercept, stage->t_stage, &(stage->real_bytes));

    time = MPI_Wtime() - start_time;
    if(group.myId == ROOT) {
      MPI_Reduce(MPI_IN_PLACE, &time, 1, MPI_DOUBLE, MPI_MAX, ROOT, comm);
    } else {
      MPI_Reduce(&time, NULL, 1, MPI_DOUBLE, MPI_MAX, ROOT, comm);
    }
  }

  if(stage->array != NULL)
    free(stage->array);
  stage->array = malloc(sizeof(char) * stage->real_bytes);

  return time;
}


double init_comm_allgatherv_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm) {
  double start_time, time = 0;
  struct Dist_data dist_data;

  stage->real_bytes = stage->bytes;
  if(stage->bytes == 0) {
    start_time = MPI_Wtime();
    linear_regression_stage(stage, group, comm);
    lr_calc_Y(stage->slope, stage->intercept, stage->t_stage, &(stage->real_bytes));

    time = MPI_Wtime() - start_time;
    if(group.myId == ROOT) {
      MPI_Reduce(MPI_IN_PLACE, &time, 1, MPI_DOUBLE, MPI_MAX, ROOT, comm);
    } else {
      MPI_Reduce(&time, NULL, 1, MPI_DOUBLE, MPI_MAX, ROOT, comm);
    }
  }

  if(stage->counts.counts != NULL)
    freeCounts(&(stage->counts));
  prepare_comm_allgatherv(group.numP, stage->real_bytes, &(stage->counts));
      
  get_block_dist(stage->real_bytes, group.myId, group.numP, &dist_data);
  stage->my_bytes = dist_data.tamBl;
  if(stage->array != NULL)
    free(stage->array);
  stage->array = malloc(sizeof(char) * stage->my_bytes);
  if(stage->full_array != NULL)
    free(stage->full_array);
  stage->full_array = malloc(sizeof(char) * stage->real_bytes);

  return time;
}
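
/*
 * Note on the buffers above: full_array holds the complete gathered result of
 * real_bytes, while array holds only this process's block (my_bytes, taken from
 * the block distribution), matching the MPI_Allgatherv call in process_stage().
 */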

double init_comm_reduce_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm) {
  double start_time, time = 0;

  stage->real_bytes = stage->bytes;
  if(stage->bytes == 0) {
    start_time = MPI_Wtime();
    linear_regression_stage(stage, group, comm);
    lr_calc_Y(stage->slope, stage->intercept, stage->t_stage, &(stage->real_bytes));

    time = MPI_Wtime() - start_time;
    if(group.myId == ROOT) {
      MPI_Reduce(MPI_IN_PLACE, &time, 1, MPI_DOUBLE, MPI_MAX, ROOT, comm);
    } else {
      MPI_Reduce(&time, NULL, 1, MPI_DOUBLE, MPI_MAX, ROOT, comm);
    }
  }

  if(stage->array != NULL)
    free(stage->array);
  stage->array = malloc(sizeof(char) * stage->real_bytes);
  //The full array for the reduce needs the same size
  if(stage->full_array != NULL)
    free(stage->full_array);
  stage->full_array = malloc(sizeof(char) * stage->real_bytes);

  return time;
}