#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <mpi.h>
#include "computing_func.h"
#include "comunication_func.h"
#include "linear_reg.h"
#include "Main_datatypes.h"
#include "process_stage.h"
#include "../malleability/distribution_methods/block_distribution.h"

void linear_regression_stage(iter_stage_t *stage, group_data group, MPI_Comm comm);

double init_matrix_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm, int compute);
double init_pi_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm, int compute);
void init_comm_ptop_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm);
double init_comm_bcast_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm);
double init_comm_allgatherv_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm);
double init_comm_reduce_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm);

/*
 * Computes the time per operation, or the total number of bytes to send,
 * of each iteration stage so that the iterations can afterwards be
 * performed correctly.
 *
 * Only the ROOT process performs the computation and then sends the
 * results to the remaining processes.
 *
 * If the "compute" flag is set, the operations are carried out to recompute
 * the timings from scratch. If it is false, only the required memory is
 * allocated and the values obtained in previous calls are reused. All
 * processes must pass the same value for this flag.
 *
 * TODO Split the work among the processes.
 * TODO Does not take changes between heterogeneous machines into account.
 */
double init_stage(configuration *config_file, int stage_i, group_data group, MPI_Comm comm, int compute) {
  double result = 0;
  int qty = 20000;

  iter_stage_t *stage = &(config_file->stages[stage_i]);
  stage->operations = qty;

  switch(stage->pt) {
    // Computation
    case COMP_MATRIX:
      result = init_matrix_pt(group, config_file, stage, comm, compute);
      break;
    case COMP_PI:
      result = init_pi_pt(group, config_file, stage, comm, compute);
      break;

    // Communication
    case COMP_POINT:
      init_comm_ptop_pt(group, config_file, stage, comm);
      break;
    case COMP_BCAST:
      result = init_comm_bcast_pt(group, config_file, stage, comm);
      break;
    case COMP_ALLGATHER:
      result = init_comm_allgatherv_pt(group, config_file, stage, comm);
      break;
    case COMP_REDUCE:
    case COMP_ALLREDUCE:
      result = init_comm_reduce_pt(group, config_file, stage, comm);
      break;
  }
  return result;
}

/*
 * Processes one stage of the iteration, determining the type of
 * operation to perform and calling the function that carries it out.
 */
double process_stage(configuration config_file, iter_stage_t stage, group_data group, MPI_Comm comm) {
  int i;
  double result = 0;

  switch(stage.pt) {
    // Computation
    case COMP_PI:
      for(i=0; i < stage.operations; i++) {
        result += computePiSerial(config_file.granularity);
      }
      break;
    case COMP_MATRIX:
      for(i=0; i < stage.operations; i++) {
        result += computeMatrix(stage.double_array, config_file.granularity); //FIXME Timings are not repeatable
      }
      break;
    // Communications
    case COMP_POINT:
      point_to_point(group.myId, group.numP, ROOT, comm, stage.array, stage.real_bytes);
      break;
    case COMP_BCAST:
      MPI_Bcast(stage.array, stage.real_bytes, MPI_CHAR, ROOT, comm);
      break;
    case COMP_ALLGATHER:
      MPI_Allgatherv(stage.array, stage.my_bytes, MPI_CHAR, stage.full_array, stage.counts.counts, stage.counts.displs, MPI_CHAR, comm);
      break;
    case COMP_REDUCE:
      MPI_Reduce(stage.array, stage.full_array, stage.real_bytes, MPI_CHAR, MPI_MAX, ROOT, comm);
      break;
    case COMP_ALLREDUCE:
      MPI_Allreduce(stage.array, stage.full_array, stage.real_bytes, MPI_CHAR, MPI_MAX, comm);
      break;
  }
  return result;
}
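
/*
 * Minimal usage sketch (illustrative only, not called anywhere in this file):
 * init_stage is expected to run once per stage before iterating, and
 * process_stage once per stage in every iteration. The field names n_stages
 * and iters are hypothetical; the real configuration struct is defined in
 * Main_datatypes.h and may name them differently.
 *
 *   for(i = 0; i < config_file->n_stages; i++)
 *     init_stage(config_file, i, group, comm, 1);
 *
 *   for(iter = 0; iter < config_file->iters; iter++)
 *     for(i = 0; i < config_file->n_stages; i++)
 *       process_stage(*config_file, config_file->stages[i], group, comm);
 */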




// Several latency tests are performed: the ROOT process sends an
// empty CHAR message to the last process (numP-1) loop_count times;
// the last process receives them and sends a single message back.
//
// Returns the estimated system latency (average time per message).
double latency(int myId, int numP, MPI_Comm comm) {
  int i, loop_count = 100;
  double start_time, stop_time, time;
  char aux;

  aux = '0';

  MPI_Barrier(comm);
  start_time = MPI_Wtime();
  if(myId == ROOT) {
    for(i=0; i<loop_count; i++){
      MPI_Send(&aux, 0, MPI_CHAR, numP-1, 99, comm);
    }
    MPI_Recv(&aux, 0, MPI_CHAR, numP-1, 99, comm, MPI_STATUS_IGNORE);
  } else if(myId+1 == numP) {
    for(i=0; i<loop_count; i++){
      MPI_Recv(&aux, 0, MPI_CHAR, ROOT, 99, comm, MPI_STATUS_IGNORE);
    }
    MPI_Send(&aux, 0, MPI_CHAR, ROOT, 99, comm);
  }
  MPI_Barrier(comm);
  stop_time = MPI_Wtime();
  time = (stop_time - start_time) / loop_count;

  MPI_Bcast(&time, 1, MPI_DOUBLE, ROOT, comm);
  return time;
}


// Several bandwidth tests are performed: the ROOT process sends N
// CHAR elements to the last process (numP-1) loop_count times; the
// last process receives them and sends an empty message back.
//
// Returns the estimated bandwidth (bytes per second, with the
// measured latency subtracted from the per-iteration time).
double bandwidth(int myId, int numP, MPI_Comm comm, double latency, int n) {
  int i, loop_count = 100, n_bytes;
  double start_time, stop_time, bw, time;
  char *aux;

  n_bytes = n * sizeof(char);
  aux = malloc(n_bytes);
  time = 0;

  MPI_Barrier(comm);
  start_time = MPI_Wtime();
  if(myId == ROOT) {
    for(i=0; i<loop_count; i++){
      MPI_Send(aux, n, MPI_CHAR, numP-1, 99, comm);
    }
    MPI_Recv(aux, 0, MPI_CHAR, numP-1, 99, comm, MPI_STATUS_IGNORE);
  } else if(myId+1 == numP) {
    for(i=0; i<loop_count; i++){
      MPI_Recv(aux, n, MPI_CHAR, ROOT, 99, comm, MPI_STATUS_IGNORE);
    }
    MPI_Send(aux, 0, MPI_CHAR, ROOT, 99, comm);
  }
  MPI_Barrier(comm);
  stop_time = MPI_Wtime();
  time = (stop_time - start_time) / loop_count;
  bw = ((double)n_bytes) / (time - latency);

  MPI_Bcast(&bw, 1, MPI_DOUBLE, ROOT, comm);
  free(aux);
  return bw;
}
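
/*
 * Note: the two measurements above feed a simple linear transfer-time model,
 * t(n) ~= latency + n / bw. Inverting it gives the message size expected to
 * take a target stage time, n = (t_stage - latency) * bw, which is how
 * init_comm_ptop_pt below derives stage->bytes from the measured values
 * stored in the configuration (latency_m and bw_m).
 */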

/*
 * Creates a linear regression model to predict
 * the number of bytes needed to perform a collective
 * communication.
 */
void linear_regression_stage(iter_stage_t *stage, group_data group, MPI_Comm comm) {
  int i, j, tam, loop_iters = 100;

  tam = LR_ARRAY_TAM * loop_iters;
  double *bytes = malloc(tam * sizeof(double));
  double *times = malloc(tam * sizeof(double));
  
  for(i=0; i<LR_ARRAY_TAM; i++) {
    for(j=0; j<loop_iters; j++) {
      bytes[i*loop_iters + j] = LR_bytes_array[i];
    }
  }

  // TODO Compute this only if it has not already been computed for another stage.
  // If it has been computed before, copy those values.

  switch(stage->pt) {
    // Communications
    case COMP_BCAST:
      lr_times_bcast(group.myId, group.numP, ROOT, comm, loop_iters, times);
      break;
    case COMP_ALLGATHER:
      lr_times_allgatherv(group.myId, group.numP, ROOT, comm, loop_iters, times);
      break;
    case COMP_REDUCE:
      lr_times_reduce(group.myId, group.numP, ROOT, comm, loop_iters, times);
      break;
    case COMP_ALLREDUCE:
      lr_times_allreduce(group.myId, group.numP, ROOT, comm, loop_iters, times);
      break;
    default:
      free(bytes);
      free(times);
      return;
  }

  if(group.myId == ROOT) {
    MPI_Reduce(MPI_IN_PLACE, times, LR_ARRAY_TAM * loop_iters, MPI_DOUBLE, MPI_MAX, ROOT, comm);
    /*
    printf("PT=%d ", stage->pt);
    for(i=0; i<tam; i++) {
      printf("%lf, ", times[i]);
    }
    printf("\n");
    printf("BYTES ");
    for(i=0; i<tam; i++) {
      printf("%lf, ", bytes[i]);
    }
    printf("\n");
    */
    //if(stage->t_stage < 0.1) {
    //lr_compute(8*loop_iters, bytes, times, &(stage->slope), &(stage->intercept));
    //} else {
    lr_compute(tam, bytes, times, &(stage->slope), &(stage->intercept));
    //}
  } else {
    MPI_Reduce(times, NULL, LR_ARRAY_TAM * loop_iters, MPI_DOUBLE, MPI_MAX, ROOT, comm);
  }

  MPI_Bcast(&(stage->slope), 1, MPI_DOUBLE, ROOT, comm);
  MPI_Bcast(&(stage->intercept), 1, MPI_DOUBLE, ROOT, comm);

  free(times);
  free(bytes);
}
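
/*
 * For reference, a sketch of the model being fitted (the actual lr_compute
 * and lr_calc_Y implementations live in linear_reg.h and may differ):
 * assuming an ordinary least-squares fit of time = slope * bytes + intercept
 * over the collected samples,
 *
 *   slope     = sum((x_i - mean_x) * (y_i - mean_y)) / sum((x_i - mean_x)^2)
 *   intercept = mean_y - slope * mean_x
 *
 * the byte count for a target stage time would then be obtained by
 * inverting the model, bytes ~= (t_stage - intercept) / slope.
 */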


/*
 * ========================================================================================
 * ========================================================================================
 * =================================INIT STAGE FUNCTIONS===================================
 * ========================================================================================
 * ========================================================================================
*/

double init_matrix_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm, int compute) {
  double result, t_stage;

  result = 0;
  t_stage = stage->t_stage * config_file->factors[group.grp];
  initMatrix(&(stage->double_array), config_file->granularity);

  double start_time = MPI_Wtime();
  if(group.myId == ROOT && compute) {
    result+= process_stage(*config_file, *stage, group, comm);
  }

  if(compute) {
    stage->t_op = (MPI_Wtime() - start_time) / stage->operations; // Time per operation
    MPI_Bcast(&(stage->t_op), 1, MPI_DOUBLE, ROOT, comm);
  }
  stage->operations = t_stage / stage->t_op;

  return result;
}

double init_pi_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm, int compute) {
  double result, t_stage, start_time;

  result = 0;
  t_stage = stage->t_stage * config_file->factors[group.grp];	 
  start_time = MPI_Wtime();
  if(group.myId == ROOT && compute) {
    result+= process_stage(*config_file, *stage, group, comm);
  }

  if(compute) {
    stage->t_op = (MPI_Wtime() - start_time) / stage->operations; // Time per operation
    MPI_Bcast(&(stage->t_op), 1, MPI_DOUBLE, ROOT, comm);
  }
  stage->operations = t_stage / stage->t_op;

  return result;
}

void init_comm_ptop_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm) {
  struct Dist_data dist_data;

  if(stage->array != NULL)
    free(stage->array);
  if(stage->bytes == 0) {
    stage->bytes = (stage->t_stage - config_file->latency_m) * config_file->bw_m;
  }
  get_block_dist(stage->bytes, group.myId, group.numP, &dist_data);
  stage->real_bytes = dist_data.tamBl;
  stage->array = malloc(sizeof(char) * stage->real_bytes);
}

double init_comm_bcast_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm) {
  double start_time, time = 0;
  stage->real_bytes = stage->bytes;
  if(stage->bytes == 0) {
    start_time = MPI_Wtime();
    linear_regression_stage(stage, group, comm);
    lr_calc_Y(stage->slope, stage->intercept, stage->t_stage, &(stage->real_bytes));

    time = MPI_Wtime() - start_time;
    if(group.myId == ROOT) {
      MPI_Reduce(MPI_IN_PLACE, &time, 1, MPI_DOUBLE, MPI_MAX, ROOT, comm);
    } else {
      MPI_Reduce(&time, NULL, 1, MPI_DOUBLE, MPI_MAX, ROOT, comm);
    }
  }

  if(stage->array != NULL)
    free(stage->array);
  stage->array = malloc(sizeof(char) * stage->real_bytes);

  return time;
}


double init_comm_allgatherv_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm) {
  double start_time, time = 0;
  struct Dist_data dist_data;

  stage->real_bytes = stage->bytes;
  if(stage->bytes == 0) {
    start_time = MPI_Wtime();
    linear_regression_stage(stage, group, comm);
    lr_calc_Y(stage->slope, stage->intercept, stage->t_stage, &(stage->real_bytes));

    time = MPI_Wtime() - start_time;
    if(group.myId == ROOT) {
      MPI_Reduce(MPI_IN_PLACE, &time, 1, MPI_DOUBLE, MPI_MAX, ROOT, comm);
    } else {
      MPI_Reduce(&time, NULL, 1, MPI_DOUBLE, MPI_MAX, ROOT, comm);
    }
  }

  if(stage->counts.counts != NULL)
    freeCounts(&(stage->counts));
  prepare_comm_allgatherv(group.numP, stage->real_bytes, &(stage->counts));
      
  get_block_dist(stage->real_bytes, group.myId, group.numP, &dist_data);
  stage->my_bytes = dist_data.tamBl;
  if(stage->array != NULL)
    free(stage->array);
  stage->array = malloc(sizeof(char) * stage->my_bytes);
  if(stage->full_array != NULL)
    free(stage->full_array);
  stage->full_array = malloc(sizeof(char) * stage->real_bytes);

  return time;
}

double init_comm_reduce_pt(group_data group, configuration *config_file, iter_stage_t *stage, MPI_Comm comm) {
  double start_time, time = 0;

  stage->real_bytes = stage->bytes;
  if(stage->bytes == 0) {
    start_time = MPI_Wtime();
    linear_regression_stage(stage, group, comm);
    lr_calc_Y(stage->slope, stage->intercept, stage->t_stage, &(stage->real_bytes));

    time = MPI_Wtime() - start_time;
    if(group.myId == ROOT) {
      MPI_Reduce(MPI_IN_PLACE, &time, 1, MPI_DOUBLE, MPI_MAX, ROOT, comm);
    } else {
      MPI_Reduce(&time, NULL, 1, MPI_DOUBLE, MPI_MAX, ROOT, comm);
    }
  }

  if(stage->array != NULL)
    free(stage->array);
  stage->array = malloc(sizeof(char) * stage->real_bytes);
  // The full array for the reduce needs to be the same size
  if(stage->full_array != NULL)
    free(stage->full_array);
  stage->full_array = malloc(sizeof(char) * stage->real_bytes);

  return time;
}