malleabilityManager.c 40.1 KB
Newer Older
1
#include <pthread.h>
2
#include <string.h>
3
4
#include "malleabilityManager.h"
#include "malleabilityStates.h"
5
#include "malleabilityDataStructures.h"
6
#include "malleabilityTypes.h"
iker_martin's avatar
iker_martin committed
7
#include "malleabilityZombies.h"
8
#include "malleabilityTimes.h"
9
#include "malleabilityRMS.h"
10
#include "spawn_methods/GenericSpawn.h"
11
12
13
14
15
#include "CommDist.h"

#define MALLEABILITY_USE_SYNCHRONOUS 0
#define MALLEABILITY_USE_ASYNCHRONOUS 1

16
void MAM_Commit(int *mam_state, int is_children_group);
17
18
19
20

void send_data(int numP_children, malleability_data_t *data_struct, int is_asynchronous);
void recv_data(int numP_parents, malleability_data_t *data_struct, int is_asynchronous);

21

22
23
int MAM_St_rms(int *mam_state);
int MAM_St_spawn_start();
24
25
26
27
28
29
30
31
32
33
34
int MAM_St_spawn_pending(int wait_completed);
int MAM_St_red_start();
int MAM_St_red_pending(int *mam_state, int wait_completed);
int MAM_St_user_pending(int *mam_state, int wait_completed, void (*user_function)(void *), void *user_args);
int MAM_St_user_completed();
int MAM_St_spawn_adapt_pending(int wait_completed);
int MAM_St_spawn_adapted(int *mam_state);
int MAM_St_red_completed(int *mam_state);
int MAM_St_completed(int *mam_state);


35
void Children_init(void (*user_function)(void *), void *user_args);
36
37
int spawn_step();
int start_redistribution();
38
int check_redistribution(int wait_completed);
39
int end_redistribution();
iker_martin's avatar
iker_martin committed
40
int shrink_redistribution();
41
42

int thread_creation();
43
int thread_check(int wait_completed);
44
void* thread_async_work();
45

46
void print_comms_state();
47
void malleability_comms_update(MPI_Comm comm);
48

49
int MAM_I_convert_key(char *key);
50
void MAM_I_create_user_struct(int is_children_group);
51

52
int state = MALL_UNRESERVED; //FIXME Mover a otro lado
53
54
55
56
57
58

malleability_data_t *rep_s_data;
malleability_data_t *dist_s_data;
malleability_data_t *rep_a_data;
malleability_data_t *dist_a_data;

59
60
mam_user_reconf_t *user_reconf;

61
/*
 * Initializes the memory reservations for the malleability module,
 * creating every required structure and duplicating communicators so
 * the module does not interfere with the application.
 *
 * If called by a group of dynamically spawned processes, it also sets
 * up communication with their parents. In that case, once that
 * communication finishes the child processes are ready to execute the
 * application.
 */
71
int MAM_Init(int root, MPI_Comm *comm, char *name_exec, void (*user_function)(void *), void *user_args) {
  MPI_Comm dup_comm, thread_comm;

  // Module-wide configuration/state containers (freed in MAM_Finalize).
  mall_conf = (malleability_config_t *) malloc(sizeof(malleability_config_t));
  mall = (malleability_t *) malloc(sizeof(malleability_t));
  user_reconf = (mam_user_reconf_t *) malloc(sizeof(mam_user_reconf_t));

  MPI_Comm_rank(*comm, &(mall->myId));
  MPI_Comm_size(*comm, &(mall->numP));

  #if USE_MAL_DEBUG
    DEBUG_FUNC("Initializing MaM", mall->myId, mall->numP); fflush(stdout); MPI_Barrier(*comm);
  #endif

  // Descriptors for the four data groups: replicated/distributed x sync/async.
  rep_s_data = (malleability_data_t *) malloc(sizeof(malleability_data_t));
  dist_s_data = (malleability_data_t *) malloc(sizeof(malleability_data_t));
  rep_a_data = (malleability_data_t *) malloc(sizeof(malleability_data_t));
  dist_a_data = (malleability_data_t *) malloc(sizeof(malleability_data_t));

  // Private duplicates of the user communicator so MaM traffic never
  // collides with application traffic.
  MPI_Comm_dup(*comm, &dup_comm);
  MPI_Comm_dup(*comm, &thread_comm);
  MPI_Comm_set_name(dup_comm, "MAM_MAIN");
  MPI_Comm_set_name(thread_comm, "MAM_THREAD");

  mall->root = root;
  mall->root_parents = -1;  // -1 == "no parent group" until proven otherwise
  mall->zombie = 0;
  mall->comm = dup_comm;
  mall->thread_comm = thread_comm;
  mall->user_comm = comm; 
  mall->tmp_comm = MPI_COMM_NULL;

  mall->name_exec = name_exec;
  mall->nodelist = NULL;

  rep_s_data->entries = 0;
  rep_a_data->entries = 0;
  dist_s_data->entries = 0;
  dist_a_data->entries = 0;

  state = MALL_NOT_STARTED;

  zombies_service_init();
  init_malleability_times();
  MAM_Def_main_datatype();

  // If this process was spawned dynamically it has a parent intercomm;
  // in that case run the children initialization path and return early.
  MPI_Comm_get_parent(&(mall->intercomm));
  if(mall->intercomm != MPI_COMM_NULL) { 
    Children_init(user_function, user_args);
    return MALLEABILITY_CHILDREN;
  }

  // Only the original (parent) group inspects the host layout.
  MAM_check_hosts();

  #if USE_MAL_BARRIERS && USE_MAL_DEBUG
    if(mall->myId == mall->root)
      printf("MaM: Using barriers to record times.\n");
  #endif

  #if USE_MAL_DEBUG
    DEBUG_FUNC("MaM has been initialized correctly as parents", mall->myId, mall->numP); fflush(stdout); MPI_Barrier(*comm);
  #endif

  return MALLEABILITY_NOT_CHILDREN;
}

138
139
140
141
142
/*
 * Elimina toda la memoria reservado por el modulo
 * de maleabilidad y asegura que los zombies
 * despierten si los hubiese.
 */
143
/*
 * Frees every resource reserved by the malleability module and makes
 * sure any suspended zombie processes are woken up before leaving.
 */
void MAM_Finalize() {
  // Release the contents of the four data descriptors, then the
  // descriptors themselves.
  free_malleability_data_struct(rep_s_data);
  free_malleability_data_struct(rep_a_data);
  free_malleability_data_struct(dist_s_data);
  free_malleability_data_struct(dist_a_data);

  free(rep_s_data);
  free(rep_a_data);
  free(dist_s_data);
  free(dist_a_data);

  MAM_Free_main_datatype();
  free_malleability_times();
  // Communicators are freed before `mall` itself is released below.
  if(mall->comm != MPI_COMM_WORLD && mall->comm != MPI_COMM_NULL) MPI_Comm_free(&(mall->comm));
  if(mall->thread_comm != MPI_COMM_WORLD && mall->thread_comm != MPI_COMM_NULL) MPI_Comm_free(&(mall->thread_comm));
  if(mall->intercomm != MPI_COMM_WORLD && mall->intercomm != MPI_COMM_NULL) { MPI_Comm_disconnect(&(mall->intercomm)); } //FIXME Error with OpenMPI + Merge
  free(mall);
  free(mall_conf);
  free(user_reconf);

  // Wake any zombies so they can terminate, then tear the service down.
  zombies_awake();
  zombies_service_free();

  state = MALL_UNRESERVED;
}

169
170
/* 
 * TODO Rewrite
 * Checks the malleability state and tries to advance it when possible,
 * working as a state machine. Returns the specific internal state and
 * writes a generic one into the "mam_state" argument.
 *
 * The "wait_completed" argument is used to block until the tasks
 * carried out by MaM have finished.
 */
180
int MAM_Checkpoint(int *mam_state, int wait_completed, void (*user_function)(void *), void *user_args) {
  // Set when a state handler completed its stage and the machine should
  // immediately advance by re-entering MAM_Checkpoint (tail recursion below).
  int call_checkpoint = 0;

  switch(state) {
    case MALL_UNRESERVED:
      *mam_state = MAM_UNRESERVED;
      break;
    case MALL_NOT_STARTED:
      call_checkpoint = MAM_St_rms(mam_state);
      break;
    case MALL_RMS_COMPLETED:
      call_checkpoint = MAM_St_spawn_start();
      break;

    case MALL_SPAWN_PENDING: // Checks whether the spawn has finished
    case MALL_SPAWN_SINGLE_PENDING:
      call_checkpoint = MAM_St_spawn_pending(wait_completed);
      break;

    case MALL_SPAWN_ADAPT_POSTPONE:
    case MALL_SPAWN_COMPLETED:
      call_checkpoint = MAM_St_red_start();
      break;

    case MALL_DIST_PENDING:
      call_checkpoint = MAM_St_red_pending(mam_state, wait_completed);
      break;

    case MALL_USER_PENDING:
      call_checkpoint = MAM_St_user_pending(mam_state, wait_completed, user_function, user_args);
      break;

    case MALL_USER_COMPLETED:
      call_checkpoint = MAM_St_user_completed();
      break;

    case MALL_SPAWN_ADAPT_PENDING:
      call_checkpoint = MAM_St_spawn_adapt_pending(wait_completed);
      break;

    case MALL_SPAWN_ADAPTED:
    case MALL_DIST_COMPLETED:
      call_checkpoint = MAM_St_completed(mam_state);
      break;
  }

  // Advance through as many stages as possible in this call.
  if(call_checkpoint) { MAM_Checkpoint(mam_state, wait_completed, user_function, user_args); }
  // Any intermediate internal state maps to the generic MAM_PENDING.
  if(state > MALL_NOT_STARTED && state < MALL_COMPLETED) *mam_state = MAM_PENDING;
  return state;
}

231
232
233
/*
 * TODO
 */
234
235
236
237
238
239
240
241
242
243
/*
 * Signals that the user-driven stage has finished so the state machine
 * can resume the redistribution; reports the generic pending state.
 */
void MAM_Resume_redistribution(int *mam_state) {
  *mam_state = MAM_PENDING;
  state = MALL_USER_COMPLETED;
}

/*
 * TODO
 */
/*
 * Commits the reconfiguration: collects timing data, frees temporary
 * communicators, handles/terminates zombie processes, updates the main
 * communicators and hands the user a fresh communicator.
 * "rootBcast" is the root used for the inter-group broadcasts.
 */
void MAM_Commit(int *mam_state, int rootBcast) {
  int zombies = 0;
  #if USE_MAL_DEBUG
    if(mall->myId == mall->root){ DEBUG_FUNC("Trying to commit", mall->myId, mall->numP); } fflush(stdout);
  #endif

  // Get times before commiting
  if(mall_conf->spawn_method == MALL_SPAWN_BASELINE) {
    // This communication is only needed when a root process will become a zombie
    malleability_times_broadcast(rootBcast);
  }

  // Free unneeded communicators
  if(mall->tmp_comm != MPI_COMM_WORLD && mall->tmp_comm != MPI_COMM_NULL) MPI_Comm_free(&(mall->tmp_comm));
  if(*(mall->user_comm) != MPI_COMM_WORLD && *(mall->user_comm) != MPI_COMM_NULL) MPI_Comm_free(mall->user_comm);

  // Zombies treatment: for Merge, agree on whether any rank is a zombie
  // (MPI_MAX over the 0/1 flags) and suspend them collectively.
  if(mall_conf->spawn_method == MALL_SPAWN_MERGE) {
    MPI_Allreduce(&mall->zombie, &zombies, 1, MPI_INT, MPI_MAX, mall->comm);
    if(zombies) {
      zombies_collect_suspended(mall->comm);
    }
  }

  // Zombies KILL: a zombie rank tears everything down and exits here.
  if(mall->zombie) {
    #if USE_MAL_DEBUG >= 2
      DEBUG_FUNC("Is terminating as zombie", mall->myId, mall->numP); fflush(stdout);
    #endif
    MAM_Finalize();
    MPI_Finalize();
    exit(0);
  }

  // Reset/Free communicators
  if(mall_conf->spawn_method == MALL_SPAWN_MERGE) { malleability_comms_update(mall->intercomm); }
  if(mall->intercomm != MPI_COMM_NULL && mall->intercomm != MPI_COMM_WORLD) { MPI_Comm_disconnect(&(mall->intercomm)); } //FIXME Error with OpenMPI + Merge

  MPI_Comm_rank(mall->comm, &(mall->myId));
  MPI_Comm_size(mall->comm, &(mall->numP));
  // Children adopt their parents' root; -1 means "no parent group".
  mall->root = mall->root_parents == -1 ? mall->root : mall->root_parents;
  mall->root_parents = -1;
  state = MALL_NOT_STARTED;
  if(mam_state != NULL) *mam_state = MAM_COMPLETED;

  // Set new communicator for the user.
  if(mall_conf->spawn_method == MALL_SPAWN_BASELINE) { *(mall->user_comm) = MPI_COMM_WORLD; }
  else if(mall_conf->spawn_method == MALL_SPAWN_MERGE) { MPI_Comm_dup(mall->comm, mall->user_comm); }
  #if USE_MAL_DEBUG
    if(mall->myId == mall->root) DEBUG_FUNC("Reconfiguration has been commited", mall->myId, mall->numP); fflush(stdout);
  #endif

  #if USE_MAL_BARRIERS
    MPI_Barrier(mall->comm);
  #endif
  mall_conf->times->malleability_end = MPI_Wtime();
}

300
301
302
303
304
305
306
/*
 * Copies the current user-reconfiguration description into "reconf_info".
 * Only valid while a user redistribution is pending; otherwise the
 * request is denied.
 */
int MAM_Get_Reconf_Info(mam_user_reconf_t *reconf_info) {
  if(state == MALL_USER_PENDING) {
    *reconf_info = *user_reconf;
    return 0;
  }
  return MALL_DENIED;
}

307
308
/*
 * Retrieves the recorded reconfiguration times: spawn, synchronous
 * redistribution, asynchronous redistribution and total malleability
 * time. Thin wrapper over the times module.
 */
void MAM_Retrieve_times(double *sp_time, double *sy_time, double *asy_time, double *mall_time) {
  MAM_I_retrieve_times(sp_time, sy_time, asy_time, mall_time);
}

311
void MAM_Set_configuration(int spawn_method, int spawn_strategies, int spawn_dist, int red_method, int red_strategies) {
312
313
  if(state > MALL_NOT_STARTED) return;

314
315
  mall_conf->spawn_method = spawn_method;
  mall_conf->spawn_strategies = spawn_strategies;
316
  mall_conf->spawn_dist = spawn_dist;
317
318
319
320
321
322
323
  mall_conf->red_method = red_method;
  mall_conf->red_strategies = red_strategies;

  if(!malleability_red_contains_strat(mall_conf->red_strategies, MALL_RED_IBARRIER, NULL) && 
	(mall_conf->red_method  == MALL_RED_RMA_LOCK || mall_conf->red_method  == MALL_RED_RMA_LOCKALL)) {
    malleability_red_add_strat(&(mall_conf->red_strategies), MALL_RED_IBARRIER);
  }
324
325
}

326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
/*
 * Sets a single configuration entry identified by the string "key" to
 * "required". The value actually applied is returned through
 * "provided" (for strategy keys this is the accumulated strategy set);
 * an unknown key reports MALL_DENIED.
 */
void MAM_Set_key_configuration(char *key, int required, int *provided) {
  int value = MAM_I_convert_key(key);
  *provided = required;
  switch(value) { //TODO Check whether "required" is valid for this key
    case MAM_SPAWN_METHOD_VALUE:
      mall_conf->spawn_method = required;
      break;
    case MAM_SPAWN_STRATEGIES_VALUE:
      // Strategies accumulate; report the resulting set.
      malleability_spawn_add_strat(&(mall_conf->spawn_strategies), required);
      *provided = mall_conf->spawn_strategies;
      break;
    case MAM_PHYSICAL_DISTRIBUTION_VALUE:
      mall_conf->spawn_dist = required;
      break;
    case MAM_RED_METHOD_VALUE:
      mall_conf->red_method = required;
      break;
    case MAM_RED_STRATEGIES_VALUE:
      malleability_red_add_strat(&(mall_conf->red_strategies), required);
      *provided = mall_conf->red_strategies;
      break;
    case MALL_DENIED:
    default:
      printf("MAM: Key %s does not exist\n", key);
      *provided = MALL_DENIED;
      break;
  }

  // RMA-based redistribution requires the IBARRIER strategy; force it.
  if(!malleability_red_contains_strat(mall_conf->red_strategies, MALL_RED_IBARRIER, NULL) && 
	(mall_conf->red_method  == MALL_RED_RMA_LOCK || mall_conf->red_method  == MALL_RED_RMA_LOCKALL)) {
    malleability_red_add_strat(&(mall_conf->red_strategies), MALL_RED_IBARRIER);
  }
}

360
/*
361
 * Tiene que ser llamado despues de setear la config
362
 */
363
void MAM_Set_target_number(int numC){
364
365
  if(state > MALL_NOT_STARTED) return;

366
  if((mall_conf->spawn_method == MALL_SPAWN_MERGE) && (numC >= mall->numP)) {
367
368
369
370
371
    mall->numC = numC;
    mall->numC_spawned = numC - mall->numP;

    if(numC == mall->numP) { // Migrar
      mall->numC_spawned = numC;
372
      mall_conf->spawn_method = MALL_SPAWN_BASELINE;
373
374
375
376
377
378
379
    }
  } else {
    mall->numC = numC;
    mall->numC_spawned = numC;
  }
}

380
381
382
383
384
385
386
/*
 * Anyade a la estructura concreta de datos elegida
 * el nuevo set de datos "data" de un total de "total_qty" elementos.
 *
 * Los datos variables se tienen que anyadir cuando quieran ser mandados, no antes
 *
 * Mas informacion en la funcion "add_data".
387
 *
388
 */
389
/*
 * Registers a new data set "data" of "total_qty" elements in the
 * matching internal structure (replicated/distributed, sync/async).
 * Variable data must be added right before it is sent, not earlier.
 * See "add_data" for further details.
 */
void malleability_add_data(void *data, size_t total_qty, MPI_Datatype type, int is_replicated, int is_constant) {
  size_t total_reqs = 0;

  if(!is_constant) { // Sync: no requests are ever needed.
    add_data(data, total_qty, type, total_reqs, is_replicated ? rep_s_data : dist_s_data);
    return;
  }

  // Async: the request count depends on the redistribution method.
  if(is_replicated) {
    total_reqs = 1;
    add_data(data, total_qty, type, total_reqs, rep_a_data);
  } else {
    switch(mall_conf->red_method) {
      case MALL_RED_BASELINE:
        total_reqs = 1;
        break;
      case MALL_RED_POINT:
      case MALL_RED_RMA_LOCK:
      case MALL_RED_RMA_LOCKALL:
        total_reqs = mall->numC;
        break;
      default:
        break;
    }
    if(malleability_red_contains_strat(mall_conf->red_strategies, MALL_RED_IBARRIER, NULL)) {
      total_reqs++; // One extra slot for the trailing ibarrier request.
    }

    add_data(data, total_qty, type, total_reqs, dist_a_data);
  }
}

417
418
419
420
421
422
423
424
/*
 * Modifica en la estructura concreta de datos elegida en el indice "index"
 * con el set de datos "data" de un total de "total_qty" elementos.
 *
 * Los datos variables se tienen que modificar cuando quieran ser mandados, no antes
 *
 * Mas informacion en la funcion "modify_data".
 */
425
/*
 * Replaces the data set stored at position "index" of the matching
 * internal structure with "data" of "total_qty" elements.
 * Variable data must be modified right before it is sent, not earlier.
 * See "modify_data" for further details.
 */
void malleability_modify_data(void *data, size_t index, size_t total_qty, MPI_Datatype type, int is_replicated, int is_constant) {
  size_t total_reqs = 0;

  if(!is_constant) { // Sync: no requests are ever needed.
    modify_data(data, index, total_qty, type, total_reqs, is_replicated ? rep_s_data : dist_s_data);
    return;
  }

  // Async: the request count depends on the redistribution method.
  if(is_replicated) {
    total_reqs = 1;
    modify_data(data, index, total_qty, type, total_reqs, rep_a_data); //FIXME total_reqs==0 ??? 
  } else {
    switch(mall_conf->red_method) {
      case MALL_RED_BASELINE:
        total_reqs = 1;
        break;
      case MALL_RED_POINT:
      case MALL_RED_RMA_LOCK:
      case MALL_RED_RMA_LOCKALL:
        total_reqs = mall->numC;
        break;
      default:
        break;
    }
    if(malleability_red_contains_strat(mall_conf->red_strategies, MALL_RED_IBARRIER, NULL)) {
      total_reqs++; // One extra slot for the trailing ibarrier request.
    }

    modify_data(data, index, total_qty, type, total_reqs, dist_a_data);
  }
}

453
454
455
456
/*
 * Devuelve el numero de entradas para la estructura de descripcion de 
 * datos elegida.
 */
457
/*
 * Returns the number of entries stored in the selected data
 * description structure (replicated/distributed, sync/async).
 */
void malleability_get_entries(size_t *entries, int is_replicated, int is_constant){
  malleability_data_t *selected;

  if(is_constant) {
    selected = is_replicated ? rep_a_data : dist_a_data;
  } else {
    selected = is_replicated ? rep_s_data : dist_s_data;
  }

  *entries = selected->entries;
}

/*
 * Devuelve el elemento de la lista "index" al usuario.
 * La devolución es en el mismo orden que lo han metido los padres
 * con la funcion "malleability_add_data()".
 * Es tarea del usuario saber el tipo de esos datos.
 * TODO Refactor a que sea automatico
 */
481
/*
 * Hands back to the user the element stored at position "index",
 * in the same order the parents registered it with
 * "malleability_add_data()". Knowing the element's type is the
 * user's responsibility.
 * TODO Refactor so this is automatic
 */
void malleability_get_data(void **data, size_t index, int is_replicated, int is_constant) {
  malleability_data_t *selected;

  if(is_constant) {
    selected = is_replicated ? rep_a_data : dist_a_data;
  } else {
    selected = is_replicated ? rep_s_data : dist_s_data;
  }

  *data = selected->arrays[index];
}


//======================================================||
//================PRIVATE FUNCTIONS=====================||
//================DATA COMMUNICATION====================||
//======================================================||
//======================================================||

/*
 * Funcion generalizada para enviar datos desde los hijos.
 * La asincronizidad se refiere a si el hilo padre e hijo lo hacen
 * de forma bloqueante o no. El padre puede tener varios hilos.
 */
/*
 * Generalized sending of data towards the children.
 * "is_asynchronous" selects whether the exchange is started in a
 * non-blocking fashion or completed synchronously in place. The
 * parent side may use several threads.
 */
void send_data(int numP_children, malleability_data_t *data_struct, int is_asynchronous) {
  size_t idx;
  void *out_buf, *in_buf;

  if(is_asynchronous) {
    for(idx = 0; idx < data_struct->entries; idx++) {
      out_buf = data_struct->arrays[idx];
      in_buf = NULL;
      async_communication_start(out_buf, &in_buf, data_struct->qty[idx], data_struct->types[idx], mall->myId, mall->numP, numP_children, MALLEABILITY_NOT_CHILDREN, mall_conf->red_method, 
		      mall_conf->red_strategies, mall->intercomm, &(data_struct->requests[idx]), &(data_struct->request_qty[idx]), &(data_struct->windows[idx]));
      // The helper may hand back a replacement buffer; adopt it.
      if(in_buf != NULL) data_struct->arrays[idx] = in_buf;
    }
  } else {
    for(idx = 0; idx < data_struct->entries; idx++) {
      out_buf = data_struct->arrays[idx];
      in_buf = NULL;
      sync_communication(out_buf, &in_buf, data_struct->qty[idx], data_struct->types[idx], mall->myId, mall->numP, numP_children, MALLEABILITY_NOT_CHILDREN, mall_conf->red_method, mall->intercomm);
      if(in_buf != NULL) data_struct->arrays[idx] = in_buf;
    }
  }
}

/*
 * Funcion generalizada para recibir datos desde los hijos.
 * La asincronizidad se refiere a si el hilo padre e hijo lo hacen
 * de forma bloqueante o no. El padre puede tener varios hilos.
 */
/*
 * Generalized reception of data coming from the parents.
 * "is_asynchronous" selects whether the exchange is started in a
 * non-blocking fashion or completed synchronously in place. The
 * parent side may use several threads.
 */
void recv_data(int numP_parents, malleability_data_t *data_struct, int is_asynchronous) {
  size_t idx;
  void *recv_buf, *unused_send = NULL;

  if(is_asynchronous) {
    for(idx = 0; idx < data_struct->entries; idx++) {
      recv_buf = data_struct->arrays[idx];
      async_communication_start(unused_send, &recv_buf, data_struct->qty[idx], data_struct->types[idx], mall->myId, mall->numP, numP_parents, MALLEABILITY_CHILDREN, mall_conf->red_method, mall_conf->red_strategies, 
		      mall->intercomm, &(data_struct->requests[idx]), &(data_struct->request_qty[idx]), &(data_struct->windows[idx]));
      data_struct->arrays[idx] = recv_buf;
    }
  } else {
    for(idx = 0; idx < data_struct->entries; idx++) {
      recv_buf = data_struct->arrays[idx];
      sync_communication(unused_send, &recv_buf, data_struct->qty[idx], data_struct->types[idx], mall->myId, mall->numP, numP_parents, MALLEABILITY_CHILDREN, mall_conf->red_method, mall->intercomm);
      data_struct->arrays[idx] = recv_buf;
    }
  }
}

560
561
562
563
564
565
566

//======================================================||
//================PRIVATE FUNCTIONS=====================||
//====================MAM STAGES========================||
//======================================================||
//======================================================||

567
/*
 * First stage of the state machine: resets the time records, stamps the
 * start of the reconfiguration and moves on to the spawn stage.
 * Always asks the caller to keep advancing (returns 1).
 */
int MAM_St_rms(int *mam_state) {
  *mam_state = MAM_NOT_STARTED;
  state = MALL_RMS_COMPLETED;
  reset_malleability_times();
  // Check whether a resize has to be performed
      
  #if USE_MAL_BARRIERS
    MPI_Barrier(mall->comm);
  #endif
  mall_conf->times->malleability_start = MPI_Wtime();
  //if(CHECK_RMS()) {return MALL_DENIED;}
  return 1;
}
580

581
/*
 * Starts the spawn stage. Marks as zombies the ranks that will not be
 * part of the target group (all ranks for Baseline; surplus ranks for
 * Merge). Returns 1 when the machine can advance immediately, 0 when
 * the spawn is still in progress.
 */
int MAM_St_spawn_start() {
  state = spawn_step();
  //FIXME This is needed but ugly
  if(mall_conf->spawn_method == MALL_SPAWN_MERGE && mall->myId >= mall->numC){ mall->zombie = 1; }
  else if(mall_conf->spawn_method == MALL_SPAWN_BASELINE){ mall->zombie = 1; }

  if (state == MALL_SPAWN_COMPLETED || state == MALL_SPAWN_ADAPT_POSTPONE){
    return 1;
  }
  return 0;
}

/*
 * Polls (or waits for, if "wait_completed" asks so) the background
 * spawn. Once it finishes, records the spawn time and lets the state
 * machine advance (returns 1); otherwise returns 0.
 */
int MAM_St_spawn_pending(int wait_completed) {
  state = check_spawn_state(&(mall->intercomm), mall->comm, wait_completed);
  if (state != MALL_SPAWN_COMPLETED && state != MALL_SPAWN_ADAPTED) {
    return 0;
  }

  #if USE_MAL_BARRIERS
    MPI_Barrier(mall->comm);
  #endif
  mall_conf->times->spawn_time = MPI_Wtime() - mall_conf->times->malleability_start;
  return 1;
}

/*
 * Kicks off the data redistribution stage and always lets the state
 * machine advance (returns 1).
 */
int MAM_St_red_start() {
  state = start_redistribution();
  return 1;
}

/*
 * Polls the ongoing asynchronous redistribution (thread-based or
 * request-based, depending on the THREAD strategy). When it finishes,
 * builds the temporary user communicator (merging the intercomm or
 * duplicating the intracomm) and switches to the user-pending stage.
 * Returns 1 when the machine can advance, 0 while still pending.
 */
int MAM_St_red_pending(int *mam_state, int wait_completed) {
  if(malleability_red_contains_strat(mall_conf->red_strategies, MALL_RED_THREAD, NULL)) {
    state = thread_check(wait_completed);
  } else {
    state = check_redistribution(wait_completed);
  }

  if(state != MALL_DIST_PENDING) { 
    if(mall->is_intercomm) {
      MPI_Intercomm_merge(mall->intercomm, MALLEABILITY_NOT_CHILDREN, &mall->tmp_comm); // The group passing 0 goes first in rank order
    } else {
      MPI_Comm_dup(mall->intercomm, &mall->tmp_comm);
    }
    MPI_Comm_set_name(mall->tmp_comm, "MAM_USER_TMP");
    state = MALL_USER_PENDING;
    *mam_state = MAM_USER_PENDING;
    return 1;
  }
  return 0;
}

/*
 * Runs the user-provided redistribution callback, if any. With
 * "wait_completed" set, the callback is re-invoked until it resumes
 * the state machine via MAM_Resume_redistribution. Without a callback
 * the redistribution is resumed immediately. Returns 1 once the user
 * stage is over, 0 while it is still pending.
 */
int MAM_St_user_pending(int *mam_state, int wait_completed, void (*user_function)(void *), void *user_args) {
  #if USE_MAL_DEBUG
    if(mall->myId == mall->root) DEBUG_FUNC("Starting USER redistribution", mall->myId, mall->numP); fflush(stdout);
  #endif
  if(user_function != NULL) {
    MAM_I_create_user_struct(MALLEABILITY_NOT_CHILDREN);
    do {
      user_function(user_args);
    } while(wait_completed && state == MALL_USER_PENDING);
  } else {
    MAM_Resume_redistribution(mam_state);
  }

  if(state != MALL_USER_PENDING) {
    #if USE_MAL_DEBUG
      if(mall->myId == mall->root) DEBUG_FUNC("Ended USER redistribution", mall->myId, mall->numP); fflush(stdout);
    #endif
    return 1;
  }
  return 0;
}

/*
 * Finishes the redistribution once the user stage has completed and
 * always lets the state machine advance (returns 1).
 */
int MAM_St_user_completed() {
  state = end_redistribution();
  return 1;
}

/*
 * Resumes a spawn that was postponed (ADAPT) until after the
 * redistribution. Restamps the spawn start time, clears the postpone
 * flag and rechecks the spawn state. Returns 1 immediately for
 * non-threaded spawns (spawn time is recorded here); 0 when a
 * pthread-based spawn is still running in the background.
 */
int MAM_St_spawn_adapt_pending(int wait_completed) {
  #if USE_MAL_BARRIERS
    MPI_Barrier(mall->comm);
  #endif
  mall_conf->times->spawn_start = MPI_Wtime();
  unset_spawn_postpone_flag(state);
  state = check_spawn_state(&(mall->intercomm), mall->comm, wait_completed);

  if(!malleability_spawn_contains_strat(mall_conf->spawn_strategies, MALL_SPAWN_PTHREAD, NULL)) {
    #if USE_MAL_BARRIERS
      MPI_Barrier(mall->comm);
    #endif
    mall_conf->times->spawn_time = MPI_Wtime() - mall_conf->times->malleability_start;
    return 1;
  }
  return 0;
}

/*
 * Final stage: commits the reconfiguration. For an intercommunicator
 * the broadcast root follows inter-collective semantics (MPI_ROOT on
 * the root rank, MPI_PROC_NULL elsewhere); for an intracommunicator it
 * is simply the root rank. Never re-enters the machine (returns 0).
 */
int MAM_St_completed(int *mam_state) {
  int bcast_root = mall->root;

  if(mall->is_intercomm) {
    bcast_root = (mall->myId == mall->root) ? MPI_ROOT : MPI_PROC_NULL;
  }

  MAM_Commit(mam_state, bcast_root);
  return 0;
}


689
690
691
692
693
694
695
696
697
698
699
//======================================================||
//================PRIVATE FUNCTIONS=====================||
//=====================CHILDREN=========================||
//======================================================||
//======================================================||
/*
 * Inicializacion de los datos de los hijos.
 * En la misma se reciben datos de los padres: La configuracion
 * de la ejecucion a realizar; y los datos a recibir de los padres
 * ya sea de forma sincrona, asincrona o ambas.
 */
700
/*
 * Initialization of the spawned (children) processes. They receive
 * from the parents the execution configuration and the registered
 * data, synchronously, asynchronously or both, then run the optional
 * user callback and commit the reconfiguration.
 */
void Children_init(void (*user_function)(void *), void *user_args) {
  size_t i;
  int numP_parents;

  #if USE_MAL_DEBUG
    DEBUG_FUNC("MaM will now initialize children", mall->myId, mall->numP); fflush(stdout); MPI_Barrier(MPI_COMM_WORLD);
  #endif

  malleability_connect_children(mall->myId, mall->numP, mall->root, mall->comm, &numP_parents, &mall->root_parents, &(mall->intercomm));
  MPI_Comm_test_inter(mall->intercomm, &mall->is_intercomm);
  if(!mall->is_intercomm) { // For intracommunicators, these processes will be added
    MPI_Comm_rank(mall->intercomm, &(mall->myId));
    MPI_Comm_size(mall->intercomm, &(mall->numP));
  }

  MAM_Comm_main_structures(mall->root_parents);

  #if USE_MAL_DEBUG
    DEBUG_FUNC("Targets have completed spawn step", mall->myId, mall->numP); fflush(stdout); MPI_Barrier(MPI_COMM_WORLD);
  #endif

  comm_data_info(rep_a_data, dist_a_data, MALLEABILITY_CHILDREN, mall->myId, mall->root_parents, mall->intercomm);
  if(dist_a_data->entries || rep_a_data->entries) { // Receive asynchronous data
    #if USE_MAL_DEBUG >= 2
      DEBUG_FUNC("Children start asynchronous redistribution", mall->myId, mall->numP); fflush(stdout); MPI_Barrier(MPI_COMM_WORLD);
    #endif
    #if USE_MAL_BARRIERS
      MPI_Barrier(mall->intercomm);
    #endif

    if(malleability_red_contains_strat(mall_conf->red_strategies, MALL_RED_THREAD, NULL)) {
      // Thread strategy: the parents use a thread, children receive blocking.
      recv_data(numP_parents, dist_a_data, MALLEABILITY_USE_SYNCHRONOUS);
      for(i=0; i<rep_a_data->entries; i++) {
        MPI_Bcast(rep_a_data->arrays[i], rep_a_data->qty[i], rep_a_data->types[i], mall->root_parents, mall->intercomm);
      } 
    } else {
      recv_data(numP_parents, dist_a_data, MALLEABILITY_USE_ASYNCHRONOUS); 

      for(i=0; i<rep_a_data->entries; i++) {
        MPI_Ibcast(rep_a_data->arrays[i], rep_a_data->qty[i], rep_a_data->types[i], mall->root_parents, mall->intercomm, &(rep_a_data->requests[i][0]));
      } 
      #if USE_MAL_DEBUG >= 2
        DEBUG_FUNC("Targets started asynchronous redistribution", mall->myId, mall->numP); fflush(stdout); MPI_Barrier(MPI_COMM_WORLD);
      #endif

      int post_ibarrier = 0; 
      // FIXME Does not allow the use of ibarrier for replicated data right now. Really only one ibarrier is needed for everything
      for(i=0; i<rep_a_data->entries; i++) {
        async_communication_wait(mall->intercomm, rep_a_data->requests[i], rep_a_data->request_qty[i], post_ibarrier);
      }
      if(malleability_red_contains_strat(mall_conf->red_strategies, MALL_RED_IBARRIER, NULL)) { post_ibarrier=1; }
      for(i=0; i<dist_a_data->entries; i++) {
        async_communication_wait(mall->intercomm, dist_a_data->requests[i], dist_a_data->request_qty[i], post_ibarrier);
      }
      #if USE_MAL_DEBUG >= 2
        DEBUG_FUNC("Targets waited for all asynchronous redistributions", mall->myId, mall->numP); fflush(stdout); MPI_Barrier(MPI_COMM_WORLD);
      #endif
      for(i=0; i<dist_a_data->entries; i++) {
        async_communication_end(mall_conf->red_method, mall_conf->red_strategies, dist_a_data->requests[i], dist_a_data->request_qty[i], &(dist_a_data->windows[i]));
      }
      for(i=0; i<rep_a_data->entries; i++) {
        async_communication_end(mall_conf->red_method, mall_conf->red_strategies, rep_a_data->requests[i], rep_a_data->request_qty[i], &(rep_a_data->windows[i]));
      }
    }

    #if USE_MAL_BARRIERS
      MPI_Barrier(mall->intercomm);
    #endif
    mall_conf->times->async_end= MPI_Wtime(); // Timestamp of when the asynchronous communication ends
  }
  #if USE_MAL_DEBUG
    DEBUG_FUNC("Targets have completed asynchronous data redistribution step", mall->myId, mall->numP); fflush(stdout); MPI_Barrier(MPI_COMM_WORLD);
  #endif

  // Build the temporary communicator handed to the user callback.
  if(mall->is_intercomm) {
    MPI_Intercomm_merge(mall->intercomm, MALLEABILITY_CHILDREN, &mall->tmp_comm); // The group passing 0 goes first in rank order
  } else {
    MPI_Comm_dup(mall->intercomm, &mall->tmp_comm);
  }
  MPI_Comm_set_name(mall->tmp_comm, "MAM_USER_TMP");
  mall->numC = numP_parents; // NOTE(review): numC holds the parents' size here — confirm intended
  if(user_function != NULL) {
    state = MALL_USER_PENDING;
    MAM_I_create_user_struct(MALLEABILITY_CHILDREN);
    user_function(user_args);
  }

  comm_data_info(rep_s_data, dist_s_data, MALLEABILITY_CHILDREN, mall->myId, mall->root_parents, mall->intercomm);
  if(dist_s_data->entries || rep_s_data->entries) { // Receive synchronous data
    #if USE_MAL_BARRIERS
      MPI_Barrier(mall->intercomm);
    #endif
    recv_data(numP_parents, dist_s_data, MALLEABILITY_USE_SYNCHRONOUS);

    for(i=0; i<rep_s_data->entries; i++) {
      MPI_Bcast(rep_s_data->arrays[i], rep_s_data->qty[i], rep_s_data->types[i], mall->root_parents, mall->intercomm);
    } 
    #if USE_MAL_BARRIERS
      MPI_Barrier(mall->intercomm);
    #endif
    mall_conf->times->sync_end = MPI_Wtime(); // Timestamp of when the synchronous communication ends
  }
  #if USE_MAL_DEBUG
    DEBUG_FUNC("Targets have completed synchronous data redistribution step", mall->myId, mall->numP); fflush(stdout); MPI_Barrier(MPI_COMM_WORLD);
  #endif

  MAM_Commit(NULL, mall->root_parents);

  #if USE_MAL_DEBUG
    DEBUG_FUNC("MaM has been initialized correctly for new ranks", mall->myId, mall->numP); fflush(stdout); MPI_Barrier(MPI_COMM_WORLD);
  #endif
}

//======================================================||
//================PRIVATE FUNCTIONS=====================||
//=====================PARENTS==========================||
//======================================================||
//======================================================||

/*
 * Se encarga de realizar la creacion de los procesos hijos.
 * Si se pide en segundo plano devuelve el estado actual.
 */
int spawn_step(){
824
825
826
  #if USE_MAL_BARRIERS
    MPI_Barrier(mall->comm);
  #endif
827
  mall_conf->times->spawn_start = MPI_Wtime();
828
 
829
  state = init_spawn(mall->name_exec, mall->num_cpus, mall->num_nodes, mall->nodelist, mall->myId, mall->numP, mall->numC, mall->root, mall_conf->spawn_dist, mall_conf->spawn_method, mall_conf->spawn_strategies, mall->thread_comm, &(mall->intercomm));
830

831
  if(!malleability_spawn_contains_strat(mall_conf->spawn_strategies, MALL_SPAWN_PTHREAD, NULL)) {
832
833
834
      #if USE_MAL_BARRIERS
        MPI_Barrier(mall->comm);
      #endif
835
      mall_conf->times->spawn_time = MPI_Wtime() - mall_conf->times->malleability_start;
836
837
838
839
  }
  return state;
}

/*
 * Comienza la redistribucion de los datos con el nuevo grupo de procesos.
 *
 * Primero se envia la configuracion a utilizar al nuevo grupo de procesos y a continuacion
 * se realiza el envio asincrono y/o sincrono si lo hay.
 *
 * En caso de que haya comunicacion asincrona, se comienza y se termina la funcion 
 * indicando que se ha comenzado un envio asincrono.
 *
 * Si no hay comunicacion asincrono se pasa a realizar la sincrona si la hubiese.
 *
 * Finalmente se envian datos sobre los resultados a los hijos y se desconectan ambos
 * grupos de procesos.
 */
int start_redistribution() {
856
857
  int rootBcast;
  size_t i;
858

859
  mall->is_intercomm = 0;
860
  if(mall->intercomm != MPI_COMM_NULL) {
861
    MPI_Comm_test_inter(mall->intercomm, &mall->is_intercomm);
862
863
864
  } else { 
    // Si no tiene comunicador creado, se debe a que se ha pospuesto el Spawn
    //   y se trata del spawn Merge Shrink
865
    MPI_Comm_dup(mall->comm, &(mall->intercomm));
866
  }
867

868
  if(mall->is_intercomm) {
869
870
871
872
    rootBcast = mall->myId == mall->root ? MPI_ROOT : MPI_PROC_NULL;
  } else {
    rootBcast = mall->root;
  }
873

874
  if(mall_conf->spawn_method == MALL_SPAWN_BASELINE || mall->numP <= mall->numC) { MAM_Comm_main_structures(rootBcast); }
875

876
  comm_data_info(rep_a_data, dist_a_data, MALLEABILITY_NOT_CHILDREN, mall->myId, mall->root, mall->intercomm);
877
  if(dist_a_data->entries || rep_a_data->entries) { // Enviar datos asincronos
878
879
880
    #if USE_MAL_BARRIERS
      MPI_Barrier(mall->intercomm);
    #endif
881
    mall_conf->times->async_start = MPI_Wtime();
882
    if(malleability_red_contains_strat(mall_conf->red_strategies, MALL_RED_THREAD, NULL)) {
883
884
885
      return thread_creation();
    } else {
      send_data(mall->numC, dist_a_data, MALLEABILITY_USE_ASYNCHRONOUS);
886
887
888
      for(i=0; i<rep_a_data->entries; i++) { //FIXME Ibarrier does not work with rep_a_data
        MPI_Ibcast(rep_a_data->arrays[i], rep_a_data->qty[i], rep_a_data->types[i], rootBcast, mall->intercomm, &(rep_a_data->requests[i][0]));
      } 
889
      return MALL_DIST_PENDING; 
890
891
    }
  } 
892
  return MALL_USER_PENDING;
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
}


/*
 * Comprueba si la redistribucion asincrona ha terminado. 
 * Si no ha terminado la funcion termina indicandolo, en caso contrario,
 * se continua con la comunicacion sincrona, el envio de resultados y
 * se desconectan los grupos de procesos.
 *
 * Esta funcion permite dos modos de funcionamiento al comprobar si la
 * comunicacion asincrona ha terminado.
 * Si se utiliza el modo "MAL_USE_NORMAL" o "MAL_USE_POINT", se considera 
 * terminada cuando los padres terminan de enviar.
 * Si se utiliza el modo "MAL_USE_IBARRIER", se considera terminada cuando
 * los hijos han terminado de recibir.
908
 * //FIXME Modificar para que se tenga en cuenta rep_a_data
909
 */
910
int check_redistribution(int wait_completed) {
911
  int completed, local_completed, all_completed, post_ibarrier;
912
  size_t i, req_qty;
913
  MPI_Request *req_completed;
914
  MPI_Win window;
915
  post_ibarrier = 0;
916
  local_completed = 1;
917
  #if USE_MAL_DEBUG >= 2
918
    DEBUG_FUNC("Sources are testing for all asynchronous redistributions", mall->myId, mall->numP); fflush(stdout); MPI_Barrier(MPI_COMM_WORLD);
919
  #endif
920

921
  if(wait_completed) {
922
923
    if(malleability_red_contains_strat(mall_conf->red_strategies, MALL_RED_IBARRIER, NULL)) { 
      if( mall->is_intercomm || mall->myId >= mall->numC) {
924
925
926
927
928
929
930
931
        post_ibarrier=1;
      }
    }
    for(i=0; i<dist_a_data->entries; i++) {
      req_completed = dist_a_data->requests[i];
      req_qty = dist_a_data->request_qty[i];
      async_communication_wait(mall->intercomm, req_completed, req_qty, post_ibarrier);
    }
932
933
934
935
936
    for(i=0; i<rep_a_data->entries; i++) {
      req_completed = rep_a_data->requests[i];
      req_qty = rep_a_data->request_qty[i];
      async_communication_wait(mall->intercomm, req_completed, req_qty, 0); //FIXME Ibarrier does not work with rep_a_data
    }
937
938
939
940
941
942
943
  } else {
    for(i=0; i<dist_a_data->entries; i++) {
      req_completed = dist_a_data->requests[i];
      req_qty = dist_a_data->request_qty[i];
      completed = async_communication_check(mall->myId, MALLEABILITY_NOT_CHILDREN, mall_conf->red_strategies, mall->intercomm, req_completed, req_qty);
      local_completed = local_completed && completed;
    }
944
945
946
947
948
949
    for(i=0; i<rep_a_data->entries; i++) { //FIXME Ibarrier does not work with rep_a_data
      req_completed = rep_a_data->requests[i];
      req_qty = rep_a_data->request_qty[i];
      completed = async_communication_check(mall->myId, MALLEABILITY_NOT_CHILDREN, mall_conf->red_strategies, mall->intercomm, req_completed, req_qty);
      local_completed = local_completed && completed;
    }
950
951
952
953
954
955
    #if USE_MAL_DEBUG >= 2
      DEBUG_FUNC("Sources will now check a global decision", mall->myId, mall->numP); fflush(stdout); MPI_Barrier(MPI_COMM_WORLD);
    #endif

    MPI_Allreduce(&local_completed, &all_completed, 1, MPI_INT, MPI_MIN, mall->comm);
    if(!all_completed) return MALL_DIST_PENDING; // Continue only if asynchronous send has ended 
956
957
  }

958
  #if USE_MAL_DEBUG >= 2
959
    DEBUG_FUNC("Sources sent asynchronous redistributions", mall->myId, mall->numP); fflush(stdout); MPI_Barrier(MPI_COMM_WORLD);
960
  #endif
961

962
963
964
965
966
  for(i=0; i<dist_a_data->entries; i++) {
    req_completed = dist_a_data->requests[i];
    req_qty = dist_a_data->request_qty[i];
    window = dist_a_data->windows[i];
    async_communication_end(mall_conf->red_method, mall_conf->red_strategies, req_completed, req_qty, &window);
967
  }
968
969
970
971
972
973
  for(i=0; i<rep_a_data->entries; i++) {
    req_completed = rep_a_data->requests[i];
    req_qty = rep_a_data->request_qty[i];
    window = rep_a_data->windows[i];
    async_communication_end(mall_conf->red_method, mall_conf->red_strategies, req_completed, req_qty, &window);
  }
974

975
976
977
  #if USE_MAL_BARRIERS
    MPI_Barrier(mall->intercomm);
  #endif
978
979
  if(!mall->is_intercomm) mall_conf->times->async_end = MPI_Wtime(); // Merge method only
  return MALL_USER_PENDING;
980
981
982
983
984
985
986
987
988
989
990
991
}


/*
 * Termina la redistribución de los datos con los hijos, comprobando
 * si se han realizado iteraciones con comunicaciones en segundo plano
 * y enviando cuantas iteraciones se han realizado a los hijos.
 *
 * Además se realizan las comunicaciones síncronas se las hay.
 * Finalmente termina enviando los datos temporales a los hijos.
 */ 
int end_redistribution() {
992
  size_t i;
993
  int rootBcast, local_state;
994

995
  if(mall->is_intercomm) {
996
997
998
999
1000
    rootBcast = mall->myId == mall->root ? MPI_ROOT : MPI_PROC_NULL;
  } else {
    rootBcast = mall->root;
  }
  
1001
  comm_data_info(rep_s_data, dist_s_data, MALLEABILITY_NOT_CHILDREN, mall->myId, mall->root, mall->intercomm);
1002
  if(dist_s_data->entries || rep_s_data->entries) { // Enviar datos sincronos
1003
1004
1005
    #if USE_MAL_BARRIERS
      MPI_Barrier(mall->intercomm);
    #endif
1006
    mall_conf->times->sync_start = MPI_Wtime();
1007
1008
1009
    send_data(mall->numC, dist_s_data, MALLEABILITY_USE_SYNCHRONOUS);

    for(i=0; i<rep_s_data->entries; i++) {
1010
      MPI_Bcast(rep_s_data->arrays[i], rep_s_data->qty[i], rep_s_data->types[i], rootBcast, mall->intercomm);
1011
    } 
1012
1013
1014
    #if USE_MAL_BARRIERS
      MPI_Barrier(mall->intercomm);
    #endif
1015
    if(!mall->is_intercomm) mall_conf->times->sync_end = MPI_Wtime(); // Merge method only
1016
  }
iker_martin's avatar
iker_martin committed
1017

1018
  local_state = MALL_DIST_COMPLETED;
1019
  if(!mall->is_intercomm) { // Merge Spawn
1020
    if(mall->numP > mall->numC) { // Shrink || Merge Shrink requiere de mas tareas
1021
1022
1023
      local_state = MALL_SPAWN_ADAPT_PENDING;
    }
  }
1024

1025
  return local_state;
1026
1027
1028
1029
1030
1031
1032
1033
1034
}

// TODO MOVER A OTRO LADO??
//======================================================||
//================PRIVATE FUNCTIONS=====================||
//===============COMM PARENTS THREADS===================||
//======================================================||
//======================================================||

1035
1036

int comm_state; //FIXME Usar un handler
/*
 * Crea una hebra para ejecutar una comunicación en segundo plano.
 */
int thread_creation() {
1041
  comm_state = MALL_DIST_PENDING;
1042
1043
1044
1045
1046
  if(pthread_create(&(mall->async_thread), NULL, thread_async_work, NULL)) {
    printf("Error al crear el hilo\n");
    MPI_Abort(MPI_COMM_WORLD, -1);
    return -1;
  }
1047
  return comm_state;
1048
1049
1050
1051
1052
1053
1054
1055
}

/*
 * Comprobación por parte de una hebra maestra que indica
 * si una hebra esclava ha terminado su comunicación en segundo plano.
 *
 * El estado de la comunicación es devuelto al finalizar la función. 
 */
1056
int thread_check(int wait_completed) {
1057
  int all_completed = 0;
1058

1059
1060
1061
1062
1063
1064
1065
1066
  if(wait_completed && comm_state == MALL_DIST_PENDING) {
    if(pthread_join(mall->async_thread, NULL)) {
      printf("Error al esperar al hilo\n");
      MPI_Abort(MPI_COMM_WORLD, -1);
      return -2;
    } 
  }

1067
  // Comprueba que todos los hilos han terminado la distribucion (Mismo valor en commAsync)
1068
  MPI_Allreduce(&comm_state, &all_completed, 1, MPI_INT, MPI_MAX, mall->comm);
1069
1070
  if(all_completed != MALL_DIST_COMPLETED) return MALL_DIST_PENDING; // Continue only if asynchronous send has ended 
  //FIXME No se tiene en cuenta el estado MALL_APP_ENDED
1071
1072
1073
1074
1075
1076

  if(pthread_join(mall->async_thread, NULL)) {
    printf("Error al esperar al hilo\n");
    MPI_Abort(MPI_COMM_WORLD, -1);
    return -2;
  } 
1077
1078
1079
1080

  #if USE_MAL_BARRIERS
    MPI_Barrier(mall->intercomm);
  #endif
1081
  if(!mall->is_intercomm) mall_conf->times->async_end = MPI_Wtime(); // Merge method only
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
  return end_redistribution();
}


/*
 * Función ejecutada por una hebra.
 * Ejecuta una comunicación síncrona con los hijos que
 * para el usuario se puede considerar como en segundo plano.
 *
 * Cuando termina la comunicación la hebra maestra puede comprobarlo
 * por el valor "commAsync".
 */
1094
void* thread_async_work() {
1095
1096
1097
1098
1099
1100
1101
1102
1103
  int rootBcast;
  size_t i;

  if(mall->is_intercomm) {
    rootBcast = mall->myId == mall->root ? MPI_ROOT : MPI_PROC_NULL;
  } else {
    rootBcast = mall->root;
  }

1104
  send_data(mall->numC, dist_a_data, MALLEABILITY_USE_SYNCHRONOUS);
1105
1106
1107
  for(i=0; i<rep_a_data->entries; i++) {
    MPI_Bcast(rep_a_data->arrays[i], rep_a_data->qty[i], rep_a_data->types[i], rootBcast, mall->intercomm);
  } 
1108
  comm_state = MALL_DIST_COMPLETED;
1109
1110
  pthread_exit(NULL);
}


//==============================================================================
/*
 * Muestra por pantalla el estado actual de todos los comunicadores
 */
void print_comms_state() {
  int tester;
  char *test = malloc(MPI_MAX_OBJECT_NAME * sizeof(char));

  MPI_Comm_get_name(mall->comm, test, &tester);
  printf("P%d Comm=%d Name=%s\n", mall->myId, mall->comm, test);
1123
1124
  MPI_Comm_get_name(*(mall->user_comm), test, &tester);
  printf("P%d Comm=%d Name=%s\n", mall->myId, *(mall->user_comm), test);
1125
1126
1127
1128
1129
1130
  if(mall->intercomm != MPI_COMM_NULL) {
    MPI_Comm_get_name(mall->intercomm, test, &tester);
    printf("P%d Comm=%d Name=%s\n", mall->myId, mall->intercomm, test);
  }
  free(test);
}
1131

1132
1133
1134
/*
 * Función solo necesaria en Merge
 */
1135
1136
1137
1138
1139
1140
1141
void malleability_comms_update(MPI_Comm comm) {
  if(mall->thread_comm != MPI_COMM_WORLD) MPI_Comm_free(&(mall->thread_comm));
  if(mall->comm != MPI_COMM_WORLD) MPI_Comm_free(&(mall->comm));

  MPI_Comm_dup(comm, &(mall->thread_comm));
  MPI_Comm_dup(comm, &(mall->comm));

1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
  MPI_Comm_set_name(mall->thread_comm, "MAM_THREAD");
  MPI_Comm_set_name(mall->comm, "MAM_MAIN");
}

/*
 * Converts the name of a key into its numeric value.
 * Returns MALL_DENIED when the name matches no known key.
 */
int MAM_I_convert_key(char *key) {
  for(size_t j = 0; j < MAM_KEY_COUNT; j++) {
    if(!strcmp(key, mam_key_names[j])) return (int) j; // Found
  }
  return MALL_DENIED;
}
1159

1160
1161
1162
/*
 * TODO Por hacer
 */
1163
void MAM_I_create_user_struct(int is_children_group) {
1164
1165
1166
  user_reconf->comm = mall->tmp_comm;

  if(is_children_group) {
1167
    user_reconf->rank_state = MAM_PROC_NEW_RANK;
1168
1169
1170
1171
1172
1173
    user_reconf->numS = mall->numC;
    if(mall_conf->spawn_method == MALL_SPAWN_BASELINE) user_reconf->numT = mall->numC;
    else user_reconf->numT = mall->numC + mall->numP;
  } else {
    user_reconf->numS = mall->numP;
    user_reconf->numT = mall->numC;
1174
1175
    if(mall->zombie) user_reconf->rank_state = MAM_PROC_ZOMBIE;
    else user_reconf->rank_state = MAM_PROC_CONTINUE;
1176
1177
  }
}