#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <string.h>
#include "distribution_methods/block_distribution.h"
#include "CommDist.h"

void prepare_redistribution(int qty, int myId, int numP, int numO, int is_children_group, int is_intercomm, char **recv, struct Counts *s_counts, struct Counts *r_counts);

void sync_rma(char *send, char *recv, struct Counts r_counts, int tamBl, MPI_Comm comm, int comm_type);
void sync_rma_lock(char *recv, struct Counts r_counts, MPI_Win win);
void sync_rma_lockall(char *recv, struct Counts r_counts, MPI_Win win);
//////////////////////////
void send_async_arrays(struct Dist_data dist_data, char *array, int numP_child, struct Counts counts, MPI_Request *comm_req);
void recv_async_arrays(struct Dist_data dist_data, char *array, int numP_parents, struct Counts counts, MPI_Request *comm_req);

void send_async_point_arrays(struct Dist_data dist_data, char *array, int numP_child, struct Counts counts, MPI_Request *comm_req);
void recv_async_point_arrays(struct Dist_data dist_data, char *array, int numP_parents, struct Counts counts, MPI_Request *comm_req);

void getIds_intercomm(struct Dist_data dist_data, int numP_other, int **idS);
/*
 * Allocates memory for an array of up to "qty" elements.
 * The "qty" elements are distributed among the "numP" processes
 * that call this function.
 */
void malloc_comm_array(char **array, int qty, int myId, int numP) {
    struct Dist_data dist_data;

    get_block_dist(qty, myId, numP, &dist_data);
    if( (*array = calloc(dist_data.tamBl, sizeof(char))) == NULL) {
      printf("Memory Error (Malloc Arrays(%d))\n", dist_data.tamBl); 
      exit(1); 
    }

/*
        int i;
        for(i=0; i<dist_data.tamBl; i++) {
          (*array)[i] = '!' + i + dist_data.ini;
        }

        printf("P%d Tam %d String: %s\n", myId, dist_data.tamBl, *array);
*/
}
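
/*
 * Usage sketch (hypothetical caller, not part of this file): every process in
 * MPI_COMM_WORLD allocates its local block of a 100-element global array.
 *
 *   char *block = NULL;
 *   int myId, numP;
 *   MPI_Comm_rank(MPI_COMM_WORLD, &myId);
 *   MPI_Comm_size(MPI_COMM_WORLD, &numP);
 *   malloc_comm_array(&block, 100, myId, numP);
 *   // ... work with the local block ...
 *   free(block);
 */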

//================================================================================
//================================================================================
//========================SYNCHRONOUS FUNCTIONS===================================
//================================================================================
//================================================================================

/*
 * Performs a communication to redistribute an array in a block distribution.
 * The redistribution differentiates the parent group from the children group,
 * and the values each group passes for the parameters below can differ.
 *
 * - send (IN):  Array with the data to send. This data cannot be NULL for parents.
 * - recv (OUT): Array where data will be written. A NULL value is allowed if the process is not going to receive data.
 *               If the process receives data and this value is NULL, the behaviour is undefined.
 * - qty  (IN):  Sum of elements shared by all processes that will send data.
 * - myId (IN):  Rank of the MPI process in the local communicator. For the parents it is not necessarily the rank obtained from "comm".
 * - numP (IN):  Size of the local group. If it is a children group, this parameter must correspond to
 *               "MPI_Comm_size(comm)". For the parents it is not always the size obtained from "comm".
 * - numO (IN):  Amount of processes in the remote group. For the parents it is the target quantity of processes after the
 *               resize, while for the children it is the amount of parents.
 * - is_children_group (IN): Indicates whether this MPI rank is a child (TRUE) or a parent (FALSE).
 * - comm_type (IN): Redistribution method to use (baseline collective, point-to-point or RMA).
 * - comm (IN):  Communicator to use to perform the redistribution.
 *
 * returns: An integer indicating if the operation has been completed (TRUE) or not (FALSE). //FIXME In this case it is always true...
 */
int sync_communication(char *send, char **recv, int qty, int myId, int numP, int numO, int is_children_group, int comm_type, MPI_Comm comm) {
    int is_intercomm, aux_comm_used = 0;
    struct Counts s_counts, r_counts;
    struct Dist_data dist_data;
    MPI_Comm aux_comm = MPI_COMM_NULL;

    /* PREPARE COMMUNICATION */
    MPI_Comm_test_inter(comm, &is_intercomm);
    prepare_redistribution(qty, myId, numP, numO, is_children_group, is_intercomm, recv, &s_counts, &r_counts);
    printf("P%d/%d Comm type=%d RMA_LOCK=%d RMA_All=%d\n", myId, numP, comm_type, MALL_RED_RMA_LOCK, MALL_RED_RMA_LOCKALL);

    /* PERFORM COMMUNICATION */
    switch(comm_type) {

      case MALL_RED_RMA_LOCKALL:
      case MALL_RED_RMA_LOCK:
        if(is_children_group) {
          get_block_dist(qty, myId, numP, &dist_data);
        } else {
          get_block_dist(qty, myId, numO, &dist_data);
        }
        if(is_intercomm) {
          MPI_Intercomm_merge(comm, is_children_group, &aux_comm);
          aux_comm_used = 1;
        } else { aux_comm = comm; }
        sync_rma(send, *recv, r_counts, dist_data.tamBl, aux_comm, comm_type);
        break;

      case MALL_RED_POINT:
        //TODO
      case MALL_RED_BASELINE:
      default:
        MPI_Alltoallv(send, s_counts.counts, s_counts.displs, MPI_CHAR, *recv, r_counts.counts, r_counts.displs, MPI_CHAR, comm);
        break;
    }

    if(aux_comm_used) {
      MPI_Comm_free(&aux_comm);
    } 
    freeCounts(&s_counts);
    freeCounts(&r_counts);
    return 1; //FIXME In this case it is always true...
}
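
/*
 * Usage sketch (hypothetical, assuming "comm" already links both groups and
 * that the caller knows qty, numP and numO): parents provide their filled
 * send buffer, children pass send=NULL and get their block through "recv".
 *
 *   char *recv = NULL;
 *   if(is_children_group) {
 *     sync_communication(NULL, &recv, qty, myId, numP, numO, 1, MALL_RED_BASELINE, comm);
 *   } else {
 *     sync_communication(send, &recv, qty, myId, numP, numO, 0, MALL_RED_BASELINE, comm);
 *   }
 */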


/*
 * Performs synchronous MPI-RMA operations to redistribute an array in a block distribution. It should be called after calculating
 * how the data has to be redistributed.
 *
 * - send (IN):  Array with the data to send. This value cannot be NULL for parents.
 * - recv (OUT): Array where data will be written. A NULL value is allowed if the process is not going to receive data.
 *               If the process receives data and this value is NULL, the behaviour is undefined.
 * - r_counts (IN): Structure which describes how many elements this process will receive from each parent, and the
 *               displacements.
 * - tamBl (IN): How many elements are stored in the parameter "send".
 * - comm (IN):  Communicator to use to perform the redistribution. Must be an intracommunicator, as MPI-RMA requires.
 * - comm_type (IN): Type of data redistribution to use. In this case it indicates the RMA operation (Lock or LockAll).
 *
 */
void sync_rma(char *send, char *recv, struct Counts r_counts, int tamBl, MPI_Comm comm, int comm_type) {
  int aux_array_used;
  MPI_Win win;

  aux_array_used = 0;
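  /* MPI_Win_create is collective over "comm", so processes that expose no data
   * (send == NULL) still have to create a window; a 1-byte dummy buffer is
   * exposed for them. */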
  if(send == NULL) {
    tamBl = 1;
    send = malloc(tamBl*sizeof(char));
    aux_array_used = 1;
  }
  MPI_Win_create(send, (MPI_Aint)tamBl, sizeof(char), MPI_INFO_NULL, comm, &win);

  switch(comm_type) {
    case MALL_RED_RMA_LOCKALL:
      sync_rma_lockall(recv, r_counts, win);
      break;
    case MALL_RED_RMA_LOCK:
      sync_rma_lock(recv, r_counts, win);
      break;
  }

  MPI_Win_free(&win);
  if(aux_array_used) { 
    free(send);
    send = NULL;
  }
}



/*
 * Performs a passive MPI-RMA data redistribution for a single array using the passive epochs Lock/Unlock.
 * - recv (OUT): Array where data will be written. A NULL value is allowed if the process is not going to receive data.
 *               If the process receives data and this value is NULL, the behaviour is undefined.
 * - r_counts (IN): Structure which describes how many elements this process will receive from each parent, and the
 *               displacements.
 * - win (IN):   Window to use to perform the redistribution.
 *
 */
void sync_rma_lock(char *recv, struct Counts r_counts, MPI_Win win) {
  int i, target_displs;

  target_displs = r_counts.first_target_displs;
  for(i=r_counts.idI; i<r_counts.idE; i++) {
    MPI_Win_lock(MPI_LOCK_SHARED, i, MPI_MODE_NOCHECK, win);
    MPI_Get(recv+r_counts.displs[i], r_counts.counts[i], MPI_CHAR, i, target_displs, r_counts.counts[i], MPI_CHAR, win);
    MPI_Win_unlock(i, win);
    target_displs=0;
  }
}


/*
 * Performs a passive MPI-RMA data redistribution for a single array using the passive epochs Lockall/Unlockall.
 * - recv (OUT): Array where data will be written. A NULL value is allowed if the process is not going to receive data.
 *               If the process receives data and this value is NULL, the behaviour is undefined.
 * - r_counts (IN): Structure which describes how many elements this process will receive from each parent, and the
 *               displacements.
 * - win (IN):   Window to use to perform the redistribution.
 *
 */
void sync_rma_lockall(char *recv, struct Counts r_counts, MPI_Win win) {
  int i, target_displs;

  target_displs = r_counts.first_target_displs;
  MPI_Win_lock_all(MPI_MODE_NOCHECK, win);
  for(i=r_counts.idI; i<r_counts.idE; i++) {
    MPI_Get(recv+r_counts.displs[i], r_counts.counts[i], MPI_CHAR, i, target_displs, r_counts.counts[i], MPI_CHAR, win);
    target_displs=0;
  }
  MPI_Win_unlock_all(win);
}
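
/*
 * Design note: sync_rma_lock opens and closes one passive epoch per target,
 * serialising the transfers, while sync_rma_lockall opens a single epoch over
 * all targets so the MPI_Get calls can proceed concurrently. Which one is
 * faster depends on the MPI implementation and the number of targets.
 */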

//================================================================================
//================================================================================
//========================ASYNCHRONOUS FUNCTIONS==================================
//================================================================================
//================================================================================

/*
 * Performs an asynchronous send of the array "array" from this group of
 * processes to the group linked by the intercommunicator "intercomm".
 *
 * The MPI_Request handles are returned through "comm_req" so the caller can
 * check whether the communication has finished.
 *
 * The array is not modified by this function.
 */
int send_async(char *array, int qty, int myId, int numP, MPI_Comm intercomm, int numP_child, MPI_Request **comm_req, int parents_wait) {
    int i;
    int *idS = NULL;
    struct Counts counts;
    struct Dist_data dist_data;

    get_block_dist(qty, myId, numP, &dist_data); // Distribution of this process within its group
    dist_data.intercomm = intercomm;

    // Create arrays containing info about how many elements will be sent to each child process
    mallocCounts(&counts, numP_child);

    getIds_intercomm(dist_data, numP_child, &idS); // Obtain the range of child ranks this process sends data to

    // MAL_USE_THREAD follows the synchronous path
    if(parents_wait == MAL_USE_NORMAL) {
      //*comm_req = (MPI_Request *) malloc(sizeof(MPI_Request));
      (*comm_req)[0] = MPI_REQUEST_NULL;
      send_async_arrays(dist_data, array, numP_child, counts, &((*comm_req)[0]));

    } else if (parents_wait == MAL_USE_IBARRIER){
      //*comm_req = (MPI_Request *) malloc(2 * sizeof(MPI_Request));
      (*comm_req)[0] = MPI_REQUEST_NULL;
      (*comm_req)[1] = MPI_REQUEST_NULL;
      send_async_arrays(dist_data, array, numP_child, counts, &((*comm_req)[1])); 
      MPI_Ibarrier(intercomm, &((*comm_req)[0]) );
    } else if (parents_wait == MAL_USE_POINT){
      //*comm_req = (MPI_Request *) malloc(numP_child * sizeof(MPI_Request));
      for(i=0; i<numP_child; i++){
        (*comm_req)[i] = MPI_REQUEST_NULL;
      }
      send_async_point_arrays(dist_data, array, numP_child, counts, *comm_req); 
    } else if (parents_wait == MAL_USE_THREAD) { //TODO 
    }

    freeCounts(&counts);
    free(idS);

    return 1;
}
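
/*
 * Usage sketch (hypothetical caller on the parents' side): with MAL_USE_NORMAL
 * a single request is used, so "reqs" must point to at least one MPI_Request.
 * The caller can then overlap computation with the send.
 *
 *   MPI_Request *reqs = malloc(sizeof(MPI_Request));
 *   int done = 0;
 *   send_async(array, qty, myId, numP, intercomm, numP_child, &reqs, MAL_USE_NORMAL);
 *   while(!done) {
 *     MPI_Test(&reqs[0], &done, MPI_STATUS_IGNORE);
 *     // ... overlap computation here ...
 *   }
 *   free(reqs);
 */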

/*
 * Performs an asynchronous reception of the array "array" into this group of
 * processes from the group linked by the intercommunicator "intercomm".
 *
 * The array is allocated inside this function and returned through the same
 * argument. It has to be freed later by the user.
 *
 * The argument "parents_wait" indicates whether to use the version in which
 * the parents wait until they finish sending, or the one in which they wait
 * until the children finish receiving.
 */
void recv_async(char **array, int qty, int myId, int numP, MPI_Comm intercomm, int numP_parents, int parents_wait) {
    int *idS = NULL;
    int wait_err = MPI_SUCCESS, i;
    struct Counts counts;
    struct Dist_data dist_data;
    MPI_Request *comm_req = NULL, aux;

    // Obtain the distribution for this child
    get_block_dist(qty, myId, numP, &dist_data);
    *array = malloc( dist_data.tamBl * sizeof(char));
    dist_data.intercomm = intercomm;

    /* PREPARE RECEPTION DATA FOR THE ARRAY */
    mallocCounts(&counts, numP_parents);

    getIds_intercomm(dist_data, numP_parents, &idS); // Obtain the range of parent ranks this process receives data from

    // MAL_USE_THREAD follows the synchronous path
    if(parents_wait == MAL_USE_POINT) {
      comm_req = (MPI_Request *) malloc(numP_parents * sizeof(MPI_Request));
      for(i=0; i<numP_parents; i++){
        comm_req[i] = MPI_REQUEST_NULL;
      }
      recv_async_point_arrays(dist_data, *array, numP_parents, counts, comm_req);
      wait_err = MPI_Waitall(numP_parents, comm_req, MPI_STATUSES_IGNORE);

    } else if (parents_wait == MAL_USE_NORMAL || parents_wait == MAL_USE_IBARRIER) {
      comm_req = (MPI_Request *) malloc(sizeof(MPI_Request));
      *comm_req = MPI_REQUEST_NULL;
      recv_async_arrays(dist_data, *array, numP_parents, counts, comm_req);
      wait_err = MPI_Wait(comm_req, MPI_STATUS_IGNORE);
    } else if (parents_wait == MAL_USE_THREAD) { //TODO
    }

    if(wait_err != MPI_SUCCESS) {
      MPI_Abort(MPI_COMM_WORLD, wait_err);
    }

    if(parents_wait == MAL_USE_IBARRIER) { //MAL USE IBARRIER END
      MPI_Ibarrier(intercomm, &aux);
      MPI_Wait(&aux, MPI_STATUS_IGNORE); //It is necessary to check that the communication has finished before disconnecting the process groups
    }

    //printf("S%d Tam %d String: %s END\n", myId, dist_data.tamBl, *array);
    freeCounts(&counts);
    free(idS);
    free(comm_req);
}
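
/*
 * Usage sketch (hypothetical caller on the children's side, matching the
 * send_async example above): the array is allocated inside recv_async and has
 * to be freed by the caller once it is no longer needed.
 *
 *   char *array = NULL;
 *   recv_async(&array, qty, myId, numP, intercomm, numP_parents, MAL_USE_NORMAL);
 *   // ... "array" now holds this child's block of the redistributed data ...
 *   free(array);
 */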

/*
 * Sends to the children an array which is redistributed among the child
 * processes. Before performing the communication, each parent process
 * calculates to which processes of the other group it transmits elements.
 *
 * The send is performed with a single collective communication.
 */
void send_async_arrays(struct Dist_data dist_data, char *array, int numP_child, struct Counts counts, MPI_Request *comm_req) {

    prepare_comm_alltoall(dist_data.myId, dist_data.numP, numP_child, dist_data.qty, &counts);

    /* DATA COMMUNICATION */
    MPI_Ialltoallv(array, counts.counts, counts.displs, MPI_CHAR, NULL, counts.zero_arr, counts.zero_arr, MPI_CHAR, dist_data.intercomm, comm_req);
}

/*
 * Sends to the children an array which is redistributed among the child
 * processes. Before performing the communication, each parent process
 * calculates to which processes of the other group it transmits elements.
 *
 * The send is performed with several point-to-point communications.
 */
void send_async_point_arrays(struct Dist_data dist_data, char *array, int numP_child, struct Counts counts, MPI_Request *comm_req) {
    int i;
    // PREPARE THE SEND OF THE ARRAY
    prepare_comm_alltoall(dist_data.myId, dist_data.numP, numP_child, dist_data.qty, &counts);

    for(i=0; i<numP_child; i++) { //TODO This proposal no longer uses idI and idE
      if(counts.counts[i] != 0) {
        MPI_Isend(array+counts.displs[i], counts.counts[i], MPI_CHAR, i, 99, dist_data.intercomm, &(comm_req[i]));
      }
    }
    //print_counts(dist_data, counts.counts, counts.displs, numP_child, "Padres");
}

/*
 * Receives from the parents an array which is redistributed among the
 * processes of this group. Before performing the communication, each child
 * calculates from which processes of the other group it receives elements.
 *
 * The reception is performed with a single collective communication.
 */
void recv_async_arrays(struct Dist_data dist_data, char *array, int numP_parents, struct Counts counts, MPI_Request *comm_req) {
    char *aux = malloc(1);

    // Adjust the reception values
    prepare_comm_alltoall(dist_data.myId, dist_data.numP, numP_parents, dist_data.qty, &counts);
    //print_counts(dist_data, counts.counts, counts.displs, numP_parents, "Hijos");

    /* DATA COMMUNICATION */
    MPI_Ialltoallv(aux, counts.zero_arr, counts.zero_arr, MPI_CHAR, array, counts.counts, counts.displs, MPI_CHAR, dist_data.intercomm, comm_req);
    free(aux); //FIXME "aux" is freed while the nonblocking communication may still be active
}

/*
 * Receives from the parents an array which is redistributed among the
 * processes of this group. Before performing the communication, each child
 * calculates from which processes of the other group it receives elements.
 *
 * The reception is performed with several point-to-point communications.
 */
void recv_async_point_arrays(struct Dist_data dist_data, char *array, int numP_parents, struct Counts counts, MPI_Request *comm_req) {
    int i;

    // Adjust the reception values
    prepare_comm_alltoall(dist_data.myId, dist_data.numP, numP_parents, dist_data.qty, &counts);

    for(i=0; i<numP_parents; i++) { //TODO This proposal no longer uses idI and idE
      if(counts.counts[i] != 0) {
        MPI_Irecv(array+counts.displs[i], counts.counts[i], MPI_CHAR, i, 99, dist_data.intercomm, &(comm_req[i])); //FIXME Buffer recv
      }
    }
    //print_counts(dist_data, counts.counts, counts.displs, numP_parents, "Hijos");
}

/*
 * ========================================================================================
 * ========================================================================================
 * ================================DISTRIBUTION FUNCTIONS==================================
 * ========================================================================================
 * ========================================================================================
*/

/*
 * Calculates how data has to be redistributed for an array in a block distribution. For each process it computes
 * how many elements it sends to/receives from the processes of the other group.
 *
 * - qty  (IN):  Sum of elements shared by all processes that will send data.
 * - myId (IN):  Rank of the MPI process in the local communicator. For the parents it is not necessarily the rank obtained from "comm".
 * - numP (IN):  Size of the local group. If it is a children group, this parameter must correspond to
 *               "MPI_Comm_size(comm)". For the parents it is not always the size obtained from "comm".
 * - numO (IN):  Amount of processes in the remote group. For the parents it is the target quantity of processes after the
 *               resize, while for the children it is the amount of parents.
 * - is_children_group (IN): Indicates whether this MPI rank is a child (TRUE) or a parent (FALSE).
 * - is_intercomm (IN): Indicates whether the communicator used is an intercommunicator (TRUE) or an intracommunicator (FALSE).
 * - recv (OUT): Array where data will be written. A NULL value is allowed if the process is not going to receive data.
 *               If the process receives data and this value is NULL, the behaviour is undefined.
 * - s_counts (OUT): Struct which indicates how many elements this process sends to the processes of the new group.
 * - r_counts (OUT): Struct which indicates how many elements this process receives from the processes of the previous group.
 */
void prepare_redistribution(int qty, int myId, int numP, int numO, int is_children_group, int is_intercomm, char **recv, struct Counts *s_counts, struct Counts *r_counts) {
  struct Dist_data dist_data;

  if(is_children_group) {
    mallocCounts(s_counts, numO);
    prepare_comm_alltoall(myId, numP, numO, qty, r_counts);
    // Obtain the distribution for this child
    get_block_dist(qty, myId, numP, &dist_data);
    *recv = malloc(dist_data.tamBl * sizeof(char));
    print_counts(dist_data, r_counts->counts, r_counts->displs, numO, 1, "Children C");
  } else {
    get_block_dist(qty, myId, numP, &dist_data);
    prepare_comm_alltoall(myId, numP, numO, qty, s_counts);

    if(is_intercomm) {
      mallocCounts(r_counts, numO);
    } else {
      if(myId < numO) {
        prepare_comm_alltoall(myId, numO, numP, qty, r_counts);
        // Obtain the distribution for this child
        get_block_dist(qty, myId, numO, &dist_data);
        *recv = malloc(dist_data.tamBl * sizeof(char));
      } else {
        mallocCounts(r_counts, numP);
      }	
      print_counts(dist_data, r_counts->counts, r_counts->displs, numP, 1, "Children P ");
    }
    print_counts(dist_data, s_counts->counts, s_counts->displs, numO, 1, "Parents ");
  }
}
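
/*
 * Worked example (assumed values): resizing from numP=2 parents to numO=3
 * children with qty=10 over an intercommunicator. The block distribution gives
 * the children blocks of 4,3,3 elements and the parents blocks of 5,5, so:
 *   - parent 0 (elements 0..4) gets s_counts = {4,1,0}
 *   - parent 1 (elements 5..9) gets s_counts = {0,2,3}
 *   - child  1 (elements 4..6) gets r_counts = {1,2}
 */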



/*
 * For a process in one group, obtains the range of ranks of the
 * other group it has to send data to or receive data from.
 *
 * Returns the first identifier and the last one (excluded) to
 * communicate with.
 */
void getIds_intercomm(struct Dist_data dist_data, int numP_other, int **idS) {
    int idI, idE;
    int tamOther = dist_data.qty / numP_other;
    int remOther = dist_data.qty % numP_other;

    // Indicates the cut point in the external group of processes that
    // separates the processes with block size tamOther + 1
    // from those with block size tamOther
    int middle = (tamOther + 1) * remOther;

    // Compute idI taking into account whether this process communicates
    // with a remote process of size tamOther or tamOther+1
    if(middle > dist_data.ini) { // First subgroup (tamOther+1)
      idI = dist_data.ini / (tamOther + 1);
    } else { // Second subgroup (tamOther)
      idI = ((dist_data.ini - middle) / tamOther) + remOther;
    }

    // Compute idE taking into account whether this process communicates
    // with a remote process of size tamOther or tamOther+1
    if(middle >= dist_data.fin) { // First subgroup (tamOther +1)
      idE = dist_data.fin / (tamOther + 1);
      idE = (dist_data.fin % (tamOther + 1) > 0 && idE+1 <= numP_other) ? idE+1 : idE;
    } else { // Second subgroup (tamOther)
      idE = ((dist_data.fin - middle) / tamOther) + remOther;
      idE = ((dist_data.fin - middle) % tamOther > 0 && idE+1 <= numP_other) ? idE+1 : idE;
    }

    *idS = malloc(2 * sizeof(int));
    (*idS)[0] = idI;
    (*idS)[1] = idE;
}
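
/*
 * Worked example (assumed values): qty=10 and numP_other=3 give tamOther=3,
 * remOther=1 and middle=4, i.e. remote rank 0 holds 4 elements (0..3) and
 * ranks 1 and 2 hold 3 each. A process with ini=4 and fin=7 starts at the cut
 * point, so idI = ((4-4)/3)+1 = 1 and idE = ((7-4)/3)+1 = 2 with no remainder,
 * meaning it only communicates with remote rank 1.
 */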