#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sched.h>
#include <mpi.h>
#include "MAM_RMS.h"
#include "MAM_DataStructures.h"


#if USE_MAL_SLURM
#include <slurm/slurm.h>
int MAM_I_slurm_getenv_hosts_info();
int MAM_I_slurm_getjob_hosts_info();
#endif

int MAM_I_get_hosts_info();
int GetCPUCount();

void MAM_check_hosts() {
  int not_filled = 1;
  
  #if USE_MAL_SLURM
    not_filled = MAM_I_slurm_getenv_hosts_info();
    if(not_filled) {
      if(mall->nodelist != NULL) {
        free(mall->nodelist);
        mall->nodelist = NULL;
      }

      not_filled = MAM_I_slurm_getjob_hosts_info();
    }
  #endif
  if(not_filled) {
    if(mall->nodelist != NULL) {
      free(mall->nodelist);
      mall->nodelist = NULL;
    }

    not_filled = MAM_I_get_hosts_info();
  }

  if(not_filled) {
    if(mall->myId == mall->root) printf("MAM FATAL ERROR: Unable to obtain the nodelist\n");
    fflush(stdout);
    MPI_Abort(mall->comm, -50);
  }

  #if USE_MAL_DEBUG >= 2
    if(mall->myId == mall->root) {
      DEBUG_FUNC("Obtained Nodelist", mall->myId, mall->numP); 
      printf("NODELIST: %s\nNODE_COUNT: %d NUM_CPUS_PER_NODE: %d\n", mall->nodelist, mall->num_nodes, mall->num_cpus);
      fflush(stdout); 
    }
  #endif
}
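
/*
 * Illustrative call sequence (hypothetical caller, not an existing MaM call
 * site): once MAM_check_hosts() returns, the globals it fills can be
 * consumed directly.
 *
 *   MAM_check_hosts(); // Tries SLURM env vars, then the SLURM job API, then plain MPI
 *   printf("Nodes: %d [%s], CPUs per node: %d\n",
 *          mall->num_nodes, mall->nodelist, mall->num_cpus);
 */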

/*
 * @brief Check whether a group of processes spans more than one node
 *
 * This function checks the physical distribution of all ranks in the
 * original communicator passed to MaM. If all of them reside on the
 * same host, false is returned; otherwise, true is returned.
 *
 * @return Integer indicating whether more than one node is used by the
 * original communicator (>0) or only one (0).
 */
int MAM_Is_internode_group() {
  int i, name_len, max_name_len, unique_count;
  int myId, numP;
  char *my_host, *all_hosts, *tested_host;

  MPI_Comm_rank(mall->original_comm, &myId);
  MPI_Comm_size(mall->original_comm, &numP);

  unique_count = 0; //First node is not counted
  if(numP == 1) return unique_count;

  all_hosts = NULL;
  my_host = (char *) malloc(MPI_MAX_PROCESSOR_NAME * sizeof(char));
  MPI_Get_processor_name(my_host, &name_len);

  MPI_Allreduce(&name_len, &max_name_len, 1, MPI_INT, MPI_MAX, mall->original_comm);
  max_name_len++; // Len does not consider the terminating character
  memset(my_host + name_len, '\0', max_name_len - name_len); // Pad so the fixed-size Gather sends initialised bytes
  if(myId == MAM_ROOT) {
    all_hosts = (char *) malloc(numP * max_name_len * sizeof(char));
  }
  // The fixed-size Gather is safe: the unused tail of my_host was padded with '\0' above
  MPI_Gather(my_host, max_name_len, MPI_CHAR, all_hosts, max_name_len, MPI_CHAR, MAM_ROOT, mall->original_comm);

  if(myId == MAM_ROOT) {
    for (i = 1; i < numP; i++) {
      tested_host = all_hosts + (i * max_name_len);
      if (strcmp(my_host, tested_host) != 0) {
        unique_count++;
        break;
      }
    }
    free(all_hosts);
  }
  MPI_Bcast(&unique_count, 1, MPI_INT, MAM_ROOT, mall->original_comm);
  free(my_host);
  return unique_count;
}
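
/*
 * Usage sketch (illustrative only, not an existing MaM call site): the
 * result can drive the choice between intra-node and inter-node
 * reconfiguration strategies.
 *
 *   if (MAM_Is_internode_group()) {
 *     // Ranks span several hosts: a network-aware strategy is required
 *   } else {
 *     // All ranks share one host: shared-memory strategies may apply
 *   }
 */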

/*
 * TODO
 * FIXME Does not consider heterogeneous machines for num_cpus
 * FIXME Always returns 0 -- Perform error checking?
 */
int MAM_I_get_hosts_info() {
  int i, j, name_len, max_name_len, unique_count, *unique_hosts;
  char *my_host, *all_hosts, *confirmed_host, *tested_host;

  all_hosts = NULL;
  my_host = (char *) malloc(MPI_MAX_PROCESSOR_NAME * sizeof(char));
  MPI_Get_processor_name(my_host, &name_len);

  MPI_Allreduce(&name_len, &max_name_len, 1, MPI_INT, MPI_MAX, mall->comm);
  max_name_len++; // Len does not consider the terminating character
  memset(my_host + name_len, '\0', max_name_len - name_len); // Pad so the fixed-size Gather sends initialised bytes
  if(mall->myId == mall->root) {
    all_hosts = (char *) malloc(mall->numP * max_name_len * sizeof(char));
    unique_hosts = (int *) malloc(mall->numP * sizeof(int));
    unique_hosts[0] = 0; //First host will always be unique
    unique_count = 1;
  }
  // The fixed-size Gather is safe: the unused tail of my_host was padded with '\0' above
  MPI_Gather(my_host, max_name_len, MPI_CHAR, all_hosts, max_name_len, MPI_CHAR, mall->root, mall->comm);

  if(mall->myId == mall->root) {
    for (i = 1; i < mall->numP; i++) {
      int is_unique = 1;
      tested_host = all_hosts + (i * max_name_len);
      // A host is unique only if it matches none of the already confirmed hosts
      for (j = 0; j < unique_count; j++) {
        confirmed_host = all_hosts + (unique_hosts[j] * max_name_len);
        if (strcmp(tested_host, confirmed_host) == 0) {
          is_unique = 0;
          break;
        }
      }
      if (is_unique) {
        unique_hosts[unique_count] = i;
        unique_count++;
      }
    }

    mall->num_nodes = unique_count;
    mall->num_cpus = GetCPUCount();
    mall->nodelist_len = unique_count*max_name_len;
    mall->nodelist = (char *) malloc(mall->nodelist_len * sizeof(char));

    // Build the comma-separated nodelist with a running write pointer to avoid quadratic strcat
    char *write_ptr = mall->nodelist;
    for (i = 0; i < unique_count; i++) {
      confirmed_host = all_hosts + (unique_hosts[i] * max_name_len);
      write_ptr += sprintf(write_ptr, "%s%s", confirmed_host, i < unique_count - 1 ? "," : "");
    }

    free(all_hosts);
    free(unique_hosts);
  }

  free(my_host);
  return 0;
}
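
/*
 * Worked example (hypothetical host names): with four ranks placed on the
 * hosts n01, n01, n02 and n03, the root fills the structure as
 *
 *   mall->num_nodes = 3
 *   mall->nodelist  = "n01,n02,n03"
 *   mall->num_cpus  = GetCPUCount() of the root process
 */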

/*
 * @brief Get the total number of CPUs available to the process.
 *
 * This function uses sched_getaffinity to obtain the CPU affinity of the
 * current process and counts the CPUs in the resulting set. If the affinity
 * cannot be queried, it falls back to the number of online processors.
 *
 * @return The total number of CPUs available to the process.
 *
 * Based on: https://stackoverflow.com/questions/4586405/how-to-get-the-number-of-cpus-in-linux-using-c
 * The code has been slightly modified.
 */
int GetCPUCount() {
  cpu_set_t cs;
  CPU_ZERO(&cs);
  if (sched_getaffinity(0, sizeof(cs), &cs) != 0) {
    // Affinity could not be queried -- fall back to the number of online processors
    return (int) sysconf(_SC_NPROCESSORS_ONLN);
  }
  // CPU_COUNT handles sparse masks; a loop that stops at the first unset CPU would undercount
  return CPU_COUNT(&cs);
}
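
/*
 * Minimal usage sketch (illustrative only; cpu_count_demo.c is a
 * hypothetical driver compiled together with this file, assuming a Linux
 * host with glibc):
 *
 *   // taskset -c 0,2,4 ./cpu_count_demo   -> prints "Available CPUs: 3"
 *   #include <stdio.h>
 *   int GetCPUCount();
 *   int main() {
 *     printf("Available CPUs: %d\n", GetCPUCount());
 *     return 0;
 *   }
 */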

#if USE_MAL_SLURM
/*
 * TODO
 */
int MAM_I_slurm_getenv_hosts_info() {
  char *tmp = NULL, *tmp_copy, *token;
  int cpus, count;
  //int i, *cpus_counts, *nodes_counts, *aux;
  
  tmp = getenv("SLURM_JOB_NUM_NODES");
  if(tmp == NULL) return 1;
  mall->num_nodes = atoi(tmp);
  tmp = NULL;

  tmp = getenv("SLURM_JOB_NODELIST");
  if(tmp == NULL) return 1;
  mall->nodelist_len = strlen(tmp)+1;
  mall->nodelist = (char *) malloc(mall->nodelist_len * sizeof(char));
  strcpy(mall->nodelist, tmp);
  tmp = NULL;


  //EXAMPLE - SLURM_JOB_CPUS_PER_NODE='72(x2),36'
  //It indicates that two nodes have 72 CPUs each and a third has 36 CPUs
  //(see the standalone parsing sketch after this function)
  tmp = getenv("SLURM_JOB_CPUS_PER_NODE");
  if(tmp == NULL) return 1;

  tmp_copy = (char *) malloc((strlen(tmp)+1) * sizeof(char));
  strcpy(tmp_copy, tmp);
  token = strtok(tmp_copy, ",");
  //TODO When MaM considers heterogeneous allocations, these will be needed instead of num_cpus.
  //cpus_counts = (int *) malloc(mall->num_nodes * sizeof(int));
  //nodes_counts = (int *) malloc(mall->num_nodes * sizeof(int));
  //i = 0;
  mall->num_cpus = 0;

  while (token != NULL) {
    // If the current token describes a single node, the second portion
    // is absent and sscanf leaves "count" untouched
    // First portion --> "%d"
    // Second portion -> "(x%d)"
    count = 1;
    if (sscanf(token, "%d(x%d)", &cpus, &count) >= 1) {
      mall->num_cpus = cpus; // num_cpus stores the CPUs per node; the last parsed token wins
      //cpus_counts[i] = cpus;
      //nodes_counts[i] = count;
      //i++;
    }
    token = strtok(NULL, ",");
  }
  /*
  if(i < mall->num_nodes) {
    aux = (int *) realloc(cpus_counts, i * sizeof(int));
    if(aux != NULL) cpus_counts = aux;

    aux = (int *) realloc(nodes_counts, i * sizeof(int));
    if(aux != NULL) nodes_counts = aux;
  }
  */

  free(tmp_copy);
  return 0;
}
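
/*
 * Standalone parsing sketch (illustrative only, not part of MaM): it shows
 * how the "%d(x%d)" pattern above decodes SLURM_JOB_CPUS_PER_NODE, using
 * the made-up value "72(x2),36".
 *
 *   #include <stdio.h>
 *   #include <string.h>
 *   int main() {
 *     char spec[] = "72(x2),36";
 *     char *token = strtok(spec, ",");
 *     while (token != NULL) {
 *       int cpus, count = 1; // count stays 1 when the "(x%d)" portion is absent
 *       if (sscanf(token, "%d(x%d)", &cpus, &count) >= 1)
 *         printf("%d node(s) with %d CPUs\n", count, cpus);
 *       token = strtok(NULL, ",");
 *     }
 *     return 0; // Prints "2 node(s) with 72 CPUs" then "1 node(s) with 36 CPUs"
 *   }
 */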

/*
 * TODO
 * FIXME Does not consider heterogeneous machines
 */
int MAM_I_slurm_getjob_hosts_info() {
  int jobId, err;
  char *tmp = NULL;
  job_info_msg_t *j_info;
  slurm_job_info_t last_record;

  tmp = getenv("SLURM_JOB_ID");
  if(tmp == NULL) return 1;
  jobId = atoi(tmp);

  err = slurm_load_job(&j_info, jobId, 1);
  if(err) return err;
  last_record = j_info->job_array[j_info->record_count - 1];

  mall->num_nodes = last_record.num_nodes;
  mall->num_cpus = last_record.num_cpus;

  mall->nodelist_len = strlen(last_record.nodes)+1;
  mall->nodelist = (char *) malloc(mall->nodelist_len * sizeof(char));
  strcpy(mall->nodelist, last_record.nodes);

  slurm_free_job_info_msg(j_info);
  return 0;
}
#endif

//TODO Refactor for when MaM communicates directly with the RMS
    // Get Slurm job info
    //int jobId;
    //char *tmp;
    //job_info_msg_t *j_info;
    //slurm_job_info_t last_record;
    //tmp = getenv("SLURM_JOB_ID");
    //jobId = atoi(tmp);
    //slurm_load_job(&j_info, jobId, 1);
    //last_record = j_info->job_array[j_info->record_count - 1];
    // Free JOB INFO
    //slurm_free_job_info_msg(j_info);