Commit f1511cb4 authored by Iker Martín Álvarez

Merge branch 'RMA-Distributions' into 'dev'

RMA functionality and refactor of many of the codes

See merge request martini/malleability_benchmark!4
parents 2f81e29c 6633cd95
......@@ -18,14 +18,14 @@ void indicate_ending_malleability(int new_outside_state);
int malleability_checkpoint();
void set_benchmark_grp(int grp);
void set_malleability_configuration(int spawn_method, int spawn_strategies, int spawn_dist, int comm_type, int comm_threaded);
void set_malleability_configuration(int spawn_method, int spawn_strategies, int spawn_dist, int red_method, int red_strategies);
void set_children_number(int numC); // TODO TO BE DEPRECATED
void get_malleability_user_comm(MPI_Comm *comm);
void malleability_add_data(void *data, size_t total_qty, int type, int is_replicated, int is_constant);
void malleability_modify_data(void *data, size_t index, size_t total_qty, int type, int is_replicated, int is_constant);
void malleability_get_entries(size_t *entries, int is_replicated, int is_constant);
void malleability_get_data(void **data, int index, int is_replicated, int is_constant);
void malleability_get_data(void **data, size_t index, int is_replicated, int is_constant);
void set_benchmark_configuration(configuration *config_file);
void get_benchmark_configuration(configuration **config_file);
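A hypothetical calling sequence for the API above (the constants come from malleabilityStates.h further down; every argument value is an invented example, not taken from the benchmark):
set_malleability_configuration(MALL_SPAWN_BASELINE, MALL_SPAWN_SINGLE, 1, MALL_RED_RMA_LOCKALL, 0);
malleability_add_data(array, total_qty, MAL_INT, 1, 1); // replicated and constant data
if(malleability_checkpoint() == MALL_DIST_COMPLETED) { // assuming it reports the mall_states values
MPI_Comm user_comm;
get_malleability_user_comm(&user_comm);
}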
......
......@@ -5,21 +5,6 @@
#include <stdlib.h>
//States
/*
#define MAL_UNRESERVED -1
#define MAL_DENIED -2
#define MAL_ZOMBIE -3
#define MAL_NOT_STARTED 0
#define MAL_SPAWN_PENDING 1
#define MAL_SPAWN_SINGLE_START 2
#define MAL_SPAWN_SINGLE_PENDING 3
#define MAL_SPAWN_ADAPT_POSTPONE 4
#define MAL_SPAWN_COMPLETED 5
#define MAL_DIST_PENDING 6
#define MAL_DIST_COMPLETED 7
#define MAL_DIST_ADAPTED 8
*/
#define MALL_DENIED -1
enum mall_states{MALL_UNRESERVED, MALL_NOT_STARTED, MALL_ZOMBIE, MALL_SPAWN_PENDING, MALL_SPAWN_SINGLE_PENDING,
MALL_SPAWN_SINGLE_COMPLETED, MALL_SPAWN_ADAPT_POSTPONE, MALL_SPAWN_COMPLETED, MALL_DIST_PENDING, MALL_DIST_COMPLETED,
......@@ -28,39 +13,19 @@ enum mall_spawn_methods{MALL_SPAWN_BASELINE, MALL_SPAWN_MERGE};
#define MALL_SPAWN_PTHREAD 2
#define MALL_SPAWN_SINGLE 3
enum mall_redistribution_methods{MALL_RED_BASELINE, MALL_RED_POINT, MALL_RED_RMA_LOCK, MALL_RED_RMA_LOCKALL};
#define MALL_RED_THREAD 2
#define MALL_RED_IBARRIER 3
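MALL_RED_RMA_LOCK and MALL_RED_RMA_LOCKALL name the two passive-target synchronization styles of MPI RMA. A minimal sketch of the difference, with invented buffers and an invented helper function (this is not the benchmark's redistribution code):
#include <mpi.h>
#include <stdlib.h>
void rma_get_sketch(int *local_buf, int count, int target, MPI_Comm comm) {
MPI_Win win;
int *recv_buf = (int *) malloc(count * sizeof(int));
MPI_Win_create(local_buf, count * sizeof(int), sizeof(int), MPI_INFO_NULL, comm, &win);
// MALL_RED_RMA_LOCK style: one lock/unlock epoch per target rank
MPI_Win_lock(MPI_LOCK_SHARED, target, 0, win);
MPI_Get(recv_buf, count, MPI_INT, target, 0, count, MPI_INT, win);
MPI_Win_unlock(target, win); // the transfer is complete after the unlock
// MALL_RED_RMA_LOCKALL style: a single epoch covering every rank of the window
MPI_Win_lock_all(0, win);
MPI_Get(recv_buf, count, MPI_INT, target, 0, count, MPI_INT, win);
MPI_Win_unlock_all(win); // completes all pending transfers at once
MPI_Win_free(&win);
free(recv_buf);
}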
#define MALLEABILITY_ROOT 0
#define MAL_APP_EXECUTING 0
#define MAL_APP_ENDED 1
// TODO Refactor
/*
#define COMM_PHY_SPREAD 1
#define COMM_PHY_COMPACT 2
*/
/*
// SPAWN METHODS
#define COMM_SPAWN_SERIAL 0
#define COMM_SPAWN_PTHREAD 1
#define COMM_SPAWN_MERGE 2
#define COMM_SPAWN_MERGE_PTHREAD 3
//#define COMM_SPAWN_BASELINE 0
//#define COMM_SPAWN_MERGE 1
//SPAWN STRATEGIES
#define COMM_SPAWN_MULTIPLE 0
#define COMM_SPAWN_SINGLE 1
//#define COMM_SPAWN_PTHREAD 2
//#define COMM_SPAWN_SINGLE 3
*/
#define MAL_USE_NORMAL 0
#define MAL_USE_IBARRIER 1
#define MAL_USE_POINT 2
#define MAL_USE_THREAD 3
//TODO DEPRECATE
#define MAL_INT 0
#define MAL_CHAR 1
////////////////
#define MALLEABILITY_CHILDREN 1
#define MALLEABILITY_NOT_CHILDREN 0
......
......@@ -32,6 +32,7 @@ void add_data(void *data, size_t total_qty, int type, size_t request_qty, mallea
data_struct->qty[data_struct->entries] = total_qty;
data_struct->types[data_struct->entries] = type;
data_struct->arrays[data_struct->entries] = data;
data_struct->request_qty[data_struct->entries] = request_qty;
if(request_qty) {
data_struct->requests[data_struct->entries] = (MPI_Request *) malloc(request_qty * sizeof(MPI_Request));
......@@ -62,6 +63,7 @@ void modify_data(void *data, size_t index, size_t total_qty, int type, size_t re
data_struct->qty[index] = total_qty;
data_struct->types[index] = type;
data_struct->arrays[index] = data;
data_struct->request_qty[index] = request_qty;
if(request_qty) {
data_struct->requests[index] = (MPI_Request *) malloc(request_qty * sizeof(MPI_Request));
......@@ -80,7 +82,7 @@ void modify_data(void *data, size_t index, size_t total_qty, int type, size_t re
*/
void comm_data_info(malleability_data_t *data_struct_rep, malleability_data_t *data_struct_dist, int is_children_group, int myId, int root, MPI_Comm intercomm) {
int is_intercomm, rootBcast = MPI_PROC_NULL;
size_t i;
size_t i, j;
MPI_Datatype entries_type, struct_type;
......@@ -95,7 +97,7 @@ void comm_data_info(malleability_data_t *data_struct_rep, malleability_data_t *d
def_malleability_entries(data_struct_dist, data_struct_rep, &entries_type);
MPI_Bcast(MPI_BOTTOM, 1, entries_type, rootBcast, intercomm);
if(is_children_group && ( data_struct_rep->entries != 0 || data_struct_dist->entries != 0 )) {
if(is_children_group && ( data_struct_rep->entries != 0 || data_struct_dist->entries != 0 )) { //FIXME What happens if both values are 0?
init_malleability_data_struct(data_struct_rep, data_struct_rep->entries);
init_malleability_data_struct(data_struct_dist, data_struct_dist->entries);
}
......@@ -104,17 +106,19 @@ void comm_data_info(malleability_data_t *data_struct_rep, malleability_data_t *d
MPI_Bcast(MPI_BOTTOM, 1, struct_type, rootBcast, intercomm);
if(is_children_group) {
/*
size_t request_qty = 1; // TODO Obtain this from the function
data_struct_rep->requests[data_struct_rep->entries] = (MPI_Request *) malloc(request_qty * sizeof(MPI_Request));
data_struct_dist->requests[data_struct_dist->entries] = (MPI_Request *) malloc(request_qty * sizeof(MPI_Request));
*/
for(i=0; i < data_struct_rep->entries; i++) {
data_struct_rep->arrays[i] = (void *) malloc(data_struct_rep->qty[i] * sizeof(int)); //TODO Take into account that it is not always int
data_struct_rep->requests[i] = (MPI_Request *) malloc(data_struct_rep->request_qty[i] * sizeof(MPI_Request));
for(j=0; j < data_struct_rep->request_qty[i]; j++) {
data_struct_rep->requests[i][j] = MPI_REQUEST_NULL;
}
}
for(i=0; i < data_struct_dist->entries; i++) {
data_struct_dist->arrays[i] = (void *) malloc(data_struct_dist->qty[i] * sizeof(int)); //TODO Take into account that it is not always int
data_struct_dist->arrays[i] = (void *) NULL;
data_struct_dist->requests[i] = (MPI_Request *) malloc(data_struct_dist->request_qty[i] * sizeof(MPI_Request));
for(j=0; j < data_struct_dist->request_qty[i]; j++) {
data_struct_dist->requests[i][j] = MPI_REQUEST_NULL;
}
}
}
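Presetting every request slot to MPI_REQUEST_NULL is what makes the later completion calls safe: MPI wait/test operations treat null requests as already complete. A minimal sketch of the pattern (invented names, not the benchmark's code):
MPI_Request reqs[4];
for(size_t k=0; k<4; k++) reqs[k] = MPI_REQUEST_NULL;
// ...only some of the slots may be filled in by MPI_Isend/MPI_Irecv calls...
MPI_Waitall(4, reqs, MPI_STATUSES_IGNORE); // null entries are skipped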
......@@ -134,13 +138,20 @@ void comm_data_info(malleability_data_t *data_struct_rep, malleability_data_t *d
* "size" elementos.
*/
void init_malleability_data_struct(malleability_data_t *data_struct, size_t size) {
size_t i;
data_struct->max_entries = size;
data_struct->qty = (size_t *) malloc(size * sizeof(size_t));
data_struct->types = (int *) malloc(size * sizeof(int));
data_struct->request_qty = (size_t *) malloc(size * sizeof(size_t));
data_struct->requests = (MPI_Request **) malloc(size * sizeof(MPI_Request *));
data_struct->windows = (MPI_Win *) malloc(size * sizeof(MPI_Win));
data_struct->arrays = (void **) malloc(size * sizeof(void *));
data_struct->request_ibarrier = MPI_REQUEST_NULL;
for(i=0; i<size; i++) { //calloc and memset do not guarantee a NULL pointer value
data_struct->requests[i] = NULL;
data_struct->arrays[i] = NULL;
}
}
/*
......@@ -149,37 +160,46 @@ void init_malleability_data_struct(malleability_data_t *data_struct, size_t size
* to the already existing ones.
*/
void realloc_malleability_data_struct(malleability_data_t *data_struct, size_t qty_to_add) {
size_t needed, *qty_aux;
size_t i, needed, *qty_aux, *request_qty_aux;
int *types_aux;
MPI_Win *windows_aux;
MPI_Request **requests_aux;
void **arrays_aux;
needed = data_struct->max_entries + qty_to_add;
qty_aux = (size_t *) realloc(data_struct->qty, needed * sizeof(size_t));
types_aux = (int *) realloc(data_struct->types, needed * sizeof(int));
request_qty_aux = (size_t *) realloc(data_struct->request_qty, needed * sizeof(size_t));
requests_aux = (MPI_Request **) realloc(data_struct->requests, needed * sizeof(MPI_Request *));
windows_aux = (MPI_Win *) realloc(data_struct->windows, needed * sizeof(MPI_Win));
arrays_aux = (void **) realloc(data_struct->arrays, needed * sizeof(void *));
if(qty_aux == NULL || arrays_aux == NULL || requests_aux == NULL || types_aux == NULL) {
if(qty_aux == NULL || arrays_aux == NULL || requests_aux == NULL || types_aux == NULL || request_qty_aux == NULL || windows_aux == NULL) {
fprintf(stderr, "Fatal error - Could not reallocate the constant memory of data to redistribute/communicate\n");
MPI_Abort(MPI_COMM_WORLD, 1);
}
for(i=data_struct->max_entries; i<needed; i++) { //realloc does not ensure a NULL value
requests_aux[i] = NULL;
arrays_aux[i] = NULL;
}
data_struct->qty = qty_aux;
data_struct->types = types_aux;
data_struct->request_qty = request_qty_aux;
data_struct->requests = requests_aux;
data_struct->windows = windows_aux;
data_struct->arrays = arrays_aux;
data_struct->max_entries = needed;
}
void free_malleability_data_struct(malleability_data_t *data_struct) {
size_t i, max;
size_t i, j, max;
max = data_struct->entries;
if(max != 0) {
for(i=0; i<max; i++) {
//free(data_struct->arrays[i]); //FIXME Are values allocated with a single element never freed?
//free(data_struct->requests[i]); //TODO Consider how to create them
}
if(data_struct->qty != NULL) {
......@@ -188,9 +208,26 @@ void free_malleability_data_struct(malleability_data_t *data_struct) {
if(data_struct->types != NULL) {
free(data_struct->types);
}
if(data_struct->requests != NULL) {
free(data_struct->requests);
if(data_struct->requests != NULL && data_struct->request_qty != NULL) {
for(i=0; i<max; i++) {
if(data_struct->requests[i] != NULL) {
for(j=0; j<data_struct->request_qty[i]; j++) {
if(data_struct->requests[i][j] != MPI_REQUEST_NULL) {
MPI_Request_free(&(data_struct->requests[i][j]));
data_struct->requests[i][j] = MPI_REQUEST_NULL;
}
}
free(data_struct->requests[i]);
}
}
free(data_struct->request_qty);
free(data_struct->requests);
}
if(data_struct->windows != NULL) {
free(data_struct->windows);
}
if(data_struct->arrays != NULL) {
free(data_struct->arrays);
}
......@@ -232,20 +269,22 @@ void def_malleability_entries(malleability_data_t *data_struct_rep, malleability
* TODO Refactor?
*/
void def_malleability_qty_type(malleability_data_t *data_struct_rep, malleability_data_t *data_struct_dist, MPI_Datatype *new_type) {
int counts = 4;
int counts = 6;
int blocklengths[counts];
MPI_Aint displs[counts];
MPI_Datatype types[counts];
types[0] = types[2] = MPI_UNSIGNED_LONG;
types[1] = types[3] = MPI_INT;
blocklengths[0] = blocklengths[1] = data_struct_rep->entries;
blocklengths[2] = blocklengths[3] = data_struct_dist->entries;
types[0] = types[1] = types[3] = types[4] = MPI_UNSIGNED_LONG;
types[2] = types[5] = MPI_INT;
blocklengths[0] = blocklengths[1] = blocklengths[2] = data_struct_rep->entries;
blocklengths[3] = blocklengths[4] = blocklengths[5] = data_struct_dist->entries;
MPI_Get_address((data_struct_rep->qty), &displs[0]);
MPI_Get_address((data_struct_rep->types), &displs[1]);
MPI_Get_address((data_struct_dist->qty), &displs[2]);
MPI_Get_address((data_struct_dist->types), &displs[3]);
MPI_Get_address((data_struct_rep->request_qty), &displs[1]);
MPI_Get_address((data_struct_rep->types), &displs[2]);
MPI_Get_address((data_struct_dist->qty), &displs[3]);
MPI_Get_address((data_struct_dist->request_qty), &displs[4]);
MPI_Get_address((data_struct_dist->types), &displs[5]);
MPI_Type_create_struct(counts, blocklengths, displs, types, new_type);
MPI_Type_commit(new_type);
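Because the displacements are absolute addresses taken with MPI_Get_address, the broadcast can pass MPI_BOTTOM as the buffer and ship all six arrays in one message. A reduced sketch of the same technique with two arrays (hypothetical names, including root and comm; MPI_UNSIGNED_LONG is assumed to match size_t, as in the code above):
enum { N = 4 }; // hypothetical entry count
size_t qty[N]; int kinds[N];
int counts = 2, blocklengths[2] = {N, N};
MPI_Aint displs[2];
MPI_Datatype types[2] = {MPI_UNSIGNED_LONG, MPI_INT}, meta_type;
MPI_Get_address(qty, &displs[0]);
MPI_Get_address(kinds, &displs[1]);
MPI_Type_create_struct(counts, blocklengths, displs, types, &meta_type);
MPI_Type_commit(&meta_type);
MPI_Bcast(MPI_BOTTOM, 1, meta_type, root, comm); // absolute addresses allow MPI_BOTTOM
MPI_Type_free(&meta_type);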
......
......@@ -13,13 +13,14 @@
typedef struct {
size_t entries; // Indicates the number of arrays to communicate (replicated data)
size_t max_entries;
MPI_Request request_ibarrier; // Request used to indicate that the parents wait until the children finish receiving
size_t *qty; // Indicates the number of elements in each subarray of sync_array
int *types;
// Array of arrays of requests. Each top-level element indicates the requests to check in order to consider
// the communication of that piece of data finished
size_t *request_qty;
MPI_Request **requests;
MPI_Win *windows;
void **arrays; // Each subarray is a series of data to communicate
} malleability_data_t;
......
......@@ -40,13 +40,12 @@ void zombies_collect_suspended(MPI_Comm comm, int myId, int numP, int numC, int
free(pids_counts);
free(pids_displs);
// FIXME This should not be here
// Needed to ensure iteration times are collected before suspending these processes
results_data *results = (results_data *) results_void;
compute_results_iter(results, myId, numP,root, comm);
compute_results_stages(results, myId, numP, n_stages, root, comm);
if(myId >= numC) {
// FIXME This should not be here
// Needed to ensure iteration times are collected before suspending these processes
results_data *results = (results_data *) results_void;
compute_results_iter(results, myId, numP,root, comm);
compute_results_stages(results, myId, numP, n_stages, root, comm);
zombies_suspend();
}
}
......
......@@ -215,6 +215,7 @@ void set_spawn_configuration(char *cmd, int num_cpus, int num_nodes, char *nodel
init_spawn_state();
}
spawn_data->mapping = MPI_INFO_NULL;
if(spawn_data->myId == spawn_data->root) {
physical_struct_create(target_qty, spawn_data->already_created, num_cpus, num_nodes, nodelist, type_dist, MALL_DIST_STRING, &(spawn_data->dist));
......@@ -225,7 +226,6 @@ void set_spawn_configuration(char *cmd, int num_cpus, int num_nodes, char *nodel
} else {
spawn_data->cmd = malloc(1 * sizeof(char));
spawn_data->mapping = MPI_INFO_NULL; //It is only needed for the root process
}
}
......@@ -290,10 +290,10 @@ void deallocate_spawn_data() {
* Cuando termina, modifica la variable global para indicar este cambio
*/
void generic_spawn(MPI_Comm *child, int data_stage) {
int local_state;
int local_state, aux_state;
// WORK
if(spawn_data->myId == spawn_data->root) { //SET MAPPING
if(spawn_data->myId == spawn_data->root && spawn_data->spawn_qty > 0) { //SET MAPPING FOR NEW PROCESSES
processes_dist(spawn_data->dist, &(spawn_data->mapping));
}
switch(spawn_data->spawn_method) {
......@@ -306,7 +306,10 @@ void generic_spawn(MPI_Comm *child, int data_stage) {
}
// END WORK
end_time = MPI_Wtime();
set_spawn_state(local_state, spawn_data->spawn_is_async);
aux_state = get_spawn_state(spawn_data->spawn_is_async);
if(!(aux_state == MALL_SPAWN_PENDING && local_state == MALL_SPAWN_ADAPT_POSTPONE)) {
set_spawn_state(local_state, spawn_data->spawn_is_async);
}
}
......@@ -345,7 +348,7 @@ void* thread_work() {
generic_spawn(returned_comm, MALL_NOT_STARTED);
local_state = get_spawn_state(MALL_SPAWN_PTHREAD);
if(local_state == MALL_SPAWN_ADAPT_POSTPONE) {
if(local_state == MALL_SPAWN_ADAPT_POSTPONE || local_state == MALL_SPAWN_PENDING) {
// The process group will finish merging after the data redistribution
local_state = wait_wakeup();
......
......@@ -168,7 +168,7 @@ void compact_dist(struct physical_dist dist, int *used_nodes, int *procs) {
int tamBl, remainder;
tamBl = dist.num_cpus / dist.num_nodes;
asigCores = 0;
asigCores = dist.already_created;
i = *used_nodes = dist.already_created / tamBl;
remainder = dist.already_created % tamBl;
......@@ -176,12 +176,13 @@ void compact_dist(struct physical_dist dist, int *used_nodes, int *procs) {
//First nodes could already have existing procs
//Start from the first with free spaces
if (remainder) {
procs[i] = asigCores = tamBl - remainder;
procs[i] = tamBl - remainder;
asigCores += procs[i];
i = (i+1) % dist.num_nodes;
(*used_nodes)++;
}
//Assing tamBl to each node
//Assign tamBl to each node
while(asigCores+tamBl <= dist.target_qty) {
asigCores += tamBl;
procs[i] += tamBl;
......
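A worked example of the corrected accounting in compact_dist (invented numbers): with tamBl = 10 and already_created = 12, the search starts at node i = 12/10 = 1 with remainder = 2, and asigCores now begins at 12 rather than 0. The remainder branch then adds the procs[1] = 10 - 2 = 8 free slots, so asigCores reaches 20 before the main loop; the old code restarted the count at 8 and could over-assign processes to nodes that were already full.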
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include "Spawn_state.h"
pthread_mutex_t spawn_mutex;
pthread_cond_t spawn_cond;
int spawn_state;
int waiting_redistribution=0;
void init_spawn_state() {
pthread_mutex_init(&spawn_mutex,NULL);
pthread_cond_init(&spawn_cond,NULL);
set_spawn_state(1,0); //FIXME First parameter is a horrible magical number
}
void free_spawn_state() {
......@@ -40,13 +43,20 @@ void set_spawn_state(int value, int is_async) {
int wait_wakeup() {
pthread_mutex_lock(&spawn_mutex);
pthread_cond_wait(&spawn_cond, &spawn_mutex);
if(!waiting_redistribution) {
waiting_redistribution=1;
pthread_cond_wait(&spawn_cond, &spawn_mutex);
}
waiting_redistribution=0;
pthread_mutex_unlock(&spawn_mutex);
return get_spawn_state(1);
}
void wakeup() {
pthread_mutex_lock(&spawn_mutex);
pthread_cond_signal(&spawn_cond);
if(waiting_redistribution) {
pthread_cond_signal(&spawn_cond);
}
waiting_redistribution=1;
pthread_mutex_unlock(&spawn_mutex);
}
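The waiting_redistribution flag turns the bare wait/signal pair into a guarded handshake, so a wakeup() that fires before wait_wakeup() blocks is no longer lost. The textbook form of the same idea, sketched with invented names (Spawn_state.c uses the flag with slightly different semantics, but it closes the same race):
static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t c = PTHREAD_COND_INITIALIZER;
static int event_pending = 0;
void waiter(void) {
pthread_mutex_lock(&m);
while(!event_pending) { // the loop also guards against spurious wakeups
pthread_cond_wait(&c, &m);
}
event_pending = 0; // consume the event
pthread_mutex_unlock(&m);
}
void signaller(void) {
pthread_mutex_lock(&m);
event_pending = 1; // recorded even if nobody is waiting yet
pthread_cond_signal(&c);
pthread_mutex_unlock(&m);
}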
......@@ -5,7 +5,11 @@
#SBATCH --exclude=c01,c00,c02
dir="/home/martini/malleability_benchmark"
partition='P1'
codeDir="/Codes"
execDir="/Exec"
cores=$(bash $dir$execDir/BashScripts/getCores.sh $partition)
nodelist=$SLURM_JOB_NODELIST
nodes=$SLURM_JOB_NUM_NODES
......@@ -18,18 +22,14 @@ then
fi
echo "MPICH"
#module load mpich-3.4.1-noucx
#export HYDRA_DEBUG=1
aux=$(grep "\[resize0\]" -n $configFile | cut -d ":" -f1)
read -r ini fin <<<$(echo $aux)
diff=$(( fin - ini ))
numP=$(head -$fin $configFile | tail -$diff | cut -d ';' -f1 | grep Procs | cut -d '=' -f2)
ls /home/martini/malleability_benchmark/Codes/build/a.out
echo "Test PreRUN $numP $nodes"
mpirun -np $numP $dir$codeDir/build/a.out $configFile $outIndex $nodelist $nodes
numP=$(bash $dir$execDir/BashScripts/getNumPNeeded.sh $configFile 0)
initial_nodelist=$(bash $dir$execDir/BashScripts/createInitialNodelist.sh $numP $cores $nodelist)
echo $initial_nodelist
echo "Test PreRUN $numP $nodelist"
mpirun -hosts $initial_nodelist -np $numP $dir$codeDir/build/a.out $configFile $outIndex $nodelist $nodes
echo "END RUN"
sed -i 's/application called MPI_Abort(MPI_COMM_WORLD, -100) - process/shrink cleaning/g' slurm-$SLURM_JOB_ID.out
sed -i 's/Abort(-100)/shrink cleaning/g' slurm-$SLURM_JOB_ID.out
......@@ -7,6 +7,7 @@
dir="/home/martini/malleability_benchmark"
codeDir="/Codes/build"
resultsDir="/Results"
execDir="/Exec"
nodelist=$SLURM_JOB_NODELIST
nodes=$SLURM_JOB_NUM_NODES
......@@ -15,10 +16,7 @@ outIndex=$2
echo "MPICH"
aux=$(grep "\[resize0\]" -n $configFile | cut -d ":" -f1)
read -r ini fin <<<$(echo $aux)
diff=$(( fin - ini ))
numP=$(head -$fin $configFile | tail -$diff | cut -d ';' -f1 | grep Procs | cut -d '=' -f2)
numP=$(bash $dir$execDir/BashScripts/getNumPNeeded.sh $configFile 0)
name_res="Extrae_"$nodes"_Test_"$numP
dir_name_res=$dir$resultsDir"/"$name_res
......@@ -28,6 +26,7 @@ srun -n$numP --mpi=pmi2 ./trace.sh $dir$codeDir/a.out $configFile $outIndex $nod
echo "END RUN"
sed -i 's/application called MPI_Abort(MPI_COMM_WORLD, -100) - process/shrink cleaning/g' slurm-$SLURM_JOB_ID.out
sed -i 's/Abort(-100)/shrink cleaning/g' slurm-$SLURM_JOB_ID.out
rm hostfile.o$SLURM_JOB_ID
echo "MOVING DATA"
......
......@@ -6,6 +6,7 @@
dir="/home/martini/malleability_benchmark"
codeDir="/Codes"
execDir="/Exec"
nodelist="localhost"
nodes=1
......@@ -16,12 +17,9 @@ echo "MPICH"
#module load mpich-3.4.1-noucx
#export HYDRA_DEBUG=1
aux=$(grep "\[resize0\]" -n $1 | cut -d ":" -f1)
read -r ini fin <<<$(echo $aux)
diff=$(( fin - ini ))
numP=$(head -$fin $1 | tail -$diff | cut -d ';' -f1 | grep Procs | cut -d '=' -f2)
numP=$(bash $dir$execDir/BashScripts/getNumPNeeded.sh $configFile 0)
mpirun -np $numP valgrind --leak-check=full --show-leak-kinds=all --track-origins=yes --trace-children=yes --log-file=nc.vg.%p $dir$codeDir/build/a.out $configFile $outIndex $nodelist $nodes
echo "END RUN"
sed -i 's/application called MPI_Abort(MPI_COMM_WORLD, -100) - process/shrink cleaning/g' slurm-$SLURM_JOB_ID.out
sed -i 's/Abort(-100)/shrink cleaning/g' slurm-$SLURM_JOB_ID.out
#!/bin/bash
dir="/home/martini/malleability_benchmark" #FIXME Obtain from another way
# Builds the initial nodelist for mpirun -hosts: expands the Slurm nodelist and repeats each of the first needed node names once per core
# Parameter 1 - Number of processes to run. Must be a positive number
# Parameter 2 - Number of cores per node
# Parameter 3 - Slurm nodelist from which the initial nodes are taken
#====== Do not modify these values =======
codeDir="/Codes/build"
execDir="/Exec"
ResultsDir="/Results"
numP=$1
cores=$2
nodelist=$3
initial_node_qty=$(($numP / $cores))
if [ $initial_node_qty -eq 0 ]
then
initial_node_qty=1
fi
common_node_name="n" #FIXME What if it uses another type of node?
if [[ $nodelist == *"["* ]]; then
common_node_name=$(echo $nodelist | cut -d '[' -f1)
fi
node_array=($(echo $nodelist | sed -e 's/[\[n]//g' -e 's/\]/ /g' -e 's/,/ /g'))
actual_node_qty=0
for ((i=0; $actual_node_qty<$initial_node_qty; i++))
do
element=($(echo ${node_array[$i]} | sed -e 's/-/ /g'))
nodes_qty=1
if [ "${#element[@]}" -gt 1 ];
then
nodes_qty=$((10#${element[1]}-10#${element[0]}+1))
fi
expected_node_qty=$(($actual_node_qty + $nodes_qty))
if [ "$expected_node_qty" -le "$initial_node_qty" ];
then
added_qty=$nodes_qty
actual_node_qty=$expected_node_qty
else
added_qty=$(($initial_node_qty - $actual_node_qty))
actual_node_qty=$initial_node_qty
fi
for ((j=0; j<$added_qty; j++))
do
index=$((10#${element[0]} + $j))
index=0$index # FIXME What if there are more than 9 nodes?
#FIXME What if less than $cores have to be spawned?
for ((core=0; core<$cores; core++)) # FIXME What if the user asks for a spread distribution
do
initial_nodelist="${initial_nodelist:+$initial_nodelist,}"$common_node_name$index
done
done
done
#Print result
echo $initial_nodelist
#!/bin/bash
# Obtains the number of cores of a node in a homogeneous partition
# Parameter 1 - Partition to use
#====== Do not modify these values =======
partition=$1
hostlist=$(sinfo -hs --partition $partition | sed 's/ */:/g' | cut -d ':' -f5)
basic_node=$(scontrol show hostname $hostlist | paste -d, -s | cut -d ',' -f1)
cores=$(scontrol show node $basic_node | grep CPUTot | cut -d '=' -f3 | cut -d ' ' -f1)
echo "$cores"
#!/bin/bash
# Obtains how many nodes will be needed for a given configuration file
# Parameter 1 - Configuration file name for the emulation.
# Parameter 2 - Base directory of the malleability benchmark
# Parameter 3 - Number of cores in the machines. The machines must be homogeneous. Must be a positive number.
#====== Do not modify these values =======
codeDir="/Codes/build"
execDir="/Exec"
ResultsDir="/Results"
if [ "$#" -lt "3" ]
then
echo "Not enough arguments"
echo "Usage -> bash getMaxNodesNeeded.sh Configuration.ini BaseDirectory NumCores"
exit -1
fi
config_file=$1
dir=$2
cores=$3
max_numP=-1
total_resizes=$(grep Total_Resizes $config_file | cut -d '=' -f2)
total_groups=$(($total_resizes + 1))
for ((j=0; j<total_groups; j++));
do
numP=$(bash $dir$execDir/BashScripts/getNumPNeeded.sh $config_file $j)
if [ "$numP" -gt "$max_numP" ];
then
max_numP=$numP
fi
done
node_qty=$(($max_numP / $cores))
if [ $node_qty -eq 0 ]
then
node_qty=1
fi
echo $node_qty
#!/bin/bash
dir="/home/martini/malleability_benchmark" #FIXME Obtain from another way
# Obtains the number of processes (Procs) requested by a given group of a configuration file
# Parameter 1 - Configuration file name for the emulation
# Parameter 2 - Group index (e.g. 0 reads the Procs value of section [resize0])
#====== Do not modify these values =======
codeDir="/Codes/build"
execDir="/Exec"
ResultsDir="/Results"
config_file=$1
group_index=$2
resize_info=$(grep "\[resize$group_index\]" -n $config_file | cut -d ":" -f1)
first_line=$(echo $resize_info | cut -d " " -f1)
last_line=$(echo $resize_info | cut -d " " -f2)
range_lines=$(( last_line - first_line ))
numP=$(head -$last_line $config_file | tail -$range_lines | cut -d ';' -f1 | grep Procs | cut -d '=' -f2)
echo $numP
#!/bin/bash
dir="/home/martini/malleability_benchmark/"
partition="P1"
# Checks if all the runs in the current working directory performed under a
# Slurm manager have been performed correctly and if some runs can be corrected
# they are launched again
# Parameter 1 - Common name of the configuration files
# Parameter 2 - Maximum index of the runs
# Parameter 3 - Amount of repetitions per index/run
# Parameter 4 - Total stages in all runs. #FIXME The amount of stages must be equal across all the runs, must be modified in the future.
# Parameter 5 - Total groups of processes in all runs #FIXME The amount of groups must be equal across all the runs, must be modified in the future.
# Parameter 6 - Maximum valid iteration time across all runs. If an iteration time
# is higher, that particular repetition inside the run is cleaned and
# launched again.
# Parameter 7(Optional) - Maximum amount of time in seconds needed by a single execution. Default value is 0, which indicates infinite time.
# Must be a positive integer.
#====== Do not modify the following values =======
codeDir="Codes/"
execDir="Exec/"
ResultsDir="Results/"
cores=$(bash $dir$execDir/BashScripts/getCores.sh $partition)
ResultsDirName=$1
maxIndex=$2
cantidadGrupos=$3 #Contando a los padres
totalEjGrupo=$4 #Total de ejecuciones por grupo
maxTime=$5 #Maximo tiempo que se considera válido
if [ $# -lt 3 ]
if [ "$#" -lt "6" ]
then
echo "Faltan argumentos"
echo "Uso -> bash CheckRun NombreDirectorio IndiceMaximo Grupos"
echo "Not enough arguments"
echo "Usage -> bash CheckRun Common_Name maxIndex total_repetitions total_groups total_stages max_iteration_time [limit_time]"
exit -1
fi
cd $dir$ResultsDir
if [ ! -d $ResultsDirName ]
common_name=$1
maxIndex=$2
totalEjGrupo=$3 #Total executions per group
total_stages=$4
total_groups=$5
maxTime=$6 #Maximum time considered valid
limit_time_exec=0
if [ $# -ge 7 ] #Max time per execution in seconds
then
echo "La carpeta de resultados $ResultsDirName no existe. Abortando"
exit -1
limit_time_exec=$7
fi
cd $ResultsDirName
#Comprobar si hay errores
#Si los hay, salir
grep -i -e fatal -e error -e abort -e == */slurm* > errores2.txt
qty=$(wc -l errores2.txt | cut -d ' ' -f1)
limit_time=0
exec_lines_basic=7
iter_lines_basic=3
exec_total_lines=$(($exec_lines_basic+$total_stages+$total_groups))
iter_total_lines=$(($iter_lines_basic+$total_stages*2+1))
exec_remove=$(($exec_lines_basic+$total_stages+$total_groups-1))
iter_remove=$(($iter_lines_basic+$total_stages))
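# For instance (hypothetical run shape): with total_stages=4 and total_groups=2,
# each execution spans exec_total_lines=7+4+2=13 lines of the Global file and each
# repetition spans iter_total_lines=3+4*2+1=12 lines of a Local file, so a T_total
# match on line 40 belongs to the block covering lines 40-12=28 through 40.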
if [ $qty -gt 0 ]
#Check if there are fatal errors during executions
grep -i -e fatal -e error -e abort -e == slurm* > errores2.txt
qty=$(wc -l errores2.txt | cut -d ' ' -f1)
if [ "$qty" -gt "0" ]
then
echo "Se han encontrado errores de ejecución graves. Abortando"
echo "Revisar archivo errores2.txt en el directorio $ResultsDirName"
echo "Found Fatal errors during execution. Aborting"
echo "Read file errors2 to see the errors and in which files"
echo "FAILURE"
exit -2
fi
rm errores2.txt
#Comprobar que el número de archivos es correcto
#Pueden estar todos los archivos pero no estar los archivos
#completos -- Esto se comprueba más tarde
qtyG=$(ls R*/R*_Global.out | wc -l)
#Check if the number of output files is correct.
#If the number is not correct, it is a fatal error: the user
# is informed of the runs where the amounts do not match, and
# then the script exits.
#The user must figure out what to do with those runs.
qtyG=$(ls R*_Global.out | wc -l)
qtyG=$(($qtyG * 2))
qtyL=$(ls R*/R*_G?N*.out | wc -l)
if [ $qtyG == $qtyL ]
qtyL=$(ls R*_G*N*.out | wc -l)
if [ "$qtyG" == "$qtyL" ]
then
echo "El numero de ficheros G($qtyG) y L($qtyL) coincide"
echo "Number of G($qtyG) and L($qtyL) files match"
else
#Si faltan archivos, se indican cuales faltan
echo "Faltan ejecuciones Locales o globales"
for ((i=1; i<$maxIndex; i++))
echo "Lacking Local($qtyL) or global($qtyG) files. Aborting"
echo "Lacking Local($qtyL) or global($qtyG) files. Aborting" > errores2.txt
for ((i=0; i<$maxIndex; i++))
do
qtyEx=$(grep Tex -r Run$i | wc -l)
qtyIt=$(grep Top -r Run$i | wc -l)
qtyEx=$(grep T_total R"$i"_Global.out | wc -l)
qtyIt=$(grep T_iter R"$i"_G*N*.out | wc -l)
qtyEx=$(($qtyEx * 2))
if [ $qtyEx -ne $qtyIt ]
if [ "$qtyEx" -ne "$qtyIt" ]
then
diff=$(($totalEjGrupo-$qtyEx))
echo "Faltan archivos en Run$i"
echo "Files do not match at Run $i -- diff=$diff"
echo "Files do not match at Run $i -- diff=$diff" >> errores2.txt
fi
done
echo "FAILURE"
exit -1
fi
rm errores2.txt
#grep -rn "2.\." R* TODO Test whether the theoretical maximum time is valid?
# Check if there is any negative execution time
# Only invalid IDs are stored
rm -f tmp.txt
touch tmp.txt
exec_ids=($(grep -n "T_total" R*_Global.out | grep - | cut -d '_' -f1 | cut -d 'R' -f2))
exec_line=($(grep -n "T_total" R*_Global.out | grep - | cut -d ':' -f2))
for ((i=${#exec_ids[@]}-1; i>=0; i--))
do
first_line=$((${exec_line[$i]}-$exec_remove))
last_line=$(($first_line+$exec_total_lines-1))
echo "${exec_ids[$i]}:$first_line:$last_line" >> tmp.txt
done
#Comprobar si hay runs con tiempo negativos
#Si los hay, reejecutar e informar de cuales son
grep - */R* | grep Tex > errores.txt
qty=$(wc -l errores.txt | cut -d ' ' -f1)
if [ $qty -gt 0 ]
# Check if there is any iter time higher than expected
# Only invalid IDs are stored
iter_times=($(grep "T_iter" R*_G*N*.out | cut -d ' ' -f2))
iter_ids=($(grep "T_iter" R*_G*N*.out | cut -d '_' -f1 | cut -d 'R' -f2))
iter_line=($(grep -n "T_iter" R*_G*N*.out | cut -d ':' -f2))
for ((i=${#iter_times[@]}-1; i>=0; i--))
do
is_invalid=$(echo ${iter_times[$i]}'>'$maxTime | bc -l)
if [ $is_invalid -eq 1 ]
then
first_line=$((${iter_line[$i]}-$iter_remove))
# Translate line number to Global file
first_line=$(($first_line/$iter_total_lines))
first_line=$(($first_line*$exec_total_lines+1))
last_line=$(($first_line+$exec_total_lines-1))
echo "${iter_ids[$i]}:$first_line:$last_line" >> tmp.txt
fi
done
#Clean data from collected erroneous executions
qty=$(wc -l tmp.txt | cut -d ' ' -f1)
if [ "$qty" -gt 0 ];
then
echo "Se han encontrado errores de ejecución leves. Volviendo a ejecutar"
echo "Found minor execution errors. Executing again. Review file errores.txt."
echo "CHECKRUN -- Found errors" >> errores.txt
while IFS="" read -r lineRun || [ -n "$lineRun" ]
do
#Obtener datos de una ejecución erronea
run=$(echo $lineRun | cut -d 'R' -f3 | cut -d '_' -f1)
if [ $run -gt $maxIndex ]
then #Indice de ejecuciones posteriores echas a mano -- FIXME Eliminar?
realRun=$(($run - $maxIndex))
index=$run
else # Indice de las primeras ejecuciones
realRun=$run
index=$(($run + $maxIndex))
fi
echo "Run $run"
cd Run$realRun
#Arreglar ejecuccion
#1 - Borrar lineas erroneas
qty=$(grep -n - R* | grep Tex | wc -l)
for ((i=0; i<qty; i++))
do
fin=$(grep -n - R* | grep Tex | cut -d ':' -f2 | head -n1)
init=$(($fin - 7))
sed -i ''$init','$fin'd' R${realRun}_Global.out
#Se borran las lineas de los ficheros locales asociados
aux=$(($fin / 8)) #Utilizado para saber de entre las ejecuciones del fichero, cual es la erronea
fin=$(($aux * 5))
init=$(($fin - 4))
for ((j=0; j<cantidadGrupos; j++)); do
sed -i ''$init','$fin'd' R${realRun}_G${j}*
done
#Obtain data of erroneous execution
run=$(echo $lineRun | cut -d ':' -f1)
echo "Run $run had an erroneous execution, cleaning bad data."
echo "Run$run----------------------------------------------" >> errores.txt
#1 - Delete erroneous lines in Global file
first_line=$(echo $lineRun | cut -d ':' -f2)
last_line=$(echo $lineRun | cut -d ':' -f3)
sed -n ''$first_line','$last_line'p' R${run}_Global.out >> errores.txt
sed -i ''$first_line','$last_line'd' R${run}_Global.out
#2 - Translate line numbers to Local files type
first_line=$(($first_line/$exec_total_lines))
first_line=$(($first_line*$iter_total_lines+1))
last_line=$(($first_line+$iter_total_lines-1))
#3 - Delete erroneous lines in Local files
for ((j=0; j<total_groups; j++));
do
sed -n ''$first_line','$last_line'p' R${run}_G${j}* >> errores.txt
sed -i ''$first_line','$last_line'd' R${run}_G${j}*
done
echo "--------------------------------------------------" >> errores.txt
#2 - Reelanzar ejecucion
proc_list=$(grep Procs R${realRun}_Global.out | cut -d '=' -f3 | cut -d ',' -f1)
proc_parents=$(echo $proc_list | cut -d ' ' -f1)
proc_children=$(echo $proc_list | cut -d ' ' -f2)
nodes=8 # Maximo actual
if [ $proc_parents -gt $proc_children ]
then
nodes=$(($proc_parents / 20))
else
nodes=$(($proc_children / 20))
fi
sbatch -N $nodes $dir$execDir./singleRun.sh config$realRun.ini $index
cd $dir$ResultsDir$ResultsDirName
done < errores.txt
exit 0
done < tmp.txt
fi
#Comprobar que todas las ejecuciones tienen todas las ejecucciones que tocan
#Solo es necesario comprobar el global.
#Check if all repetitions for each Run have been executed
#If any run lacks repetitions, the job is automatically launched again
#If a run has not executed even one repetition, it is not launched, as its job could still be in the waiting queue
qty_missing=0
cd $dir$ResultsDir$ResultsDirName
for ((i=1; i<$maxIndex; i++))
for ((run=0; run<$maxIndex; run++))
do
qtyEx=$(grep Tex -r Run$i | wc -l)
if [ $qtyEx -ne $totalEjGrupo ]
if [ -f "R${run}_Global.out" ]
then
diff=$(($totalEjGrupo-$qtyEx))
qty_missing=$(($qty_missing+1))
echo "Faltan en $i, $diff ejecuciones"
qtyEx=$(grep T_total R"$run"_Global.out | wc -l)
if [ "$qtyEx" -ne "$totalEjGrupo" ];
then
#1 - Obtain config file name and repetitions to perform
diff=$(($totalEjGrupo-$qtyEx))
qty_missing=$(($qty_missing+$diff))
config_file="$common_name$run.ini"
if [ $limit_time_exec -ne 0 ] #Max time per execution in seconds
then
limit_time=$(($limit_time_exec*$diff/60+1))
fi
#2 - Obtain number of nodes needed
node_qty=$(bash $dir$execDir/BashScripts/getMaxNodesNeeded.sh $config_file $dir $cores)
#3 - Launch execution
echo "Run$run lacks $diff repetitions"
use_extrae=0
sbatch -p $partition -N $node_qty -t $limit_time $dir$execDir./generalRun.sh $dir $cores $config_file $use_extrae $run $diff
fi
else
echo "File R${run}_Global.out does not exist -- Could it be it must still be executed?"
fi
done
if [ $qty_missing -eq 0 ]
if [ "$qty_missing" -eq "0" ];
then
echo "Todos los archivos tienen $totalEjGrupo ejecuciones"
echo "SUCCESS"
else
echo "REPEATING - A total of $qty_missing executions are being repeated"
fi
......@@ -97,7 +97,7 @@
<syscall enabled="no" />
<merge enabled="yes"
<merge enabled="no"
synchronization="default"
tree-fan-out="16"
max-memory="512"
......
#!/bin/bash
dir="/home/martini/malleability_benchmark"
codeDir="/Codes/build"
export EXTRAE_CONFIG_FILE=extrae.xml
export LD_PRELOAD=$EXTRAE_HOME/lib/libmpitrace.so
$dir$codeDir/./a.out
import sys
import glob
import os
from datetime import date
from enum import Enum
GENERAL_SECTION = "[general]"
RESIZE_SECTION = "[resize"
STAGE_SECTION = "[stage"
END_SECTION_DELIMITER = ";end"
class Config_section(Enum):
INVALID=0
GENERAL=1
RESIZE=2
STAGE=3
P_TOTAL_RESIZES="Total_Resizes"
P_TOTAL_STAGES="Total_Stages"
P_GRANULARITY="Granularity"
P_SDR="SDR"
P_ADR="ADR"
P_RIGID="Rigid"
P_STAGE_TYPE="Stage_Type"
P_STAGE_BYTES="Stage_Bytes"
P_STAGE_TIME_CAPPED="Stage_Time_Capped"
P_STAGE_TIME="Stage_Time"
P_RESIZE_ITERS="Iters"
P_RESIZE_PROCS="Procs"
P_RESIZE_FACTORS="FactorS"
P_RESIZE_DIST="Dist"
P_RESIZE_REDISTRIBUTION_METHOD="Redistribution_Method"
P_RESIZE_REDISTRIBUTION_STRATEGY="Redistribution_Strategy"
P_RESIZE_SPAWN_METHOD="Spawn_Method"
P_RESIZE_SPAWN_STRATEGY="Spawn_Strategy"
@classmethod
def has_key(cls, name):
return any(x.value == name for x in cls)
def is_ending_of_section(line):
if(END_SECTION_DELIMITER in line):
return True
return False
def is_a_general_section(line):
if(line == GENERAL_SECTION):
return True
return False
def is_a_resize_section(line):
if(RESIZE_SECTION in line and not is_ending_of_section(line)):
return True
return False
def is_a_stage_section(line):
if(STAGE_SECTION in line and not is_ending_of_section(line)):
return True
return False
def process_line(line, data):
key,value = line.split('=')
if(not Config_section.has_key(key)):
print("Unknown parameter " + key)
return False
if(',' in value):
value = value.split(',')
for i in range(len(value)):
try:
value[i] = float(value[i])
if value[i] == int(value[i]):
value[i] = int(value[i])
except ValueError:
print("Unable to convert to number - Not a fatal error")
else:
try:
value = float(value)
if value == int(value):
value = int(value)
except ValueError:
print("Unable to convert to number - Not a fatal error")
data[key]=value
return True
def process_file(file_name):
f = open(file_name, "r")
lines = f.read().splitlines()
section_type = Config_section.INVALID
general_data = {}
stages_data=[]
resizes_data=[]
processing=0
for line in lines:
if(section_type != Config_section.INVALID):
if(is_ending_of_section(line)):
section_type = Config_section.INVALID
else:
process_line(line, processing)
elif(is_a_general_section(line)):
section_type = Config_section.GENERAL
processing = general_data
elif(is_a_resize_section(line)):
section_type = Config_section.RESIZE
resizes_data.append({})
processing = resizes_data[len(resizes_data)-1]
elif(is_a_stage_section(line)):
section_type = Config_section.STAGE
stages_data.append({})
processing = stages_data[len(stages_data)-1]
# print(general_data)
# print(stages_data)
# print(resizes_data)
f.close()
return general_data,stages_data,resizes_data
def general_section_write(f, general_data):
f.write(GENERAL_SECTION + "\n")
keys = list(general_data.keys())
values = list(general_data.values())
for i in range(len(keys)):
f.write(keys[i] + "=" + str(values[i]) + "\n")
f.write(END_SECTION_DELIMITER + " " + GENERAL_SECTION + "\n")
def stage_section_write(f, stage_data, section_index):
f.write(STAGE_SECTION + str(section_index) + "]\n")
keys = list(stage_data.keys())
values = list(stage_data.values())
for i in range(len(keys)):
f.write(keys[i] + "=" + str(values[i]) + "\n")
f.write(END_SECTION_DELIMITER + " " + STAGE_SECTION + str(section_index) + "]\n")
def resize_section_write(f, resize_data, section_index):
f.write(RESIZE_SECTION + str(section_index) + "]\n")
keys = list(resize_data.keys())
values = list(resize_data.values())
for i in range(len(keys)):
f.write(keys[i] + "=" + str(values[i]) + "\n")
f.write(END_SECTION_DELIMITER + " " + RESIZE_SECTION + str(section_index) + "]\n")
def write_output_file(datasets, common_output_name, output_index):
file_name = common_output_name + str(output_index) + ".ini"
total_stages=int(datasets[0][Config_section.P_TOTAL_STAGES.value])
total_resizes=int(datasets[0][Config_section.P_TOTAL_RESIZES.value])+1
f = open(file_name, "w")
general_section_write(f, datasets[0])
for i in range(total_stages):
stage_section_write(f, datasets[i+1], i)
for i in range(total_resizes):
resize_section_write(f, datasets[i+1+total_stages], i)
f.close()
def check_sections_assumptions(datasets):
total_resizes=int(datasets[0][Config_section.P_TOTAL_RESIZES.value])+1
total_stages=int(datasets[0][Config_section.P_TOTAL_STAGES.value])
adr = datasets[0][Config_section.P_ADR.value]
for i in range(total_resizes):
#Not valid if trying to use thread strategy and adr(Async data) is 0
if adr==0 and (datasets[total_stages+1+i][Config_section.P_RESIZE_SPAWN_STRATEGY.value] == 2 or datasets[total_stages+1+i][Config_section.P_RESIZE_REDISTRIBUTION_STRATEGY.value] == 2):
return False
#Not valid if the strategies are different
if datasets[total_stages+1+i][Config_section.P_RESIZE_SPAWN_STRATEGY.value] != datasets[total_stages+1+i][Config_section.P_RESIZE_REDISTRIBUTION_STRATEGY.value]:
return False
#Not valid if resize is to the same amount of processes
if i>0:
if datasets[total_stages+1+i][Config_section.P_RESIZE_PROCS.value] == datasets[total_stages+i][Config_section.P_RESIZE_PROCS.value]:
return False
return True
def correct_adr(sdr, adr_percentage, w_general_dataset):
#TODO Take into account that both sdr and adr can have different values
if (adr_percentage != 0):
w_general_dataset[Config_section.P_ADR.value] = sdr * (adr_percentage/100)
w_general_dataset[Config_section.P_SDR.value] = sdr * ((100.0-adr_percentage)/100)
def create_output_files(common_output_name, general_data, resize_data, stage_data):
def read_parameter(level_index):
dictionary = write_datasets[ds_indexes[level_index]]
key = keys[level_index]
index = indexes[level_index]
max_index = mindexes[level_index]
values = lists[level_index]
finished=False
if(index == max_index):
index = 0
if(level_index+1 == len(lists)):
finished = True
else:
finished = read_parameter(level_index+1)
dictionary[key] = values[index]
if(key == Config_section.P_RESIZE_PROCS.value):
original_dictionary = datasets[ds_indexes[level_index]]
dictionary[Config_section.P_RESIZE_FACTORS.value] = original_dictionary[Config_section.P_RESIZE_FACTORS.value][index]
elif(key == Config_section.P_SDR.value or key == Config_section.P_ADR.value):
original_dictionary = datasets[ds_indexes[level_index]]
sdr = original_dictionary[Config_section.P_SDR.value]
adr_percentage = original_dictionary[Config_section.P_ADR.value][index]
correct_adr(sdr, adr_percentage, dictionary)
indexes[level_index] = index + 1
return finished
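# Note: read_parameter advances the multi-valued parameters like an odometer:
# level 0 moves first and, whenever a level's list wraps back to index 0, the
# next level advances once; when the last level wraps, finished=True and every
# combination has been generated.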
datasets=[general_data]
write_datasets=[general_data.copy()]
for dataset in resize_data:
datasets.append(dataset)
write_datasets.append(dataset.copy())
for dataset in stage_data:
datasets.append(dataset)
write_datasets.append(dataset.copy())
directory = "/Desglosed-" + str(date.today())
path = os.getcwd() + directory
os.mkdir(path, mode=0o775)
os.chdir(path)
lists=[] # Stores lists of those variables with multiple values
keys=[] # Stores keys of those variables with multiple values
indexes=[] # Stores actual index for each variable with multiple values
mindexes=[] # Stores len of lists of each variable with multiple values
ds_indexes=[] # Stores the index of the dataset where the variable is stored
#For each variable with a list of elements
for i in range(len(datasets)):
values_aux = list(datasets[i].values())
keys_aux = list(datasets[i].keys())
for j in range(len(values_aux)):
if type(values_aux[j]) == list and keys_aux[j] != Config_section.P_RESIZE_FACTORS.value:
keys.append(keys_aux[j])
lists.append(values_aux[j])
ds_indexes.append(i)
indexes.append(0)
mindexes.append(len(values_aux[j]))
#Get the first set of values
for i in range(len(lists)):
read_parameter(i)
#FIXME This should be done elsewhere
if (type(datasets[0][Config_section.P_SDR.value]) != list or type(datasets[0][Config_section.P_ADR.value]) != list):
sdr = datasets[0][Config_section.P_SDR.value]
adr_percentage = datasets[0][Config_section.P_ADR.value]
correct_adr(sdr, adr_percentage, write_datasets[0])
output_index=0
adr_corrected=False
while True:
if(check_sections_assumptions(write_datasets)):
write_output_file(write_datasets, common_output_name, output_index)
# for i in range(len(write_datasets)):
# print(write_datasets[i])
# print("\n\n\n------------------------------------------" + str(output_index) + " ADR=" + str(adr_corrected))
output_index+=1
finished = read_parameter(0)
if finished:
break
if(len(sys.argv) < 3):
print("Not enough arguments given.\nExpected usage: python3 read_multiple.py file.ini output_name")
exit(1)
name = sys.argv[1]
common_output_name = sys.argv[2]
general_data, resize_data, stage_data = process_file(name)
create_output_files(common_output_name, general_data, resize_data, stage_data)
[general]
Total_Resizes=1
Total_Stages=4
Granularity=100000
SDR=1000.0
ADR=0.0
Rigid=1
; end [general]
[stage0]
Stage_Type=0
Stage_Bytes=0
Stage_Time_Capped=0
Stage_Time=0.01235
;end [stage0]
[stage1]
Stage_Type=3
Stage_Bytes=0
Stage_Time_Capped=0
Stage_Time=0.03
;end [stage1]
[stage2]
Stage_Type=4
Stage_Bytes=0
Stage_Time_Capped=0
Stage_Time=0.0027915324
;end [stage2]
[stage3]
Stage_Type=4
Stage_Bytes=33176880
Stage_Time_Capped=0
Stage_Time=0.040449
;end [stage3]
[resize0]
Iters=5
Procs=2
FactorS=1
Dist=compact
Redistribution_Method=0
Redistribution_Strategy=1
Spawn_Method=0
Spawn_Strategy=1
;end [resize0]
[resize1]
Iters=30
Procs=4
FactorS=0.1
Dist=compact
Redistribution_Method=0
Redistribution_Strategy=1
Spawn_Method=0
Spawn_Strategy=1
;end [resize1]