Commit e83b5922 authored by Iker Martín Álvarez's avatar Iker Martín Álvarez

New version of Proteo

parent 26305fac
#!/bin/bash
scriptDir="$(dirname "$0")"
source $scriptDir/../../Codes/build/config.txt
valgrind --leak-check=full --show-leak-kinds=all --track-origins=yes --log-file=vg.tp.%p $PROTEO_BIN
#!/bin/bash
#SBATCH --exclude=c02,c01,c00
#SBATCH -p P1
dir="/home/martini/malleability_benchmark"
codeDir="/Codes"
ResultsDir="/Results"
nodelist=$SLURM_JOB_NODELIST
nodes=$SLURM_JOB_NUM_NODES
module load mpich-3.4.1-noucx
name_dir=$1
i=$2
procs_parents=$3
procs_sons=$4
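# A hedged invocation sketch (all values hypothetical); run.sh submits this script roughly as:
#   sbatch -N 2 arrayRun.sh /path/to/Results/1N-06-14 0 10 20
# i.e. results directory, starting run index 0, 10 parent processes, 20 son processes.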
#percs_array=(0 25 50 75 100)
percs_array=(0)
at_array=(0)
dist_array=(cpu)
cst_array=(0 1 2 3)
css_array=(0 1)
aux=$(($i + 1))
echo "START TEST init=$aux"
for adr_perc in "${percs_array[@]}"
do
for phy_dist in "${dist_array[@]}"
do
for ibarrier_use in "${at_array[@]}"
do
for cst in "${cst_array[@]}"
do
for css in "${css_array[@]}"
do
i=$(($i + 1))
cd $name_dir/Run$i
config_file="config$i.ini"
echo "EXEC $procs_parents -- $procs_sons -- $adr_perc -- $ibarrier_use -- $phy_dist -- $cst -- $css -- RUN $i"
for index in 1 2 3 4 5 6 7 8 9 10
do
numP=$(bash $dir$codeDir/recordMachinefile.sh $config_file) # Creates the hostfile
mpirun -f hostfile.o$SLURM_JOB_ID $dir$codeDir/bench.out $config_file $i $nodelist $nodes
rm hostfile.o$SLURM_JOB_ID
done
done
done
done
done
done
echo "END TEST"
sed -i 's/application called MPI_Abort(MPI_COMM_WORLD, -100) - process/shrink cleaning/g' slurm-$SLURM_JOB_ID.out
[general]
Total_Resizes=1
Total_Stages=4
Granularity=100000
SDR=1000.0
ADR=0.0
Rigid=1
;end [general]
[stage0]
Stage_Type=0
Stage_Bytes=0
Stage_Time_Capped=0
Stage_Time=0.01235
;end [stage0]
[stage1]
Stage_Type=3
Stage_Bytes=0
Stage_Time_Capped=0
Stage_Time=0.03
;end [stage1]
[stage2]
Stage_Type=4
Stage_Bytes=0
Stage_Time_Capped=0
Stage_Time=0.0027915324
;end [stage2]
[stage3]
Stage_Type=4
Stage_Bytes=33176880
Stage_Time_Capped=0
Stage_Time=0.040449
;end [stage3]
[resize0]
Iters=5
Procs=2
FactorS=1
Dist=compact
Redistribution_Method=0
Redistribution_Strategy=1
Spawn_Method=0
Spawn_Strategy=1
;end [resize0]
[resize1]
Iters=30
Procs=4
FactorS=0.1
Dist=compact
Redistribution_Method=0
Redistribution_Strategy=1
Spawn_Method=0
Spawn_Strategy=1
;end [resize1]
import sys

def general(f, resizes, matrix_tam, comm_tam, sdr, adr, aib, cst, css, time):
    f.write("[general]\n")
    f.write("resizes=" + resizes + "\n")
    f.write("matrix_tam=" + matrix_tam + "\n")
    f.write("comm_tam=" + comm_tam + "\n")
    f.write("SDR=" + sdr + "\n")
    f.write("ADR=" + adr + "\n")
    f.write("AIB=" + aib + "\n")
    f.write("CST=" + cst + "\n")
    f.write("CSS=" + css + "\n")
    f.write("time=" + time + "\n")
    f.write("; end [general]\n")

def resize_section(f, resize, iters, procs, factor, physical_dist):
    f.write("[resize" + resize + "]\n")
    f.write("iters=" + iters + "\n")
    f.write("procs=" + procs + "\n")
    f.write("factor=" + factor + "\n")
    f.write("physical_dist=" + physical_dist + "\n")
    f.write(";end [resize" + resize + "]\n")

if len(sys.argv) < 2:
    print("The config file name is missing\nUsage: python3 program nameFile args\nArgs: resizes matrix_tam SDR ADR AIB CST CSS time iters0 procs0 dist0 iters1 procs1 dist1 ...")
    exit(1)

if len(sys.argv) < 12:
    print("There are not enough arguments\nUsage: python3 program nameFile args\nArgs: resizes matrix_tam SDR ADR_perc AIB CST CSS time proc_time iters0 procs0 dist0 iters1 procs1 dist1 ...")
    exit(1)

name = sys.argv[1]
resizes = int(sys.argv[2])
matrix_tam = sys.argv[3]
comm_tam = sys.argv[4]
sdr = int(sys.argv[5])
adr_perc = float(sys.argv[6])
aib = sys.argv[7]
cst = sys.argv[8]
css = sys.argv[9]
time = sys.argv[10]
proc_time = float(sys.argv[11])  # Used to compute the factor of each process group

adr = (sdr * adr_perc) / 100
sdr = sdr - adr
adr = str(adr)
sdr = str(sdr)

factor = 0
f = open(name, "w")
general(f, str(resizes), matrix_tam, comm_tam, sdr, adr, aib, cst, css, time)

resizes = resizes + 1  # Internally, the initial processes appear as one more group
for resize in range(resizes):
    iters = sys.argv[12 + 3 * resize]
    procs = sys.argv[12 + 3 * resize + 1]
    physical_dist = sys.argv[12 + 3 * resize + 2]
    if proc_time != 0:  # If proc_time is 0, all groups get a factor of 1
        factor = proc_time / float(procs)
    else:
        factor = 1
    resize_section(f, str(resize), iters, procs, str(factor), physical_dist)

f.close()
exit(0)
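# A hedged usage sketch (all argument values hypothetical); run.sh calls this script
# with one (iters, procs, physical_dist) triple per process group, e.g.:
#   python3 create_ini.py config1.ini 1 1000 47192 100000 0 0 1 0 0.5 10 5 2 cpu 30 4 cpu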
#!/bin/bash
#SBATCH --exclude=c02,c01,c00
#SBATCH -p P1
# !!!!This script should only be called by other scripts, do not call it directly!!!
# Runs a given configuration file with the indicated parameters with the aid of the RMS Slurm.
# Parameter 1 - Number of cores in a single machine
# Parameter 2 - Configuration file name for the emulation.
# Parameter 3 - Use Valgrind(1), Extrae(2) or nothing(0).
# Parameter 4 - Index to use for the output files. Must be a positive integer.
# Parameter 5 - Amount of executions per file. Must be a positive number.
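# A hedged invocation sketch (all values hypothetical); callers such as singleRun.sh
# submit this script through Slurm roughly as:
#   sbatch -p P1 -N 2 -t 10 generalRun.sh 20 config1.ini 0 1 5
# i.e. 20 cores per machine, config1.ini, no external tool, output index 1, 5 executions.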
#====== Do not modify these values =======
execDir="/Exec"
echo "START TEST"
#$1 == cores
#$2 == configFile
#$3 == use_external
#$4 == outFileIndex
#$5 == qty
echo $@
if [ $# -lt 3 ]
then
echo "Internal ERROR generalRun.sh - Not enough arguments were given"
exit 1
fi
#READ PARAMETERS AND ENSURE CORRECTNESS
cores=$1
configFile=$2
use_external=$3
outFileIndex=$4
qty=1
if [ $# -ge 5 ]
then
qty=$5
fi
nodelist=$SLURM_JOB_NODELIST
if [ -z "$nodelist" ];
then
echo "Internal ERROR in generalRun.sh - Nodelist not provided"
exit 1
fi
numP=$(bash $PROTEO_HOME$execDir/BashScripts/getNumPNeeded.sh $configFile 0)
initial_nodelist=$(bash $PROTEO_HOME$execDir/BashScripts/createInitialNodelist.sh $numP $cores $nodelist)
#EXECUTE RUN
echo "Nodes=$nodelist"
if [ $use_external -eq 0 ] #NORMAL
then
for ((i=0; i<qty; i++))
do
echo "Run $i starts"
mpirun -hosts $initial_nodelist -np $numP $PROTEO_BIN $configFile $outFileIndex
echo "Run $i ends"
done
elif [ $use_external -eq 1 ] #VALGRIND
then
cp $PROTEO_HOME$execDir/Valgrind/worker_valgrind.sh .
for ((i=0; i<qty; i++))
do
echo "Run $i starts"
mpirun -hosts $initial_nodelist -np $numP valgrind --leak-check=full --show-leak-kinds=all --track-origins=yes --trace-children=yes --log-file=vg.sp.%p.$SLURM_JOB_ID.$i $PROTEO_BIN $configFile $outFileIndex
echo "Run $i ends"
done
else #EXTRAE
cp $PROTEO_HOME$execDir/Extrae/extrae.xml .
cp $PROTEO_HOME$execDir/Extrae/trace.sh .
cp $PROTEO_HOME$execDir/Extrae/worker_extrae.sh .
for ((i=0; i<qty; i++))
do
#FIXME Extrae has not been tested with the initial nodelist in mind - Could have some errors
srun -n$numP --mpi=pmi2 ./trace.sh $PROTEO_BIN $configFile $outFileIndex
done
fi
echo "END TEST"
sed -i 's/application called MPI_Abort(MPI_COMM_WORLD, -100) - process/shrink cleaning/g' slurm-$SLURM_JOB_ID.out
sed -i 's/Abort(-100)/shrink cleaning/g' slurm-$SLURM_JOB_ID.out
MAM_ID=$(($SLURM_JOB_ID % 1000))
rm MAM_HF_ID*$MAM_ID*.tmp
#!/bin/bash
# !!!!This script should only be called by other scripts, do not call it directly!!!
# Runs a given configuration file with the indicated parameters.
# Parameter 1 - Number of cores in a single machine
# Parameter 2 - Configuration file name for the emulation.
# Parameter 3 - Use Valgrind(1), Extrae(2) or nothing(0).
# Parameter 4 - Index to use for the output files. Must be a positive integer.
# Parameter 5 - Amount of executions per file. Must be a positive number.
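# A hedged invocation sketch (all values hypothetical); singleRunCostum.sh invokes
# this script roughly as:
#   bash generalRunCostum.sh 20 config1.ini 0 0 1
# i.e. 20 cores per machine, config1.ini, no external tool, output index 0, 1 execution.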
#====== Do not modify these values =======
execDir="/Exec"
echo "START TEST"
#$1 == cores
#$2 == configFile
#$3 == use_external
#$4 == outFileIndex
#$5 == qty
echo $@
if [ $# -lt 3 ]
then
echo "Internal ERROR generalRunCostum.sh - Not enough arguments were given"
exit 1
fi
#READ PARAMETERS AND ENSURE CORRECTNESS
cores=$1
configFile=$2
use_external=0
outFileIndex=0
qty=1
if [ $# -ge 3 ]
then
use_external=$3
fi
if [ $# -ge 4 ]
then
outFileIndex=$4
fi
if [ $# -ge 5 ]
then
qty=$5
fi
numP=$(bash $PROTEO_HOME$execDir/BashScripts/getNumPNeeded.sh $configFile 0)
nodelist=$SLURM_JOB_NODELIST
if [ -z "$nodelist" ];
then
nodelist="localhost"
initial_nodelist="localhost"
else
initial_nodelist=$(bash $PROTEO_HOME$execDir/BashScripts/createInitialNodelist.sh $numP $cores $nodelist)
fi
#EXECUTE RUN
echo "Nodes=$nodelist"
if [ $use_external -eq 0 ]
then
for ((i=0; i<qty; i++))
do
echo "Run $i starts"
mpirun -hosts $initial_nodelist -np $numP $PROTEO_BIN $configFile $outFileIndex
echo "Run $i ends"
done
elif [ $use_external -eq 1 ] #VALGRIND
then
cp $PROTEO_HOME$execDir/Valgrind/worker_valgrind.sh .
for ((i=0; i<qty; i++))
do
echo "Run $i starts"
mpirun -hosts $initial_nodelist -np $numP valgrind --leak-check=full --show-leak-kinds=all --track-origins=yes --trace-children=yes --log-file=vg.sp.%p.$SLURM_JOB_ID.$i $PROTEO_BIN $configFile $outFileIndex
echo "Run $i ends"
done
else
cp $PROTEO_HOME$execDir/Extrae/extrae.xml .
cp $PROTEO_HOME$execDir/Extrae/trace.sh .
cp $PROTEO_HOME$execDir/Extrae/worker_extrae.sh .
for ((i=0; i<qty; i++))
do
mpirun -hosts $initial_nodelist -np $numP ./trace.sh $PROTEO_BIN $configFile $outFileIndex
done
fi
echo "END TEST"
MAM_ID=$(($SLURM_JOB_ID % 1000))
rm MAM_HF_ID*$MAM_ID*.tmp
#!/bin/bash
# Creates a directory with all possible and valid combinations of configuration files
# that can be created from a given complex configuration file.
# Parameter 1: Complex configuration file name.
# Parameter 2: Common output name of the output configuration files. An index will be appended to each of them.
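# A hedged usage sketch (file names hypothetical): invoking this script as
#   bash <this script> complex.ini config
# should produce files such as config0.ini, config1.ini, ... one per valid combination.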
#====== Do not modify these values =======
scriptDir="$(dirname "$0")"
source $scriptDir/../Codes/build/config.txt
complex_file=$1
output_name=$2
python3 $PROTEO_HOME$execDir/PythonCodes/read_multiple.py $complex_file $output_name
echo "END GENERATION"
#!/bin/bash
dir="/home/martini/malleability_benchmark/"
codeDir="Codes/"
execDir="Exec/"
ResultsDir="Results/"
#TODO Add support for different phy_dist values for parents and children when executing
#TODO Take into account the number of cores of a node instead of it being fixed
if [[ $# -lt 9 ]]
then
echo "Faltan argumentos"
echo "bash run.sh grupos tam_computo tam_comm tam_resize tiempo proc_init iters first_iter node_qty"
exit -1
fi
echo "START TEST"
groups=$1 #TODO Modify so that it supports more than two process groups
matrix_tam=$2
comm_tam=$3
N_qty=$4 # Data to redistribute
time=$5
proc_init=$6 #The time per iteration refers to this number of processes
iters=$7
first_iter=$8
node_qty=$9
# If the value is 0, the first process group performs $iters iterations before resizing
if [[ $first_iter -eq 0 ]]
then
iters_first_group=$iters
# If the value is not 0, the first process group performs $first_iter iterations before resizing
else
iters_first_group=$first_iter
fi
max_procs=$(($node_qty * 20))
procs_array=(1 10)
#percs_array=(0 25 50 75 100)
percs_array=(0)
at_array=(0)
dist_array=(cpu)
cst_array=(0 1 2 3)
css_array=(0 1)
#Obtain the possible process counts to execute
i=0
#while [[ $value -lt $max_procs ]]
#do
# i=$(($i + 1))
# value=$((20 * $i))
# procs_array=(${procs_array[@]} $value)
#done
i=0
value=0
while [[ $value -lt $max_procs ]]
do
i=$(($i + 1))
value=$((2 ** $i))
value=$(($value * 10))
procs_array=(${procs_array[@]} $value)
done
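# e.g. with node_qty=4 (max_procs=80) the loop above yields procs_array=(1 10 20 40 80)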
#Create the results directory
cd $dir$ResultsDir
name_res=$node_qty"N-"$(date '+%m-%d')
if [ -d $name_res ] # If the directory already exists, slightly modify the name and create another one
then
name_res=$name_res"-"$(date '+%H:%M')
fi
echo "Results location: $dir$ResultsDir$name_res"
mkdir $name_res
# Run tests
i=0
j=0
for procs_parents in "${procs_array[@]}"
do
node_qty1=$(($procs_parents / 20))
for procs_sons in "${procs_array[@]}"
do
node_qty2=$(($procs_sons / 20))
if [ $node_qty1 -lt $node_qty2 ]
then
node_qty1=$node_qty2
fi
if [ $node_qty1 -eq 0 ]
then
node_qty1=1
fi
if [ $procs_parents -ne $procs_sons ]
then
for adr_perc in "${percs_array[@]}"
do
for phy_dist in "${dist_array[@]}"
do
for ibarrier_use in "${at_array[@]}"
do
for cst in "${cst_array[@]}"
do
for css in "${css_array[@]}"
do
i=$(($i + 1))
# Create a directory for this run
cd $dir$ResultsDir$name_res
mkdir Run$i
cd Run$i
# Create the configuration file
echo "Config $procs_parents -- $procs_sons -- $adr_perc -- $ibarrier_use -- $phy_dist -- $cst -- $css -- RUN $i"
array0=($iters_first_group $procs_parents $phy_dist)
array=("${array0[@]}")
array0=($iters $procs_sons $phy_dist)
array+=("${array0[@]}")
python3 $dir${execDir}create_ini.py config$i.ini 1 $matrix_tam $comm_tam $N_qty $adr_perc $ibarrier_use $cst $css $time $proc_init "${array[@]}"
done
done
done
done
done #adr_perc
start_i=$(($j * ${#percs_array[@]} * ${#dist_array[@]} * ${#at_array[@]} * ${#cst_array[@]} * ${#css_array[@]}))
# LAUNCH SCRIPT
sbatch -N $node_qty1 $dir${execDir}arrayRun.sh $dir$ResultsDir$name_res $start_i $procs_parents $procs_sons
j=$(($j + 1))
fi
done
done
echo "Localizacion de los resultados: $dir$ResultsDir$name_res"
echo "END TEST"
#!/bin/bash
partition="P1"
exclude="c00,c01,c02"
# Runs all .ini files in the current directory with the aid of the RMS
# Parameter 1(Optional) - Amount of executions per file. Must be a positive number
# Parameter 2(Optional) - Maximum amount of time in seconds needed by a single execution. Default value is 0, which indicates infinite time. Must be a positive integer.
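# A hedged usage sketch (values hypothetical): run every .ini file in the current
# directory 5 times, allowing up to 120 seconds per single execution:
#   bash <this script> 5 120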
#====== Do not modify these values =======
scriptDir="$(dirname "$0")"
source $scriptDir/../Codes/build/config.txt
cores=$(bash $PROTEO_HOME$execDir/BashScripts/getCores.sh $partition)
use_extrae=0
qty=1
if [ $# -ge 1 ]
then
qty=$1
fi
limit_time=$((0))
if [ $# -ge 2 ] #Max time per execution in seconds
then
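# sbatch -t expects minutes: convert the expected $2 seconds per run times $qty runs to minutes, plus one minute of slack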
limit_time=$(($2 * $qty / 60 + 1))
fi
files="./*.ini"
internalIndex=$(echo $files | tr -cd ' ' | wc -c)
index=$((0))
for config_file in $files
do
node_qty=$(bash $PROTEO_HOME$execDir/BashScripts/getMaxNodesNeeded.sh $config_file $cores)
outFileIndex=$(echo $config_file | sed s/[^0-9]//g)
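# e.g. config_file="./config12.ini" yields outFileIndex=12; names without digits fall back to internalIndex below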
if [[ $outFileIndex ]]; then
index=$outFileIndex
else
index=$internalIndex
((internalIndex++))
fi
#Execute test
echo "Execute job $index with Nodes=$node_qty and config_file=$config_file"
sbatch -p $partition --exclude=$exclude -N $node_qty -t $limit_time $PROTEO_HOME$execDir/generalRun.sh $cores $config_file $use_extrae $index $qty
done
echo "End"
#!/bin/bash
partition="P1"
exclude="c00,c01,c02"
# Executes a given configuration file with the aid of the RMS Slurm.
# Parameter 1: Configuration file name for the emulation.
# Parameter 2(Optional): Index to use for the output files. Must be a positive integer.
# Parameter 3(Optional): Number of repetitions to perform. Must be a positive integer.
# Parameter 4(Optional): Use Valgrind(1), Extrae(2) or nothing(0).
# Parameter 5(Optional): Maximum amount of time in seconds needed by a single execution. Default value is 0, which indicates infinite time. Must be a positive integer.
# Parameter 6(Optional): Path where the output files should be saved.
#====== Do not modify these values =======
scriptDir="$(dirname "$0")"
source $scriptDir/../Codes/build/config.txt
cores=$(bash $PROTEO_HOME$execDir/BashScripts/getCores.sh $partition)
if [ $# -lt 1 ]
then
echo "Not enough arguments. Usage:"
echo "bash singleRun.sh config.ini [outFileIndex] [Qty] [Use external] [Max time] [Output path]"
exit 1
fi
#$1 == configFile
#$2 == outFileIndex
#$3 == Qty of repetitions
#$4 == Use external NO(0) Valgrind(1), Extrae(2)
#$5 == Max time per execution(s)
#$6 == Output path
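# A hedged usage sketch (all values hypothetical): 3 repetitions of config10.ini
# under Valgrind, capped at 60 seconds each, moving the results to ./out:
#   bash singleRun.sh config10.ini 10 3 1 60 ./out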
config_file=$1
outFileIndex=0
qty=1
use_external=0
if [ $# -ge 2 ]
then
outFileIndex=$2
fi
if [ $# -ge 3 ]
then
qty=$3
fi
if [ $# -ge 4 ]
then
use_external=$4
fi
limit_time=$((0))
if [ $# -ge 5 ] #Max time per execution in seconds
then
limit_time=$(($5 * $qty / 60 + 1))
fi
if [ $# -ge 6 ]
then
output=$6
fi
#Obtain the amount of nodes needed
node_qty=$(bash $PROTEO_HOME$execDir/BashScripts/getMaxNodesNeeded.sh $config_file $cores)
#Run with the expected amount of nodes
sbatch -p $partition --exclude=$exclude -N $node_qty -t $limit_time $PROTEO_HOME$execDir/generalRun.sh $cores $config_file $use_external $outFileIndex $qty
if ! [ -z "$output" ]
then
mkdir -p $output
echo -e "Moving data to $output\nMoved files:"
ls R${outFileIndex}_G*
mv R${outFileIndex}_G* $output
if [ "$use_external" -eq 2 ] # Extrae additional output
then
mv a.out.* $output
mv TRACE* $output
mv set-0/ $output
elif [ "$use_external" -eq 1 ] # Valgrind additional output
then
mv vg.* $output
fi
fi
#!/bin/bash
cores=20
# Executes a given configuration file. This script can be called with Slurm commands to
# choose the desired user configuration.
# Parameter 1: Configuration file name for the emulation.
# Parameter 2(Optional): Index to use for the output files. Must be a positive integer.
# Parameter 3(Optional): Number of repetitions to perform. Must be a positive integer.
# Parameter 4(Optional): Use Valgrind(1), Extrae(2) or nothing(0).
# Parameter 5(Optional): Path where the output files should be saved.
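# A hedged usage sketch (all values hypothetical): 3 repetitions of config1.ini
# without external tools, moving the results to ./out:
#   bash singleRunCostum.sh config1.ini 1 3 0 ./out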
#====== Do not modify these values =======
scriptDir="$(dirname "$0")"
source $scriptDir/../Codes/build/config.txt
if [ $# -lt 1 ]
then
echo "Not enough arguments. Usage:"
echo "singleRunCostum.sh config.ini [outFileIndex] [Qty] [Use Extrae] [Output path]"
exit 1
fi
#$1 == configFile
#$2 == outFileIndex
#$3 == Qty of repetitions
#$4 == Use external NO(0) Valgrind(1), Extrae(2)
#$5 == Output path
config_file=$1
outFileIndex=0
qty=1
use_external=0
if [ $# -ge 2 ]
then
outFileIndex=$2
fi
if [ $# -ge 3 ]
then
qty=$3
fi
if [ $# -ge 4 ]
then
use_external=$4
fi
if [ $# -ge 5 ]
then
output=$5
fi
bash $PROTEO_HOME$execDir/generalRunCostum.sh $cores $config_file $use_external $outFileIndex $qty
if ! [ -z "$output" ]
then
mkdir -p $output
echo "Moving data to $output\nMoved files:"
ls R${outFileIndex}_G*
mv R${outFileIndex}_G* $output
if [ "$use_external" -eq 2 ] # Extrae additional output
then
mv a.out.* $output
mv TRACE* $output
mv set-0/ $output
elif [ "$use_external" -eq 1 ] # Valgrind additional output
then
mv vg.* $output
fi
fi
# Proteo - Dev branch
## Overview
This branch contains the codebase of the Proteo development branch.
## Branch Structure
This branch is divided into the following 4 directories:
- **Analysis**: Contains the scripts and notebook to perform analysis of Proteo executions.
- **Codes**: Contains all the codes used to compile Proteo.
- **Exec**: Contains the scripts to execute Proteo in different ways and check if the runs have completed successfully.
- **Results**: Contains the configuration files used for the malleable emulation of the CG.
## Installation
### Prerequisites
Before installing, ensure you have the following prerequisites:
- MPI (MPICH) installed on your system. This code has been tested with MPICH versions 3.4.1 and 4.0.3 with the OFI netmod.
- Slurm installed on your system. This code has been tested with slurm-wlm 19.05.5.
The following requirements are optional and only needed to process and analyse the data:
- Python 3 (Optional). Only if you want to perform the post-mortem processing or analyse the data.
- NumPy 1.24.3 (Optional). Only if you want to perform the post-mortem processing or analyse the data.
- Pandas 1.5.3 (Optional). Only if you want to perform the post-mortem processing or analyse the data.
- Seaborn 0.12.2 (Optional). Only if you want to analyse the data.
- Matplotlib 3.7.1 (Optional). Only if you want to analyse the data.
- SciPy 1.10.1 (Optional). Only if you want to analyse the data.
- scikit-posthocs 0.7.0 (Optional). Only if you want to analyse the data.
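If you plan to run the analysis, one way to install the optional Python packages (a sketch assuming a pip-based environment; the pinned versions are the tested ones listed above) is:
```bash
$ pip install numpy==1.24.3 pandas==1.5.3 seaborn==0.12.2 matplotlib==3.7.1 scipy==1.10.1 scikit-posthocs==0.7.0
```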
### Steps
1. Clone the repository to your local machine:
```bash
$ git clone http://lorca.act.uji.es/gitlab/martini/malleability_benchmark.git
$ cd malleability_benchmark
$ git checkout JournalSupercomputing23/24
```
2. Compile the code using the `make` command:
```bash
$ cd Codes/
$ make
```
This command compiles the code using the MPI (MPICH) library.
3. Test the installation:
```bash
$ cd ../Results
$ bash ../Exec/singleRun.sh test.ini
```
This test launches a Slurm job with a basic configuration file that performs a reconfiguration from 10 to 2 processes.
As soon as it ends, 4 files will appear: one is the Slurm output, and the other 3 are Proteo's output files.
Example of a successful run with expected output:
```bash
$ ls
R0_G0NP10ID0.out R0_G1NP2ID0.out R0_Global.out slurm-X.out
$ bash ../Exec/CheckRun.sh test 1 1 4 2 2 100
Number of G(2) and L(2) files match
SUCCESS
```
The slurm-X.out file is the output produced by the job, while the files beginning with an "R" are Proteo's output; their description can be found in the manual of this branch.
Lastly, the script CheckRun.sh indicates whether the execution was performed correctly. The value should be SUCCESS or REPEATING; in either case Proteo has been compiled correctly. If the value is FAILURE, a major error appeared and it is recommended to contact the code maintainer.
### Clean Up
To clean the installation and remove compiled binaries, use:
```bash
$ make clean
```
grep - */R* | grep Tex > errores.txt
grep == */slurm* > errores2.txt
To create a compressed archive:
tar -czf Datos.tar.gz 1N-06-14/