Commit 93ff2656 authored by iker_martin

More updates to exec scripts

parent a5e8654f
@@ -10,7 +10,12 @@ codeDir="/Codes"
nodelist=$SLURM_JOB_NODELIST
nodes=$SLURM_JOB_NUM_NODES
configFile=$1
-outIndex=$2
+outIndex=0
+if [ $# -ge 2 ]
+then
+outIndex=$2
+fi
echo "MPICH"
#module load mpich-3.4.1-noucx
@@ -23,7 +28,7 @@ numP=$(head -$fin $configFile | tail -$diff | cut -d ';' -f1 | grep Procs | cut -d '=' -f2)
ls /home/martini/malleability_benchmark/Codes/build/a.out
echo "Test PreRUN $numP"
echo "Test PreRUN $numP $nodes"
mpirun -np $numP $dir$codeDir/build/a.out $configFile $outIndex $nodelist $nodes
echo "END RUN"
@@ -7,8 +7,8 @@
dir="/home/martini/malleability_benchmark"
codeDir="/Codes"
-nodelist=$SLURM_JOB_NODELIST
-nodes=$SLURM_JOB_NUM_NODES
+nodelist="localhost"
+nodes=1
configFile=$1
outIndex=$2
@@ -21,7 +21,7 @@ read -r ini fin <<<$(echo $aux)
diff=$(( fin - ini ))
numP=$(head -$fin $1 | tail -$diff | cut -d ';' -f1 | grep Procs | cut -d '=' -f2)
-mpirun -np $numP valgrind --leak-check=full --show-leak-kinds=all --log-file=nc.vg.%p $dir$codeDir/build/a.out $configFile $outIndex $nodelist $nodes
+mpirun -np $numP valgrind --leak-check=full --show-leak-kinds=all --trace-children=yes --log-file=nc.vg.%p $dir$codeDir/build/a.out $configFile $outIndex $nodelist $nodes
echo "END RUN"
sed -i 's/application called MPI_Abort(MPI_COMM_WORLD, -100) - process/shrink cleaning/g' slurm-$SLURM_JOB_ID.out
#!/bin/bash
#SBATCH --exclude=c02,c01,c00
#SBATCH -p P1
dir="/home/martini/malleability_benchmark"
codeDir="/Codes"
ResultsDir="/Results"
nodelist=$SLURM_JOB_NODELIST
nodes=$SLURM_JOB_NUM_NODES
module load mpich-3.4.1-noucx
name_dir=$1
i=$2
procs_parents=$3
procs_sons=$4
#percs_array=(0 25 50 75 100)
percs_array=(0)
at_array=(0)
dist_array=(cpu)
cst_array=(0 1 2 3)
css_array=(0 1)
aux=$(($i + 1))
echo "START TEST init=$aux"
for adr_perc in "${percs_array[@]}"
do
  for phy_dist in "${dist_array[@]}"
  do
    for ibarrier_use in "${at_array[@]}"
    do
      for cst in "${cst_array[@]}"
      do
        for css in "${css_array[@]}"
        do
          i=$(($i + 1))
          cd $name_dir/Run$i
          configFile="config$i.ini"

          # Locate the [resize0] section of the config file and read the number of processes of the first group
          aux=$(grep "\[resize0\]" -n $configFile | cut -d ":" -f1)
          read -r ini fin <<<$(echo $aux)
          diff=$(( fin - ini ))
          numP=$(head -$fin $configFile | tail -$diff | cut -d ';' -f1 | grep Procs | cut -d '=' -f2)

          echo "EXEC $procs_parents -- $procs_sons -- $adr_perc -- $ibarrier_use -- $phy_dist -- $cst -- $css -- RUN $i"
          # Each configuration is executed 10 times
          for index in 1 2 3 4 5 6 7 8 9 10
          do
            mpirun -f hostfile.o$SLURM_JOB_ID $dir$codeDir/./bench.out $configFile $i $nodelist $nodes
            rm hostfile.o$SLURM_JOB_ID
          done
        done
      done
    done
  done
done
echo "END TEST"
sed -i 's/application called MPI_Abort(MPI_COMM_WORLD, -100) - process/shrink cleaning/g' slurm-$SLURM_JOB_ID.out
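# Example submission, mirroring the sbatch call made from run.sh (the results
# path and process counts below are illustrative only):
#   sbatch -N 2 arrayRun.sh /home/martini/malleability_benchmark/Results/2N-05-10 0 40 20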
import sys
import glob

# Writes the [general] section of the configuration file
def general(f, resizes, matrix_tam, comm_tam, sdr, adr, aib, cst, css, time):
    f.write("[general]\n")
    f.write("resizes=" + resizes + "\n")
    f.write("matrix_tam=" + matrix_tam + "\n")
    f.write("comm_tam=" + comm_tam + "\n")
    f.write("SDR=" + sdr + "\n")
    f.write("ADR=" + adr + "\n")
    f.write("AIB=" + aib + "\n")
    f.write("CST=" + cst + "\n")
    f.write("CSS=" + css + "\n")
    f.write("time=" + time + "\n")
    f.write("; end [general]\n")

# Writes one [resizeN] section of the configuration file
def resize_section(f, resize, iters, procs, factor, physical_dist):
    f.write("[resize" + resize + "]\n")
    f.write("iters=" + iters + "\n")
    f.write("procs=" + procs + "\n")
    f.write("factor=" + factor + "\n")
    f.write("physical_dist=" + physical_dist + "\n")
    f.write(";end [resize" + resize + "]\n")
if len(sys.argv) < 2:
    print("The config file name is missing\nUsage: python3 program nameFile args\nArgs: resizes matrix_tam SDR ADR AIB CST CSS time iters0 procs0 dist0 iters1 procs1 dist1 ...")
    exit(1)

if len(sys.argv) < 12:
    print("There are not enough arguments\nUsage: python3 program nameFile args\nArgs: resizes matrix_tam SDR ADR_perc AIB CST CSS time proc_time iters0 procs0 dist0 iters1 procs1 dist1 ...")
    exit(1)

name = sys.argv[1]
resizes = int(sys.argv[2])
matrix_tam = sys.argv[3]
comm_tam = sys.argv[4]
sdr = int(sys.argv[5])
adr_perc = float(sys.argv[6])
aib = sys.argv[7]
cst = sys.argv[8]
css = sys.argv[9]
time = sys.argv[10]
proc_time = float(sys.argv[11])  # Used to compute the factor of each process group

adr = (sdr * adr_perc) / 100
sdr = sdr - adr
adr = str(adr)
sdr = str(sdr)

factor = 0

f = open(name, "w")
general(f, str(resizes), matrix_tam, comm_tam, sdr, adr, aib, cst, css, time)

resizes = resizes + 1  # Internally, the initial processes appear as one more group
for resize in range(resizes):
    iters = sys.argv[12 + 3 * resize]
    procs = sys.argv[12 + 3 * resize + 1]
    physical_dist = sys.argv[12 + 3 * resize + 2]

    if proc_time != 0:  # If the proc_time argument is 0, every group gets a factor of 1
        factor = proc_time / float(procs)
        if proc_time != int(procs):
            factor = factor / 0.85  # Reduce the scalability by a fixed percentage
    else:
        factor = 1

    resize_section(f, str(resize), iters, procs, str(factor), physical_dist)

f.close()
exit(0)
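# Example invocation, mirroring the call made from run.sh; the numeric values
# below are illustrative only and not taken from any real experiment:
#   python3 create_ini.py config1.ini 1 1000 0 1000000 0 0 0 0 0.5 2.0 5 2 cpu 5 4 cpu
# With proc_time=2.0, the group with procs=2 gets factor=1.0 (proc_time == procs),
# while the group with procs=4 gets factor=(2.0/4)/0.85 ~= 0.588.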
#!/bin/bash
dir="/home/martini/malleability_benchmark/"
codeDir="Codes/"
execDir="Exec/"
ResultsDir="Results/"
#TODO Allow different phy_dist values for parents and children when executing
#TODO Take the number of cores per node into account instead of hardcoding it
if [[ $# -lt 9 ]]
then
  echo "Missing arguments"
  echo "bash run.sh groups matrix_tam comm_tam N_qty time proc_init iters first_iter node_qty"
  exit 1
fi
echo "START TEST"
groups=$1       #TODO Modify to support more than two process groups
matrix_tam=$2
comm_tam=$3
N_qty=$4        # Amount of data to redistribute
time=$5
proc_init=$6    # The time per iteration refers to this number of processes
iters=$7
first_iter=$8
node_qty=$9
# If the value is 0, the first process group performs $iters iterations before resizing
if [[ $first_iter -eq 0 ]]
then
  iters_first_group=$iters
# If the value is not 0, the first process group performs $first_iter iterations before resizing
else
  iters_first_group=$first_iter
fi
max_procs=$(($node_qty * 20))
procs_array=(1 10)
#percs_array=(0 25 50 75 100)
percs_array=(0)
at_array=(0)
dist_array=(cpu)
cst_array=(0 1 2 3)
css_array=(0 1)
# Compute the process counts that can be executed
i=0
#while [[ $value -lt $max_procs ]]
#do
# i=$(($i + 1))
# value=$((20 * $i))
# procs_array=(${procs_array[@]} $value)
#done
i=0
while [[ $value -lt $max_procs ]]
do
  i=$(($i + 1))
  value=$((2 ** $i))
  value=$(($value * 10))
  procs_array=(${procs_array[@]} $value)
done
i=$(($i + 1))
procs_array[$i]=120
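# Illustrative example (values not from the original script): with node_qty=6
# (max_procs=120) the loop yields procs_array=(1 10 20 40 80 160), and the
# assignment above replaces the last entry (160) with 120, giving (1 10 20 40 80 120).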
# Create the results folder
cd $dir$ResultsDir
name_res=$node_qty"N-"$(date '+%m-%d')
if [ -d $name_res ] # If the directory already exists, tweak the name slightly and create another one
then
  name_res=$name_res"-"$(date '+%H:%M')
fi
echo "Results location: $dir$ResultsDir$name_res"
mkdir $name_res
# Run the tests
i=0
j=0
for procs_parents in "${procs_array[@]}"
do
  node_qty1=$(($procs_parents / 20))  # 20 cores per node assumed (see TODO above)
  for procs_sons in "${procs_array[@]}"
  do
    node_qty2=$(($procs_sons / 20))

    # Request enough nodes for the larger of the two groups, and at least one node
    if [ $node_qty1 -lt $node_qty2 ]
    then
      node_qty1=$node_qty2
    fi
    if [ $node_qty1 -eq 0 ]
    then
      node_qty1=1
    fi

    if [ $procs_parents -ne $procs_sons ]
    then
      for adr_perc in "${percs_array[@]}"
      do
        for phy_dist in "${dist_array[@]}"
        do
          for ibarrier_use in "${at_array[@]}"
          do
            for cst in "${cst_array[@]}"
            do
              for css in "${css_array[@]}"
              do
                i=$(($i + 1))
                # Create the directory for this execution
                cd $dir$ResultsDir$name_res
                mkdir Run$i
                cd Run$i

                # Create the configuration file
                echo "Config $procs_parents -- $procs_sons -- $adr_perc -- $ibarrier_use -- $phy_dist -- $cst -- $css -- RUN $i"
                array0=($iters_first_group $procs_parents $phy_dist)
                array=("${array0[@]}")
                array0=($iters $procs_sons $phy_dist)
                array+=("${array0[@]}")
                python3 $dir$execDir/./create_ini.py config$i.ini 1 $matrix_tam $comm_tam $N_qty $adr_perc $ibarrier_use $cst $css $time $proc_init "${array[@]}"
              done
            done
          done
        done
      done #adr_perc

      start_i=$(($j * ${#percs_array[@]} * ${#dist_array[@]} * ${#at_array[@]} * ${#cst_array[@]} * ${#css_array[@]}))
      # LAUNCH THE SCRIPT
      echo $aux
      sbatch -N $node_qty1 $dir$execDir./arrayRun.sh $dir$ResultsDir$name_res $start_i $procs_parents $procs_sons
      j=$(($j + 1))
    fi
  done
done
echo "Localizacion de los resultados: $dir$ResultsDir$name_res"
echo "END TEST"