Commit b6569593 authored by iker_martin

Complete integration of process creation using the merge method. The simulation launch scripts have been modified accordingly.
parent 635dfb14
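For context, below is a minimal sketch of the merge-based process creation the commit message refers to, assuming the usual MPI idiom of MPI_Comm_spawn followed by MPI_Intercomm_merge; the function spawn_merge and its arguments are illustrative, not the benchmark's actual API.

#include <mpi.h>

/* Hypothetical sketch: spawn n_sons children running cmd and merge parents
 * and children into a single intracommunicator. */
MPI_Comm spawn_merge(char *cmd, int n_sons) {
    MPI_Comm intercomm, merged;
    MPI_Comm_spawn(cmd, MPI_ARGV_NULL, n_sons, MPI_INFO_NULL,
                   0 /* root */, MPI_COMM_WORLD,
                   &intercomm, MPI_ERRCODES_IGNORE);
    /* high = 0: the parent group takes the low ranks in the merged comm */
    MPI_Intercomm_merge(intercomm, 0, &merged);
    MPI_Comm_free(&intercomm);
    return merged;
}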
@@ -139,7 +139,7 @@ int main(int argc, char *argv[]) {
}
free_application_data();
if(group->myId == ROOT) MPI_Abort(MPI_COMM_WORLD, 0);
if(group->myId == ROOT) MPI_Abort(MPI_COMM_WORLD, -100);
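/* Note: -100 acts as a sentinel exit code for intentional shrink termination;
 * the run scripts below rewrite the resulting MPI_Abort message as
 * "shrink cleaning" in the SLURM logs (see the sed commands). */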
MPI_Finalize();
return 0;
......
#!/bin/bash
#SBATCH -N 1
#SBATCH --exclude=c01,c00
dir="/home/martini/malleability_benchmark"
codeDir="/Codes"
@@ -13,7 +14,8 @@ module load mpich-3.4.1-noucx
numP=$(bash recordMachinefile.sh $1)
#mpirun -f hostfile.o$SLURM_JOB_ID ./a.out $1 $2
mpirun -f hostfile.o$SLURM_JOB_ID $dir$codeDir/a.out $1 $2
mpirun -print-all-exitcodes -f hostfile.o$SLURM_JOB_ID $dir$codeDir/a.out $1 $2
rm hostfile.o$SLURM_JOB_ID
echo "END RUN"
sed -i 's/application called MPI_Abort(MPI_COMM_WORLD, -100) - process/shrink cleaning/g' slurm-$SLURM_JOB_ID.out
@@ -13,30 +13,41 @@ procs_parents=$3
procs_sons=$4
#percs_array=(0 25 50 75 100)
percs_array=(0)
at_array=(0)
dist_array=(cpu)
cst_array=(0 1 2 3)
css_array=(0 1)
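# Note: these sweep arrays are assumed to mirror the ones in the launch script
# further below, so that the Run$i / config$i.ini numbering stays in step.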
aux=$(($i + 1))
echo "START TEST init=$aux"
for adr_perc in "${percs_array[@]}"
do
for phy_dist in cpu node
for phy_dist in "${dist_array[@]}"
do
for ibarrier_use in 3 #TODO Simplify
for ibarrier_use in "${at_array[@]}"
do
i=$(($i + 1))
cd $name_dir/Run$i
config_file="config$i.ini"
for cst in "${cst_array[@]}"
do
for css in "${css_array[@]}"
do
echo "EXEC $procs_parents -- $procs_sons -- $adr_perc -- $ibarrier_use -- $phy_dist -- RUN $i"
i=$(($i + 1))
cd $name_dir/Run$i
config_file="config$i.ini"
for index in 1 2 3
do
numP=$(bash $dir$codeDir/recordMachinefile.sh $config_file) # Creates the hostfile
mpirun -f hostfile.o$SLURM_JOB_ID $dir$codeDir/./bench.out $config_file $i
rm hostfile.o$SLURM_JOB_ID
echo "EXEC $procs_parents -- $procs_sons -- $adr_perc -- $ibarrier_use -- $phy_dist -- $cst -- $css -- RUN $i"
for index in 1 2 3 4 5 6 7 8 9 10
do
numP=$(bash $dir$codeDir/recordMachinefile.sh $config_file) # Creates the hostfile
mpirun -f hostfile.o$SLURM_JOB_ID $dir$codeDir/./bench.out $config_file $i
rm hostfile.o$SLURM_JOB_ID
done
done
done
done
done
done
echo "END TEST"
sed -i 's/application called MPI_Abort(MPI_COMM_WORLD, -100) - process/shrink cleaning/g' slurm-$SLURM_JOB_ID.out
import sys
import glob
def general(f, resizes, matrix_tam, comm_tam, sdr, adr, aib, time):
def general(f, resizes, matrix_tam, comm_tam, sdr, adr, aib, cst, css, time):
f.write("[general]\n")
f.write("resizes=" + resizes +"\n")
f.write("matrix_tam=" + matrix_tam +"\n")
@@ -9,6 +9,8 @@ def general(f, resizes, matrix_tam, comm_tam, sdr, adr, aib, time):
f.write("SDR=" + sdr +"\n")
f.write("ADR=" + adr +"\n")
f.write("AIB=" + aib +"\n")
f.write("CST=" + cst +"\n")
f.write("CSS=" + css +"\n")
f.write("time=" + time +"\n")
f.write("; end [general]\n")
@@ -21,11 +23,11 @@ def resize_section(f, resize, iters, procs, factor, physical_dist):
f.write(";end [resize" + resize +"]\n")
if len(sys.argv) < 2:
print("The config file name is missing\nUsage: python3 program nameFile args\nArgs: resizes matrix_tam SDR ADR AIB time iters0 procs0 dist0 iters1 procs1 dist1 ...")
print("The config file name is missing\nUsage: python3 program nameFile args\nArgs: resizes matrix_tam SDR ADR AIB CST CSS time iters0 procs0 dist0 iters1 procs1 dist1 ...")
exit(1)
if len(sys.argv) < 12:
print("The are not enough arguments\nUsage: python3 program nameFile args\nArgs: resizes matrix_tam SDR ADR_perc AIB time proc_time iters0 procs0 dist0 iters1 procs1 dist1 ...")
print("The are not enough arguments\nUsage: python3 program nameFile args\nArgs: resizes matrix_tam SDR ADR_perc AIB CST CSS time proc_time iters0 procs0 dist0 iters1 procs1 dist1 ...")
exit(1)
name = sys.argv[1]
@@ -35,8 +37,10 @@ comm_tam = sys.argv[4]
sdr = int(sys.argv[5])
adr_perc = float(sys.argv[6])
aib = sys.argv[7]
time = sys.argv[8]
proc_time = float(sys.argv[9]) # Used to compute the factor of each process
cst = sys.argv[8]
css = sys.argv[9]
time = sys.argv[10]
proc_time = float(sys.argv[11]) # Used to compute the factor of each process
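# Resulting positional layout (sys.argv); indices 2-3 inferred from the usage string:
#   1:nameFile 2:resizes 3:matrix_tam 4:comm_tam 5:SDR 6:ADR_perc 7:AIB
#   8:CST 9:CSS 10:time 11:proc_time 12+: iters/procs/dist triples per resize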
adr = (sdr * adr_perc) / 100
sdr = sdr - adr
@@ -46,13 +50,13 @@ sdr = str(sdr)
factor = 0
f = open(name, "w")
general(f, str(resizes), matrix_tam, comm_tam, sdr, adr, aib, time)
general(f, str(resizes), matrix_tam, comm_tam, sdr, adr, aib, cst, css, time)
resizes = resizes + 1 # Internally, the initial processes are counted as one more group
for resize in range(resizes):
iters = sys.argv[10 + 3 * resize]
procs = sys.argv[10 + 3 * resize + 1]
physical_dist = sys.argv[10 + 3 * resize + 2]
iters = sys.argv[12 + 3 * resize]
procs = sys.argv[12 + 3 * resize + 1]
physical_dist = sys.argv[12 + 3 * resize + 2]
if proc_time != 0: # If the proc_time argument is 0, every group gets a factor of 1
factor = proc_time / float(procs)
......
@@ -36,10 +36,13 @@ else
iters_first_group=$first_iter
fi
max_procs=$(($node_qty * 20))
procs_array=(2 10)
procs_array=(1 10)
#percs_array=(0 25 50 75 100)
percs_array=(0)
at_array=(3)
at_array=(0)
dist_array=(cpu)
cst_array=(0 1 2 3)
css_array=(0 1)
#Determine the process counts that can be run
i=0
@@ -66,39 +69,57 @@ i=0
j=0
for procs_parents in "${procs_array[@]}"
do
node_qty1=$(($procs_parents / 20))
for procs_sons in "${procs_array[@]}"
do
for adr_perc in "${percs_array[@]}"
do
for phy_dist in cpu node
node_qty2=$(($procs_sons / 20))
if [ $node_qty1 -lt $node_qty2 ]
then
node_qty1=$node_qty2
fi
if [ $node_qty1 -eq 0 ]
then
node_qty1=1
fi
if [ $procs_parents -ne $procs_sons ]
then
for adr_perc in "${percs_array[@]}"
do
for ibarrier_use in "${at_array[@]}"
for phy_dist in "${dist_array[@]}"
do
i=$(($i + 1))
# Create the directory for this run
cd $dir$ResultsDir$name_res
mkdir Run$i
cd Run$i
# Create the configuration file
echo "Config $procs_parents -- $procs_sons -- $adr_perc -- $ibarrier_use -- $phy_dist -- RUN $i"
array0=($iters_first_group $procs_parents $phy_dist)
array=("${array0[@]}")
array0=($iters $procs_sons $phy_dist)
array+=("${array0[@]}")
python3 $dir$execDir/./create_ini.py config$i.ini 1 $matrix_tam $comm_tam $N_qty $adr_perc $ibarrier_use $time $proc_init "${array[@]}"
for ibarrier_use in "${at_array[@]}"
do
for cst in "${cst_array[@]}"
do
for css in "${css_array[@]}"
do
i=$(($i + 1))
# Create the directory for this run
cd $dir$ResultsDir$name_res
mkdir Run$i
cd Run$i
# Create the configuration file
echo "Config $procs_parents -- $procs_sons -- $adr_perc -- $ibarrier_use -- $phy_dist -- $cst -- $css -- RUN $i"
array0=($iters_first_group $procs_parents $phy_dist)
array=("${array0[@]}")
array0=($iters $procs_sons $phy_dist)
array+=("${array0[@]}")
python3 $dir$execDir/./create_ini.py config$i.ini 1 $matrix_tam $comm_tam $N_qty $adr_perc $ibarrier_use $cst $css $time $proc_init "${array[@]}"
done
done
done
done
done
done
start_i=$(($j * ${#percs_array[@]} * ${#at_array[@]} * 2)) #TODO update the last value whenever phy_dist changes
done #adr_perc
start_i=$(($j * ${#percs_array[@]} * ${#dist_array[@]} * ${#at_array[@]} * ${#cst_array[@]} * ${#css_array[@]}))
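# With the arrays above: 1 perc * 1 dist * 1 at * 4 cst * 2 css = 8 runs per
# (procs_parents, procs_sons) pair, so each submitted job starts at its own block of 8.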
# LANZAR SCRIPT
echo $aux
sbatch -N $node_qty $dir$execDir./arrayRun.sh $dir$ResultsDir$name_res $start_i $procs_parents $procs_sons
sbatch -N $node_qty1 $dir$execDir./arrayRun.sh $dir$ResultsDir$name_res $start_i $procs_parents $procs_sons
j=$(($j + 1))
fi
done
done
......
@@ -29,3 +29,4 @@ do
done
echo "END TEST"
sed -i 's/application called MPI_Abort(MPI_COMM_WORLD, -100) - process/shrink cleaning/g' slurm-$SLURM_JOB_ID.out