Commit 5eb9cddd authored by Iker Martín's avatar Iker Martín
Browse files

Version 2 de aplicación

parent 2663ec23
This diff is collapsed.
This diff is collapsed.
import sys
import glob
import numpy as np
import pandas as pd
def getData(lineS, outData, tp, hasIter = False):
    """Extract timing info from a list of 'key=value' tokens.

    Stores the value of the "time" key at outData[tp] and, when hasIter
    is True, the value of the "iters" key at outData[tp+1].

    FIX: the original left `time`/`iters` unbound (NameError) whenever the
    expected key was missing from the line; default both to 0 instead.
    """
    time = 0.0
    iters = 0
    for data in lineS:
        k_v = data.split('=')
        if k_v[0] == "time":
            time = float(k_v[1])
        elif k_v[0] == "iters" and hasIter:
            iters = int(k_v[1])
    outData[tp] = time
    if hasIter:
        outData[tp+1] = iters
#-----------------------------------------------
def record(f, observation, line):
    """Fill one observation row from a multi-line record.

    `line` is the record's first (general-info) line; the remaining lines
    are consumed directly from the open file iterator `f`.
    """
    # General info: columns 1..6 come from "key=value" tokens of the first line.
    tokens = line.split()
    for idx in range(1, 7):
        observation[idx] = int(tokens[idx].split('=')[1])
    # Process counts: one "key=value" token per column, starting at column 7.
    tokens = next(f).split()
    idx = 7
    for pair in tokens:
        observation[idx] = int(pair.split('=')[1])
        idx += 1
    # Four timing lines fill columns 9..12, one per line.
    for idx in range(9, 13):
        tokens = next(f).split()
        getData(tokens, observation, idx)
    # The last line carries both a time and an iteration count (columns 13, 14).
    tokens = next(f).split()
    getData(tokens, observation, 13, True)
#-----------------------------------------------
def read_file(f, dataA, dataB, it):
    """Parse one global-results file, appending one row per execution.

    Each execution contributes a global row to dataA and a malleability
    row to dataB. `it` is the index of the last row written so far and
    the updated value is returned.
    """
    recording = False
    pending = 0    # resize lines still expected for the current execution
    timer = 0      # countdown over the trailing Tspawn/Tsync/Tasync/Tex lines
    prev_np = 0
    for raw in f:
        tokens = raw.split()
        if not tokens:
            continue
        field = lambda i: tokens[i].split('=')[1].split(',')[0]
        if tokens[0] == "Config":            # header of a new execution
            recording = True
            it += 1
            dataA.append([None] * 8)
            dataB.append([None] * 11)
            pending = int(field(2))
            matrix = int(field(3))
            sdr = int(field(4))
            adr = int(field(5))              # TODO Que lo tome como porcentaje
            exec_time = float(tokens[7].split('=')[1])
            dataB[it][0], dataB[it][1] = sdr, adr
            dataB[it][5], dataB[it][6] = matrix, exec_time
            dataB[it][4] = ""
            dataA[it][0], dataA[it][1] = sdr, adr
            dataA[it][4], dataA[it][5] = matrix, exec_time
            dataA[it][3] = ""
        elif recording and pending != 0:     # one resize description
            iters = int(field(2))
            npr = int(field(3))
            dist = tokens[5].split('=')[1]
            dataB[it][7] = iters
            dataA[it][6] = iters
            pending -= 1
            if pending == 0:                 # last resize of this execution
                dataB[it][3] = npr
                dataB[it][4] += dist
                dataA[it][3] += dist
                dataA[it][2] = str(prev_np) + "," + str(npr)
                timer = 3
            else:
                dataB[it][2] = npr
                dataB[it][4] += dist + ","
                dataA[it][3] += dist + ","
                prev_np = npr
        else:                                # trailing time lines
            value = float(tokens[1])
            if timer == 3:
                dataB[it][8] = value
            elif timer == 2:
                dataB[it][9] = value
            elif timer == 1:
                dataB[it][10] = value
            else:
                dataA[it][7] = value
            timer -= 1
    return it
#columnsA1 = ["N", "%Async", "Groups", "Dist", "Matrix", "Time", "Iters", "TE"] #7
#columnsB1 = ["N", "%Async", "NP", "NS", "Dist", "Matrix", "Time", "Iters", "TC", "TS", "TA"] #10
#Config loaded: resizes=2, matrix=1000, sdr=1000000000, adr=0, aib=0, time=2.000000 || grp=1
#Resize 0: Iters=100, Procs=2, Factors=1.000000, Phy=2
#Resize 1: Iters=100, Procs=4, Factors=0.500000, Phy=2
#Tspawn: 0.249393
#Tsync: 0.330391
#Tasync: 0
#Tex: 301.428615
#-----------------------------------------------
# Command line: resultsName [directory] [csvOutName]
if len(sys.argv) < 2:
    print("The files name is missing\nUsage: python3 iterTimes.py resultsName directory csvOutName")
    exit(1)

if len(sys.argv) >= 3:
    BaseDir = sys.argv[2]
    print("Searching in directory: " + BaseDir)
else:
    # FIX: the original re-read sys.argv[2] here, which raises IndexError
    # when only the results name is given; search the current dir instead.
    BaseDir = ""

if len(sys.argv) >= 4:
    print("Csv name will be: " + sys.argv[3] + ".csv")
    name = sys.argv[3]
else:
    name = "data"

insideDir = "Run"
lista = glob.glob("./" + BaseDir + insideDir + "*/" + sys.argv[1] + "*Global.o*")
print("Number of files found: " + str(len(lista)))

it = -1
dataA = []
dataB = []
columnsA = ["N", "%Async", "Groups", "Dist", "Matrix", "Time", "Iters", "TE"]  # 8 columns
columnsB = ["N", "%Async", "NP", "NS", "Dist", "Matrix", "Time", "Iters", "TC", "TS", "TA"]  # 11 columns
for elem in lista:
    # `with` guarantees the handle is closed even if parsing fails
    with open(elem, "r") as f:
        it = read_file(f, dataA, dataB, it)

dfA = pd.DataFrame(dataA, columns=columnsA)
dfA.to_csv(name + '_G.csv')
dfB = pd.DataFrame(dataB, columns=columnsB)
dfB.to_csv(name + '_M.csv')
import sys
import glob
import numpy as np
import pandas as pd
#-----------------------------------------------
def read_file(f, data, it):
    """Parse an iteration-level results file, appending one row per iteration.

    Every execution is announced by a "Config Group" line followed by a
    Titer/Ttype/Top triplet of lines; each Titer observation becomes one
    row in `data`. Returns the updated row count `it`.
    """
    matrix = sdr = adr = 0
    time = 0
    recording = False
    it_line = 0
    aux_it = 0
    iters = 0
    n_procs = n_parents = n_sons = 0
    for raw in f:
        tokens = raw.split()
        if len(tokens) > 1:
            if recording:
                aux_it = 0
                tokens.pop(0)          # drop the "Titer:/Ttype:/Top:" label
                if it_line == 0:       # Titer: create one row per observation
                    for obs in tokens:
                        data.append([None] * 11)
                        row = data[it + aux_it]
                        row[0], row[1] = sdr, adr
                        row[2], row[3], row[4] = n_procs, n_parents, n_sons
                        row[5], row[6], row[7] = matrix, time, iters
                        row[8] = float(obs)
                        aux_it += 1
                    it_line = it_line + 1
                elif it_line == 1:     # Ttype
                    for obs in tokens:
                        data[it + aux_it][9] = float(obs)
                        aux_it += 1
                    it_line = it_line + 1
                else:                  # Top: last line of the triplet
                    for obs in tokens:
                        data[it + aux_it][10] = float(obs)
                        aux_it += 1
                    it = it + aux_it
                    recording = False
                    it_line = 0
            # TODO Que tome adr como porcentaje
            if tokens[0] == "Config:":
                matrix = int(tokens[1].split('=')[1].split(',')[0])
                sdr = int(tokens[2].split('=')[1].split(',')[0])
                adr = int(tokens[3].split('=')[1].split(',')[0])
                time = float(tokens[5].split('=')[1])
            elif tokens[0] == "Config":
                recording = True
                iters = int(tokens[2].split('=')[1].split(',')[0])
                n_procs = int(tokens[5].split('=')[1].split(',')[0])
                n_parents = int(tokens[6].split('=')[1].split(',')[0])
                n_sons = int(float(tokens[7].split('=')[1]))
    return it
#-----------------------------------------------
#Config: matrix=1000, sdr=1000000000, adr=0, aib=0 time=2.000000
#Config Group: iters=100, factor=1.000000, phy=2, procs=2, parents=0, sons=4
#Ttype: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
# Command line: resultsName [directory] [csvOutName]
if len(sys.argv) < 2:
    print("The files name is missing\nUsage: python3 iterTimes.py resultsName directory csvOutName")
    exit(1)

if len(sys.argv) >= 3:
    BaseDir = sys.argv[2]
    print("Searching in directory: " + BaseDir)
else:
    # FIX: the original re-read sys.argv[2] here, which raises IndexError
    # when only the results name is given; search the current dir instead.
    BaseDir = ""

if len(sys.argv) >= 4:
    print("Csv name will be: " + sys.argv[3] + ".csv")
    name = sys.argv[3]
else:
    name = "data"

insideDir = "Run"
lista = glob.glob("./" + BaseDir + insideDir + "*/" + sys.argv[1] + "*ID*.o*")
print("Number of files found: " + str(len(lista)))

it = 0
data = []
columns = ["N", "%Async", "NP", "N_par", "NS", "Matrix", "Time", "Iters", "Ti", "Tt", "To"]  # 11 columns
for elem in lista:
    # `with` guarantees the handle is closed even if parsing fails
    with open(elem, "r") as f:
        it = read_file(f, data, it)

df = pd.DataFrame(data, columns=columns)
df.to_csv(name + '.csv')
import sys
import glob
import numpy as np
import pandas as pd
def getData(lineS, outData, tp, hasIter = False):
    """Extract timing info from a list of 'key=value' tokens.

    Stores the value of the "time" key at outData[tp] and, when hasIter
    is True, the value of the "iters" key at outData[tp+1].

    FIX: the original left `time`/`iters` unbound (NameError) whenever the
    expected key was missing from the line; default both to 0 instead.
    """
    time = 0.0
    iters = 0
    for data in lineS:
        k_v = data.split('=')
        if k_v[0] == "time":
            time = float(k_v[1])
        elif k_v[0] == "iters" and hasIter:
            iters = int(k_v[1])
    outData[tp] = time
    if hasIter:
        outData[tp+1] = iters
#-----------------------------------------------
def record(f, observation, line):
    """Fill one observation row from a multi-line record.

    `line` is the record's first (general-info) line; the remaining lines
    are consumed directly from the open file iterator `f`.
    """
    # General info: columns 1..6 come from "key=value" tokens of the first line.
    tokens = line.split()
    for idx in range(1, 7):
        observation[idx] = int(tokens[idx].split('=')[1])
    # Process counts: one "key=value" token per column, starting at column 7.
    tokens = next(f).split()
    idx = 7
    for pair in tokens:
        observation[idx] = int(pair.split('=')[1])
        idx += 1
    # Four timing lines fill columns 9..12, one per line.
    for idx in range(9, 13):
        tokens = next(f).split()
        getData(tokens, observation, idx)
    # The last line carries both a time and an iteration count (columns 13, 14).
    tokens = next(f).split()
    getData(tokens, observation, 13, True)
#-----------------------------------------------
def read_file(f, dataA, dataB, it):
    """Parse one v2 global-results file (comm_tam/cst/css fields present).

    Each execution contributes a global row to dataA (13 columns) and a
    malleability row to dataB (15 columns). `it` is the index of the last
    row written so far; the updated value is returned.
    """
    recording = False
    pending = 0    # resize lines still expected for the current execution
    timer = 0      # countdown over Tspawn/Tthread/Tsync/Tasync/Tex lines
    prev_np = 0
    for raw in f:
        tokens = raw.split()
        if not tokens:
            continue
        field = lambda i: tokens[i].split('=')[1].split(',')[0]
        if tokens[0] == "Config":            # header of a new execution
            recording = True
            it += 1
            dataA.append([None] * 13)
            dataB.append([None] * 15)
            # pending = int(field(2))
            pending = 2                      # hard-coded, as in the original: resizes field ignored
            compute_tam = int(field(3))
            comm_tam = int(field(4))
            sdr = int(field(5))
            adr = int(field(6))              # TODO Que lo tome como porcentaje
            css = int(field(8))
            cst = int(field(9))
            # TODO Que obtenga Aib
            exec_time = float(tokens[10].split('=')[1])
            dataB[it][0], dataB[it][1] = sdr, adr
            dataB[it][4] = ""
            dataB[it][5], dataB[it][6] = compute_tam, comm_tam
            dataB[it][7], dataB[it][8] = cst, css
            dataB[it][9] = exec_time
            dataB[it][10] = ""
            dataA[it][0], dataA[it][1] = sdr, adr
            dataA[it][5] = ""
            dataA[it][6], dataA[it][7] = compute_tam, comm_tam
            dataA[it][8], dataA[it][9] = cst, css
            dataA[it][10] = exec_time
            dataA[it][11] = ""
        elif recording and pending != 0:     # one resize description
            iters = int(field(2))
            npr = int(field(3))
            dist = tokens[5].split('=')[1]
            pending -= 1
            if pending == 0:                 # last resize of this execution
                dataB[it][3] = npr
                dataB[it][4] += dist
                dataB[it][10] += str(iters)
                dataA[it][4] = npr           # FIXME No sera correcta si hay mas de una reconfig
                dataA[it][2] = str(prev_np) + "," + str(npr)
                dataA[it][5] += dist
                dataA[it][11] += str(iters)
                timer = 4
            else:
                dataB[it][2] = npr
                dataB[it][4] += dist + ","
                dataB[it][10] += str(iters) + ","
                dataA[it][3] = npr
                dataA[it][5] += dist + ","
                dataA[it][11] += str(iters) + ","
                prev_np = npr
        else:                                # trailing time lines
            value = float(tokens[1])
            if timer == 4:
                dataB[it][11] = value
            elif timer == 3:
                dataB[it][12] = value
            elif timer == 2:
                dataB[it][13] = value
            elif timer == 1:
                dataB[it][14] = value
            else:
                dataA[it][12] = value
            timer -= 1
    return it
#columnsA1 = ["N", "%Async", "Groups", "Dist", "Matrix", "CommTam", "Cst", "Css", "Time", "Iters", "TE"] #8
#columnsB1 = ["N", "%Async", "NP", "NS", "Dist", "Matrix", "CommTam", "Cst", "Css", "Time", "Iters", "TC", "TS", "TA"] #12
#Config loaded: resizes=2, matrix=1000, sdr=1000000000, adr=0, aib=0, time=2.000000 || grp=1
#Resize 0: Iters=100, Procs=2, Factors=1.000000, Phy=2
#Resize 1: Iters=100, Procs=4, Factors=0.500000, Phy=2
#Tspawn: 0.249393
#Tthread: 0
#Tsync: 0.330391
#Tasync: 0
#Tex: 301.428615
#Config loaded: resizes=1, matrix=0, comm_tam=0, sdr=0, adr=0, aib=0, cst=3, css=1, time=1 || grp=1
#-----------------------------------------------
# Command line: resultsName [directory] [csvOutName]
if len(sys.argv) < 2:
    print("The files name is missing\nUsage: python3 iterTimes.py resultsName directory csvOutName")
    exit(1)

if len(sys.argv) >= 3:
    BaseDir = sys.argv[2]
    print("Searching in directory: " + BaseDir)
else:
    # FIX: the original re-read sys.argv[2] here, which raises IndexError
    # when only the results name is given; search the current dir instead.
    BaseDir = ""

if len(sys.argv) >= 4:
    print("Csv name will be: " + sys.argv[3] + "G.csv & " + sys.argv[3] + "M.csv")
    name = sys.argv[3]
else:
    name = "data"

insideDir = "Run"
lista = glob.glob("./" + BaseDir + insideDir + "*/" + sys.argv[1] + "*Global.o*")
print("Number of files found: " + str(len(lista)))

it = -1
dataA = []
dataB = []
columnsA = ["N", "%Async", "Groups", "NP", "NS", "Dist", "Matrix", "CommTam", "Cst", "Css", "Time", "Iters", "TE"]  # 13 columns
columnsB = ["N", "%Async", "NP", "NS", "Dist", "Matrix", "CommTam", "Cst", "Css", "Time", "Iters", "TC", "TH", "TS", "TA"]  # 15 columns
for elem in lista:
    # `with` guarantees the handle is closed even if parsing fails
    with open(elem, "r") as f:
        it = read_file(f, dataA, dataB, it)

dfA = pd.DataFrame(dataA, columns=columnsA)
dfA.to_csv(name + 'G.csv')
dfB = pd.DataFrame(dataB, columns=columnsB)
# Where a thread timer (TH) was recorded, swap TC and TH so that TC keeps
# the real value and TH the time the application needed (original intent:
# "Poner en TC el valor real y en TH el necesario para la app").
cond = dfB.TH != 0
dfB.loc[cond, ['TC', 'TH']] = dfB.loc[cond, ['TH', 'TC']].values
dfB.to_csv(name + 'M.csv')
Esta carpeta contiene códigos para poder analizar los resultados obtenidos.
Para utilizar los códigos es necesario Python con los módulos Numpy y Pandas.
El código analyser.ipynb necesita además de la aplicación JupyterLab.
Los códigos son los siguientes:
- Malltimes.py: Recoge los tiempos globales de maleabilidad y ejecución de todos los ficheros pasados como argumento y
los almacena en dos ficheros CSV para ser utilizados en analyser.ipynb
- Itertimes.py: Recoge los tiempos locales de iteraciones de un grupo de procesos de todos los ficheros pasados como
argumento y los almacena en un fichero CSV para ser utilizado en analyser.ipynb
+ Ejemplo de uso de ambos códigos (Esperan los mismos argumentos):
python3 Malltimes.py NombreFicheros DirectorioFicheros/ NombreCSV
NombreFicheros: La parte común de los ficheros, los códigos buscan solo aquellos nombres que empiecen por esta cadena.
Por defecto, con poner "R" es suficiente.
DirectorioFicheros/: Nombre del directorio donde se encuentran todos los resultados. Está pensado para que busque
en todos los subdirectorios que tenga en el primer nivel, pero no en segundos niveles o más.
NombreCSV: Nombre del fichero CSV en el que escribir la recopilación de resultados.
- analyser.ipynb: Código para ser ejecutado por JupyterNotebook. Dentro del mismo hay que indicar los nombres de los
ficheros CSV a analizar y tras ello ejecutar las celdas. Como resultado se obtienen tres ficheros XLSX e imágenes
en el directorio "Images", y además varios resultados sobre T-test entre varios resultados que se reflejan como
output en la salida estándar de JupyterNotebook.
This diff is collapsed.
This diff is collapsed.
import sys
import glob
import numpy as numpy
import pandas as pd
#-----------------------------------------------
def read_file(f, dataA, dataB, itA, itB):
    """Parse an iteration-level results file (v2 format).

    Each execution contributes one dataA row per iteration (filled from
    the Titer/Ttype/Top line triplet) and, for groups with children
    (sons != 0), one aggregated dataB row whose last two columns hold the
    tuple of synchronous iteration times and their sum.
    Returns the updated row counters (itA, itB).
    """
    compute_tam = comm_tam = 0
    sdr = adr = 0
    dist = 0
    css = cst = 0
    time = 0
    recording = False
    it_line = 0
    aux_itA = 0
    iters = 0
    n_procs = n_parents = n_sons = 0
    sync_times = []
    labels = ['Titer', 'Ttype', 'Top']
    for raw in f:
        tokens = raw.split()
        if len(tokens) > 1:
            if recording and tokens[0].split(':')[0] in labels:  # data triplet
                aux_itA = 0
                tokens.pop(0)          # drop the "Titer:/Ttype:/Top:" label
                if it_line == 0:       # Titer: one new row per observation
                    for obs in tokens:
                        dataA.append([None] * 15)
                        row = dataA[itA + aux_itA]
                        row[0], row[1] = sdr, adr
                        row[2], row[3], row[4] = n_procs, n_parents, n_sons
                        row[5] = dist
                        row[6], row[7] = compute_tam, comm_tam
                        row[8], row[9] = cst, css
                        row[10], row[11] = time, iters
                        row[12] = float(obs)
                        sync_times.append(float(obs))
                        aux_itA += 1
                elif it_line == 1:     # Ttype
                    deleted = 0
                    for obs in tokens:
                        dataA[itA + aux_itA][13] = float(obs)
                        if float(obs) == 0:
                            # iteration overlapped with async comms: exclude
                            # it from the synchronous total
                            sync_times.pop(aux_itA - deleted)
                            deleted += 1
                        aux_itA += 1
                else:                  # Top
                    for obs in tokens:
                        dataA[itA + aux_itA][14] = float(obs)
                        aux_itA += 1
                it_line += 1
                if it_line % 3 == 0:   # triplet complete for this execution
                    recording = False
                    it_line = 0
                    itA = itA + aux_itA
                    if n_sons != 0:    # only groups with children go to dataB
                        dataB.append([None] * 14)
                        row = dataB[itB]
                        row[0], row[1] = sdr, adr
                        row[2], row[3], row[4] = n_procs, n_parents, n_sons
                        row[5] = dist
                        row[6], row[7] = compute_tam, comm_tam
                        row[8], row[9] = cst, css
                        row[10], row[11] = time, iters
                        row[12] = tuple(sync_times)
                        row[13] = numpy.sum(sync_times)
                        itB += 1
                    sync_times = []
            if tokens[0] == "Config:":
                compute_tam = int(tokens[1].split('=')[1].split(',')[0])
                comm_tam = int(tokens[2].split('=')[1].split(',')[0])
                sdr = int(tokens[3].split('=')[1].split(',')[0])
                adr = int(tokens[4].split('=')[1].split(',')[0])
                css = int(tokens[6].split('=')[1].split(',')[0])
                cst = int(tokens[7].split('=')[1].split(',')[0])
                time = float(tokens[8].split('=')[1])
            elif tokens[0] == "Config":
                recording = True
                iters = int(tokens[2].split('=')[1].split(',')[0])
                dist = int(tokens[4].split('=')[1].split(',')[0])
                n_procs = int(tokens[5].split('=')[1].split(',')[0])
                n_parents = int(tokens[6].split('=')[1].split(',')[0])
                n_sons = int(float(tokens[7].split('=')[1]))
    return itA, itB
#-----------------------------------------------
#Config: matrix=1000, sdr=1000000000, adr=0, aib=0 time=2.000000
#Config Group: iters=100, factor=1.000000, phy=2, procs=2, parents=0, sons=4
#Ttype: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
# Command line: resultsName [directory] [csvOutName]
if len(sys.argv) < 2:
    print("The files name is missing\nUsage: python3 iterTimes.py resultsName directory csvOutName")
    exit(1)

if len(sys.argv) >= 3:
    BaseDir = sys.argv[2]
    print("Searching in directory: " + BaseDir)
else:
    # FIX (was marked #FIXME): the original re-read sys.argv[2] here, which
    # raises IndexError when only the results name is given; default to the
    # current directory instead.
    BaseDir = ""

if len(sys.argv) >= 4:
    print("Csv name will be: " + sys.argv[3] + ".csv and "+ sys.argv[3] + "_Total.csv")
    name = sys.argv[3]
else:
    name = "data"

insideDir = "Run"
lista = glob.glob("./" + BaseDir + insideDir + "*/" + sys.argv[1] + "*ID*.o*")
print("Number of files found: " + str(len(lista)))

itA = itB = 0
dataA = []
dataB = []
columnsA = ["N", "%Async", "NP", "N_par", "NS", "Dist", "Compute_tam", "Comm_tam", "Cst", "Css", "Time", "Iters", "Ti", "Tt", "To"]  # 15 columns
columnsB = ["N", "%Async", "NP", "N_par", "NS", "Dist", "Compute_tam", "Comm_tam", "Cst", "Css", "Time", "Iters", "Ti", "Sum"]  # 14 columns
for elem in lista:
    # `with` guarantees the handle is closed even if parsing fails
    with open(elem, "r") as f:
        itA, itB = read_file(f, dataA, dataB, itA, itB)

# N initially holds only the synchronous part; fold in the asynchronous
# bytes so N is the total, then express %Async relative to that total.
dfA = pd.DataFrame(dataA, columns=columnsA)
dfA['N'] += dfA['%Async']
dfA['%Async'] = (dfA['%Async'] / dfA['N']) * 100
dfA.to_csv(name + '.csv')

dfB = pd.DataFrame(dataB, columns=columnsB)
dfB['N'] += dfB['%Async']
dfB['%Async'] = (dfB['%Async'] / dfB['N']) * 100
dfB.to_csv(name + '_Total.csv')
import sys
import glob
import numpy as numpy
import pandas as pd

# Joins two CSV result files into a single CSV, dropping the stale index
# column carried over from the inputs.
if len(sys.argv) < 3:
    print("The files name is missing\nUsage: python3 joinDf.py resultsName1.csv resultsName2.csv csvOutName")
    exit(1)

if len(sys.argv) >= 4:
    print("Csv name will be: " + sys.argv[3] + ".csv")
    name = sys.argv[3]
else:
    name = "dataJOINED"

first = pd.read_csv(sys.argv[1])
second = pd.read_csv(sys.argv[2])
joined = pd.concat([first, second])
# The first column is the old per-file index written by to_csv; drop it.
joined = joined.drop(columns=joined.columns[0])
joined.to_csv(name + '.csv')
...@@ -33,12 +33,18 @@ static int handler(void* user, const char* section, const char* name, ...@@ -33,12 +33,18 @@ static int handler(void* user, const char* section, const char* name,
} else if (MATCH("general", "matrix_tam")) { } else if (MATCH("general", "matrix_tam")) {
pconfig->matrix_tam = atoi(value); pconfig->matrix_tam = atoi(value);
} else if (MATCH("general", "comm_tam")) {
pconfig->comm_tam = atoi(value);
} else if (MATCH("general", "SDR")) { } else if (MATCH("general", "SDR")) {
pconfig->sdr = atoi(value); pconfig->sdr = atoi(value);
} else if (MATCH("general", "ADR")) { } else if (MATCH("general", "ADR")) {
pconfig->adr = atoi(value); pconfig->adr = atoi(value);
} else if (MATCH("general", "AIB")) { } else if (MATCH("general", "AIB")) { //TODO Refactor cambiar nombre
pconfig->aib = atoi(value); pconfig->aib = atoi(value);
} else if (MATCH("general", "CST")) {
pconfig->cst = atoi(value);
} else if (MATCH("general", "CSS")) {
pconfig->css = atoi(value);
} else if (MATCH("general", "time")) { } else if (MATCH("general", "time")) {
pconfig->general_time = atof(value); pconfig->general_time = atof(value);
...@@ -134,8 +140,8 @@ void free_config(configuration *user_config) { ...@@ -134,8 +140,8 @@ void free_config(configuration *user_config) {
void print_config(configuration *user_config, int grp) { void print_config(configuration *user_config, int grp) {
if(user_config != NULL) { if(user_config != NULL) {
int i; int i;
printf("Config loaded: resizes=%d, matrix=%d, sdr=%d, adr=%d, aib=%d, time=%f || grp=%d\n", printf("Config loaded: resizes=%d, matrix=%d, comm_tam=%d, sdr=%d, adr=%d, aib=%d, css=%d, cst=%d, time=%f || grp=%d\n",
user_config->resizes, user_config->matrix_tam, user_config->sdr, user_config->adr, user_config->aib, user_config->general_time, grp); user_config->resizes, user_config->matrix_tam, user_config->comm_tam, user_config->sdr, user_config->adr, user_config->aib, user_config->css, user_config->cst, user_config->general_time, grp);
for(i=0; i<user_config->resizes; i++) { for(i=0; i<user_config->resizes; i++) {
printf("Resize %d: Iters=%d, Procs=%d, Factors=%f, Phy=%d\n", printf("Resize %d: Iters=%d, Procs=%d, Factors=%f, Phy=%d\n",
i, user_config->iters[i], user_config->procs[i], user_config->factors[i], user_config->phy_dist[i]); i, user_config->iters[i], user_config->procs[i], user_config->factors[i], user_config->phy_dist[i]);
...@@ -159,8 +165,8 @@ void print_config_group(configuration *user_config, int grp) { ...@@ -159,8 +165,8 @@ void print_config_group(configuration *user_config, int grp) {
sons = user_config->procs[grp+1]; sons = user_config->procs[grp+1];
} }
printf("Config: matrix=%d, sdr=%d, adr=%d, aib=%d time=%f\n", printf("Config: matrix=%d, comm_tam=%d, sdr=%d, adr=%d, aib=%d, css=%d, cst=%d, time=%f\n",
user_config->matrix_tam, user_config->sdr, user_config->adr, user_config->aib, user_config->general_time); user_config->matrix_tam, user_config->comm_tam, user_config->sdr, user_config->adr, user_config->aib, user_config->css, user_config->cst, user_config->general_time);
printf("Config Group: iters=%d, factor=%f, phy=%d, procs=%d, parents=%d, sons=%d\n", printf("Config Group: iters=%d, factor=%f, phy=%d, procs=%d, parents=%d, sons=%d\n",
user_config->iters[grp], user_config->factors[grp], user_config->phy_dist[grp], user_config->procs[grp], parents, sons); user_config->iters[grp], user_config->factors[grp], user_config->phy_dist[grp], user_config->procs[grp], parents, sons);
} }
...@@ -245,15 +251,15 @@ configuration *recv_config_file(int root, MPI_Comm intercomm) { ...@@ -245,15 +251,15 @@ configuration *recv_config_file(int root, MPI_Comm intercomm) {
* de la estructura de configuracion con una sola comunicacion. * de la estructura de configuracion con una sola comunicacion.
*/ */
void def_struct_config_file(configuration *config_file, MPI_Datatype *config_type) { void def_struct_config_file(configuration *config_file, MPI_Datatype *config_type) {
int i, counts = 8; int i, counts = 11;
int blocklengths[8] = {1, 1, 1, 1, 1, 1, 1, 1}; int blocklengths[11] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
MPI_Aint displs[counts], dir; MPI_Aint displs[counts], dir;
MPI_Datatype types[counts]; MPI_Datatype types[counts];
// Rellenar vector types // Rellenar vector types
types[0] = types[1] = types[2] = types[3] = types[4] = types[5] = MPI_INT; types[0] = types[1] = types[2] = types[3] = types[4] = types[5] = types[6] = types[7] = types[8] = MPI_INT;
types[6] = MPI_FLOAT; types[9] = MPI_FLOAT;
types[7] = MPI_DOUBLE; types[10] = MPI_DOUBLE;
// Rellenar vector displs // Rellenar vector displs
MPI_Get_address(config_file, &dir); MPI_Get_address(config_file, &dir);
...@@ -261,11 +267,14 @@ void def_struct_config_file(configuration *config_file, MPI_Datatype *config_typ ...@@ -261,11 +267,14 @@ void def_struct_config_file(configuration *config_file, MPI_Datatype *config_typ
MPI_Get_address(&(config_file->resizes), &displs[0]); MPI_Get_address(&(config_file->resizes), &displs[0]);
MPI_Get_address(&(config_file->actual_resize), &displs[1]); MPI_Get_address(&(config_file->actual_resize), &displs[1]);
MPI_Get_address(&(config_file->matrix_tam), &displs[2]); MPI_Get_address(&(config_file->matrix_tam), &displs[2]);
MPI_Get_address(&(config_file->sdr), &displs[3]); MPI_Get_address(&(config_file->comm_tam), &displs[3]);
MPI_Get_address(&(config_file->adr), &displs[4]); MPI_Get_address(&(config_file->sdr), &displs[4]);
MPI_Get_address(&(config_file->aib), &displs[5]); MPI_Get_address(&(config_file->adr), &displs[5]);
MPI_Get_address(&(config_file->general_time), &displs[6]); MPI_Get_address(&(config_file->aib), &displs[6]);
MPI_Get_address(&(config_file->Top), &displs[7]); MPI_Get_address(&(config_file->css), &displs[7]);
MPI_Get_address(&(config_file->cst), &displs[8]);
MPI_Get_address(&(config_file->general_time), &displs[9]);
MPI_Get_address(&(config_file->Top), &displs[10]);
for(i=0;i<counts;i++) displs[i] -= dir; for(i=0;i<counts;i++) displs[i] -= dir;
......
...@@ -7,7 +7,8 @@ typedef struct ...@@ -7,7 +7,8 @@ typedef struct
{ {
int resizes; int resizes;
int actual_resize; int actual_resize;
int matrix_tam, sdr, adr; int matrix_tam, comm_tam, sdr, adr;
int css, cst;
int aib; int aib;
float general_time; float general_time;
double Top; double Top;
......
...@@ -11,7 +11,7 @@ void def_results_type(results_data *results, int resizes, MPI_Datatype *results_ ...@@ -11,7 +11,7 @@ void def_results_type(results_data *results, int resizes, MPI_Datatype *results_
//======================================================|| //======================================================||
//======================================================|| //======================================================||
//TODO Generalizar ambas funciones en una sola
/* /*
* Envia una estructura de resultados al grupo de procesos al que se * Envia una estructura de resultados al grupo de procesos al que se
* enlaza este grupo a traves del intercomunicador pasado como argumento. * enlaza este grupo a traves del intercomunicador pasado como argumento.
...@@ -57,17 +57,17 @@ void recv_results(results_data *results, int root, int resizes, MPI_Comm interco ...@@ -57,17 +57,17 @@ void recv_results(results_data *results, int root, int resizes, MPI_Comm interco
* Define un tipo derivado de MPI para mandar los tiempos * Define un tipo derivado de MPI para mandar los tiempos
* con una sola comunicacion. * con una sola comunicacion.
* *
* En concreto son tres escales y un vector de tamaño "resizes" * En concreto son tres escalares y dos vectores de tamaño "resizes"
*/ */
void def_results_type(results_data *results, int resizes, MPI_Datatype *results_type) { void def_results_type(results_data *results, int resizes, MPI_Datatype *results_type) {
int i, counts = 4; int i, counts = 5;
int blocklengths[4] = {1, 1, 1, 1}; int blocklengths[] = {1, 1, 1, 1, 1};
MPI_Aint displs[counts], dir; MPI_Aint displs[counts], dir;
MPI_Datatype types[counts]; MPI_Datatype types[counts];
// Rellenar vector types // Rellenar vector types
types[0] = types[1] = types[2] = types[3] = MPI_DOUBLE; types[0] = types[1] = types[2] = types[3] = types[4] = MPI_DOUBLE;
blocklengths[3] = resizes; blocklengths[3] = blocklengths[4] = resizes;
// Rellenar vector displs // Rellenar vector displs
MPI_Get_address(results, &dir); MPI_Get_address(results, &dir);
...@@ -75,13 +75,65 @@ void def_results_type(results_data *results, int resizes, MPI_Datatype *results_ ...@@ -75,13 +75,65 @@ void def_results_type(results_data *results, int resizes, MPI_Datatype *results_
MPI_Get_address(&(results->sync_start), &displs[0]); MPI_Get_address(&(results->sync_start), &displs[0]);
MPI_Get_address(&(results->async_start), &displs[1]); MPI_Get_address(&(results->async_start), &displs[1]);
MPI_Get_address(&(results->exec_start), &displs[2]); MPI_Get_address(&(results->exec_start), &displs[2]);
MPI_Get_address(&(results->spawn_time[0]), &displs[3]); //TODO Revisar si se puede simplificar MPI_Get_address(&(results->spawn_real_time[0]), &displs[3]);
MPI_Get_address(&(results->spawn_time[0]), &displs[4]); //TODO Revisar si se puede simplificar //FIXME Si hay mas de un spawn error?
for(i=0;i<counts;i++) displs[i] -= dir; for(i=0;i<counts;i++) displs[i] -= dir;
MPI_Type_create_struct(counts, blocklengths, displs, types, results_type); MPI_Type_create_struct(counts, blocklengths, displs, types, results_type);
MPI_Type_commit(results_type); MPI_Type_commit(results_type);
} }
//======================================================||
//======================================================||
//================SET RESULTS FUNCTIONS=================||
//======================================================||
//======================================================||
/*
* Guarda los resultados respecto a la redistribución de datos
* tras una reconfiguración. A llamar por los hijos tras
* terminar la redistribución y obtener la configuración.
*/
void set_results_post_reconfig(results_data *results, int grp, int sdr, int adr) {
if(sdr) { // Si no hay datos sincronos, el tiempo es 0
results->sync_time[grp] = results->sync_end - results->sync_start;
} else {
results->sync_time[grp] = 0;
}
if(adr) { // Si no hay datos asincronos, el tiempo es 0
results->async_time[grp] = results->async_end - results->async_start;
} else {
results->async_time[grp] = 0;
}
}
/*
* Pone el indice del siguiente elemento a escribir a 0 para los vectores
* que tengan que ver con las iteraciones.
* Por tanto, todos los anteriores valores de esos vectores pasan a ser invalidos
* si se intentan acceder desde un código externo.
*
* Solo es necesario llamar a esta funcion cuando se ha realizado una
* expansion con el metodo MERGE
*/
void reset_results_index(results_data *results) {
results->iter_index = 0;
}
/*
* Obtiene para cada iteracion, el tiempo maximo entre todos los procesos
* que han participado.
*
* Es necesario obtener el maximo, pues es el que representa el tiempo real
* que se ha utilizado.
*/
void compute_results_iter(results_data *results, int myId, int root, MPI_Comm comm) {
if(myId == root)
MPI_Reduce(MPI_IN_PLACE, results->iters_time, results->iter_index, MPI_DOUBLE, MPI_MAX, root, comm);
else
MPI_Reduce(results->iters_time, NULL, results->iter_index, MPI_DOUBLE, MPI_MAX, root, comm);
}
//======================================================|| //======================================================||
//======================================================|| //======================================================||
...@@ -95,22 +147,22 @@ void def_results_type(results_data *results, int resizes, MPI_Datatype *results_ ...@@ -95,22 +147,22 @@ void def_results_type(results_data *results, int resizes, MPI_Datatype *results_
* por iteracion, el tipo (Normal o durante communicacion asincrona) * por iteracion, el tipo (Normal o durante communicacion asincrona)
* y cuantas operaciones internas se han realizado en cada iteracion. * y cuantas operaciones internas se han realizado en cada iteracion.
*/ */
void print_iter_results(results_data *results, int last_normal_iter_index) { void print_iter_results(results_data results, int last_normal_iter_index) {
int i, aux; int i, aux;
printf("Titer: "); printf("Titer: ");
for(i=0; i< results->iter_index; i++) { for(i=0; i< results.iter_index; i++) {
printf("%lf ", results->iters_time[i]); printf("%lf ", results.iters_time[i]);
} }
printf("\nTtype: "); //FIXME modificar a imprimir solo la cantidad de asincronas printf("\nTtype: "); //FIXME modificar a imprimir solo la cantidad de asincronas
for(i=0; i< results->iter_index; i++) { for(i=0; i< results.iter_index; i++) {
printf("%d ", results->iters_type[i] == 0); printf("%d ", results.iters_type[i] == 0);
} }
printf("\nTop: "); //FIXME modificar a imprimir solo cuantas operaciones cuestan una iteracion printf("\nTop: "); //TODO modificar a imprimir solo cuantas operaciones cuestan una iteracion?
for(i=0; i< results->iter_index; i++) { for(i=0; i< results.iter_index; i++) {
aux = results->iters_type[i] == 0 ? results->iters_type[last_normal_iter_index] : results->iters_type[i]; aux = results.iters_type[i] == 0 ? results.iters_type[last_normal_iter_index] : results.iters_type[i];
printf("%d ", aux); printf("%d ", aux);
} }
printf("\n"); printf("\n");
...@@ -121,25 +173,30 @@ void print_iter_results(results_data *results, int last_normal_iter_index) { ...@@ -121,25 +173,30 @@ void print_iter_results(results_data *results, int last_normal_iter_index) {
* Estos son el tiempo de creacion de procesos, los de comunicacion * Estos son el tiempo de creacion de procesos, los de comunicacion
* asincrona y sincrona y el tiempo total de ejecucion. * asincrona y sincrona y el tiempo total de ejecucion.
*/ */
void print_global_results(results_data *results, int resizes) { void print_global_results(results_data results, int resizes) {
int i; int i;
printf("Tspawn: "); printf("Tspawn: "); // FIXME REFACTOR Cambiar nombre a T_resize_real
for(i=0; i< resizes - 1; i++) {
printf("%lf ", results.spawn_time[i]);
}
printf("\nTspawn_real: "); // FIXME REFACTOR Cambiar nombre a T_resize
for(i=0; i< resizes - 1; i++) { for(i=0; i< resizes - 1; i++) {
printf("%lf ", results->spawn_time[i]); printf("%lf ", results.spawn_real_time[i]);
} }
printf("\nTsync: "); printf("\nTsync: ");
for(i=1; i < resizes; i++) { for(i=1; i < resizes; i++) {
printf("%lf ", results->sync_time[i]); printf("%lf ", results.sync_time[i]);
} }
printf("\nTasync: "); printf("\nTasync: ");
for(i=1; i < resizes; i++) { for(i=1; i < resizes; i++) {
printf("%lf ", results->async_time[i]); printf("%lf ", results.async_time[i]);
} }
printf("\nTex: %lf\n", results->exec_time); printf("\nTex: %lf\n", results.exec_time);
} }
//======================================================|| //======================================================||
...@@ -154,27 +211,50 @@ void print_global_results(results_data *results, int resizes) { ...@@ -154,27 +211,50 @@ void print_global_results(results_data *results, int resizes) {
* Los argumentos "resizes" y "iters_size" se necesitan para obtener el tamaño * Los argumentos "resizes" y "iters_size" se necesitan para obtener el tamaño
* de los vectores de resultados. * de los vectores de resultados.
*/ */
void init_results_data(results_data **results, int resizes, int iters_size) { void init_results_data(results_data *results, int resizes, int iters_size) {
*results = malloc(1 * sizeof(results_data)); //*results = malloc(1 * sizeof(results_data)); FIXME Borrar
(*results)->spawn_time = calloc(resizes, sizeof(double)); results->spawn_time = calloc(resizes, sizeof(double));
(*results)->sync_time = calloc(resizes, sizeof(double)); results->spawn_real_time = calloc(resizes, sizeof(double));
(*results)->async_time = calloc(resizes, sizeof(double)); results->sync_time = calloc(resizes, sizeof(double));
results->async_time = calloc(resizes, sizeof(double));
(*results)->iters_time = calloc(iters_size * 20, sizeof(double)); //FIXME Numero magico - Añadir funcion que amplie tamaño results->iters_size = iters_size + 100;
(*results)->iters_type = calloc(iters_size * 20, sizeof(int)); results->iters_time = calloc(iters_size + 100, sizeof(double)); //FIXME Numero magico
(*results)->iter_index = 0; results->iters_type = calloc(iters_size + 100, sizeof(int));
results->iter_index = 0;
}
void realloc_results_iters(results_data *results, int needed) {
double *time_aux;
int *type_aux;
time_aux = (double *) realloc(results->iters_time, needed * sizeof(double));
type_aux = (int *) realloc(results->iters_type, needed * sizeof(int));
if(time_aux == NULL || type_aux == NULL) {
fprintf(stderr, "Fatal error - No se ha podido realojar la memoria de resultados\n");
MPI_Abort(MPI_COMM_WORLD, 1);
}
results->iters_time = time_aux;
results->iters_type = type_aux;
} }
/* /*
* Libera toda la memoria asociada con una estructura de resultados. * Libera toda la memoria asociada con una estructura de resultados.
* TODO Asegurar que ha sido inicializado?
*/ */
void free_results_data(results_data **results) { void free_results_data(results_data *results) {
free((*results)->spawn_time); if(results != NULL) {
free((*results)->sync_time); free(results->spawn_time);
free((*results)->async_time); free(results->spawn_real_time);
free(results->sync_time);
free((*results)->iters_time); free(results->async_time);
free((*results)->iters_type);
free(*results); free(results->iters_time);
free(results->iters_type);
}
//free(*results); FIXME Borrar
} }
...@@ -2,23 +2,31 @@ ...@@ -2,23 +2,31 @@
#include <stdlib.h> #include <stdlib.h>
#include <mpi.h> #include <mpi.h>
#define RESULTS_INIT_DATA_QTY 100
typedef struct { typedef struct {
// Iters data // Iters data
double *iters_time; double *iters_time;
int *iters_type, iter_index; int *iters_type, iter_index, iters_size;
// Spawn, Sync and Async time // Spawn, Thread, Sync, Async and Exec time
double spawn_start, *spawn_time; double spawn_start, *spawn_time, *spawn_real_time;
double sync_start, *sync_time; double sync_start, sync_end, *sync_time;
double async_start, *async_time; double async_start, async_end, *async_time;
double exec_start, exec_time; double exec_start, exec_time;
//Overcharge time is time spent in malleability that is from IO modules
} results_data; } results_data;
void send_results(results_data *results, int root, int resizes, MPI_Comm intercomm); void send_results(results_data *results, int root, int resizes, MPI_Comm intercomm);
void recv_results(results_data *results, int root, int resizes, MPI_Comm intercomm); void recv_results(results_data *results, int root, int resizes, MPI_Comm intercomm);
void print_iter_results(results_data *results, int last_normal_iter_index); void set_results_post_reconfig(results_data *results, int grp, int sdr, int adr);
void print_global_results(results_data *results, int resizes); void reset_results_index(results_data *results);
void init_results_data(results_data **results, int resizes, int iters_size);
void free_results_data(results_data **results); void compute_results_iter(results_data *results, int myId, int root, MPI_Comm comm);
void print_iter_results(results_data results, int last_normal_iter_index);
void print_global_results(results_data results, int resizes);
void init_results_data(results_data *results, int resizes, int iters_size);
void realloc_results_iters(results_data *results, int needed);
void free_results_data(results_data *results);
This diff is collapsed.
...@@ -102,15 +102,11 @@ void node_dist(slurm_job_info_t job_record, int type, int total_procs, int **qty ...@@ -102,15 +102,11 @@ void node_dist(slurm_job_info_t job_record, int type, int total_procs, int **qty
procs[i] += total_procs - asigCores; procs[i] += total_procs - asigCores;
(*used_nodes)++; (*used_nodes)++;
} }
if(*used_nodes > job_record.num_nodes) *used_nodes = job_record.num_nodes; if(*used_nodes > job_record.num_nodes) *used_nodes = job_record.num_nodes; //FIXME Si ocurre esto no es un error?
} }
*used_nodes=job_record.num_nodes; *used_nodes=job_record.num_nodes;
for(i=0; i<*used_nodes; i++) { // Antes se ponia aqui todos los nodos sin cpus a 1
if(procs[i] == 0){
procs[i]++;
}
}
*qty = procs; *qty = procs;
} }
...@@ -153,7 +149,8 @@ void fill_hostfile(slurm_job_info_t job_record, int ptr, int *qty, int used_node ...@@ -153,7 +149,8 @@ void fill_hostfile(slurm_job_info_t job_record, int ptr, int *qty, int used_node
hostlist = slurm_hostlist_create(job_record.nodes); hostlist = slurm_hostlist_create(job_record.nodes);
while ( (host = slurm_hostlist_shift(hostlist)) && i < used_nodes) { while ( (host = slurm_hostlist_shift(hostlist)) && i < used_nodes) {
write_hostfile_node(ptr, qty[i], host); if(qty[i] != 0)
write_hostfile_node(ptr, qty[i], host);
i++; i++;
free(host); free(host);
} }
......
module load mpich-3.4.1-noucx module load mpich-3.4.1-noucx
mpicc -Wall Main/Main.c Main/computing_func.c IOcodes/results.c IOcodes/read_ini.c IOcodes/ini.c malleability/ProcessDist.c malleability/CommDist.c -pthread -lslurm -lm #mpicc -Wall Main/Main.c Main/computing_func.c IOcodes/results.c IOcodes/read_ini.c IOcodes/ini.c malleability/ProcessDist.c malleability/CommDist.c -pthread -lslurm -lm
mpicc -Wall Main/Main.c Main/computing_func.c IOcodes/results.c IOcodes/read_ini.c IOcodes/ini.c malleability/malleabilityManager.c malleability/malleabilityTypes.c malleability/malleabilityZombies.c malleability/ProcessDist.c malleability/CommDist.c -pthread -lslurm -lm
if [ $# -gt 0 ] if [ $# -gt 0 ]
then then
if [ $1 = "-e" ] if [ $1 = "-e" ]
then then
cp a.out benchm.out echo "Creado ejecutable para ejecuciones"
cp a.out bench.out
fi fi
fi fi
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <pthread.h>
#include <mpi.h>
#include <slurm/slurm.h>
#include "ProcessDist.h"
/*
* ESTE CODIGO ES PARA COMPROBAR EL FUNCIONAMIENTO DEL FICHERO ProcessDist.h
* NO TIENE QUE VER CON EL BENCHMARK DE MALEABILIDAD
*/
#define ROOT 0
#define MAXGRP 3
#define TYPE_D 1
// 1 Es nodos
// 2 Es por nucleos
// Función para crear un fichero con el formato GxNPyIDz.o{jobId}.
// El proceso que llama a la función pasa a tener como salida estandar
// dicho fichero.
int create_out_file(int myId, int numP, int grp, char *jobId);
// Creates a file named G{grp}NP{numP}ID{myId}.o{jobId} and redirects this
// process' standard output (fd 1) to it.
// Returns 0 on success, or a negative code on failure:
//   -1 could not allocate memory
//   -2 could not build the file name
//   -3 could not create the file
//   -4 could not redirect standard output
int create_out_file(int myId, int numP, int grp, char *jobId) {
  int ptr, err;
  char *file_name;

  file_name = malloc(40 * sizeof(char));
  if(file_name == NULL) return -1;

  err = snprintf(file_name, 40, "G%dNP%dID%d.o%s", grp, numP, myId, jobId);
  if(err < 0) {
    free(file_name); // was leaked on every return path before
    return -2;
  }

  ptr = open(file_name, O_WRONLY | O_CREAT | O_APPEND, 0644);
  free(file_name); // the name is no longer needed once the fd exists
  if(ptr < 0) return -3;

  err = close(1);
  if(err < 0) { close(ptr); return -4; }
  err = dup(ptr); // lowest free descriptor is now 1, so this becomes stdout
  if(err < 0) { close(ptr); return -4; }
  close(ptr); // fd 1 now refers to the file; drop the duplicate descriptor

  return 0;
}
// Se realizan varios tests de ancho de banda
// al mandar N datos a los procesos impares desde el
// par inmediatamente anterior. Tras esto, los impares
// vuelven a enviar los N datos al proceso par.
//
// Tras las pruebas se imprime el ancho de banda, todo
// el tiempo necesario para realizar todas las pruebas y
// finalmente el tiempo medio por prueba.
void bandwidth(int myId, double latency, int n);
// Runs several bandwidth tests: N bytes are sent to every odd-ranked
// process from the even-ranked process immediately before it, and the odd
// ranks then send the N bytes back. Afterwards the even ranks print the
// bandwidth, the total time of all the trials, and the mean time per trial
// with the network latency subtracted.
void bandwidth(int myId, double latency, int n) {
  int i, loop_count = 100, n_bytes;
  double start_time, stop_time, elapsed_time, bw, time;
  char *aux;

  n_bytes = n * sizeof(char);
  aux = malloc(n_bytes);
  if(aux == NULL) { // was unchecked: a ~400MB allocation can realistically fail
    fprintf(stderr, "bandwidth: could not allocate %d bytes\n", n_bytes);
    MPI_Abort(MPI_COMM_WORLD, 1);
  }
  elapsed_time = 0;

  for(i=0; i<loop_count; i++){
    MPI_Barrier(MPI_COMM_WORLD);
    start_time = MPI_Wtime();
    if(myId %2 == 0){
      MPI_Ssend(aux, n, MPI_CHAR, myId+1, 99, MPI_COMM_WORLD);
      MPI_Recv(aux, n, MPI_CHAR, myId+1, 99, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }
    else if(myId %2 == 1){
      MPI_Recv(aux, n, MPI_CHAR, myId-1, 99, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
      MPI_Ssend(aux, n, MPI_CHAR, myId-1, 99, MPI_COMM_WORLD);
    }
    MPI_Barrier(MPI_COMM_WORLD);
    stop_time = MPI_Wtime();
    elapsed_time += stop_time - start_time;
  }
  free(aux); // was leaked

  if(myId %2 == 0) {
    time = elapsed_time / loop_count - latency;
    bw = ((double)n_bytes * 2) / time;
    printf("MyId %d Bw=%lf GB/s\nTot time=%lf\nTime=%lf\n", myId, bw/ 1000000000.0, elapsed_time, time);
  }
}
// Se realizan varios tests de latencia al
// mandar un único dato de tipo CHAR a los procesos impares
// desde el par inmediatamente anterior. Tras esto, los impares
// vuelven a enviar el dato al proceso par.
//
// Tras las pruebas se imprime el tiempo necesario para realizar
// TODAS las pruebas y se devuleve el tiempo medio (latencia) de
// las pruebas
double ping_pong(int myId, int start);
// Latency test: a single CHAR is sent to every odd-ranked process from the
// even-ranked process immediately before it, and then sent back. Even ranks
// print the total time of all the trials (when start != 0) and the mean time
// per trial (the latency) is broadcast from ROOT and returned to every rank.
double ping_pong(int myId, int start) {
  int rep, trials = 100;
  double t0, t1, total;
  char token;

  token = '0';
  total = 0;
  for(rep=0; rep<trials; rep++){
    MPI_Barrier(MPI_COMM_WORLD);
    t0 = MPI_Wtime();

    if(myId % 2 == 0){
      MPI_Ssend(&token, 1, MPI_CHAR, myId+1, 99, MPI_COMM_WORLD);
      MPI_Recv(&token, 1, MPI_CHAR, myId+1, 99, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }
    else if(myId % 2 == 1){
      MPI_Recv(&token, 1, MPI_CHAR, myId-1, 99, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
      MPI_Ssend(&token, 1, MPI_CHAR, myId-1, 99, MPI_COMM_WORLD);
    }

    MPI_Barrier(MPI_COMM_WORLD);
    t1 = MPI_Wtime();
    total += t1 - t0;
  }

  // Only even ranks with start != 0 report and average; ROOT's value is
  // then shared with everyone.
  if(myId %2 == 0 && start != 0) {
    printf("MyId %d Ping=%lf\n", myId, total);
    total /= trials;
  }
  MPI_Bcast(&total, 1, MPI_DOUBLE, ROOT, MPI_COMM_WORLD);
  return total;
}
// Common work performed by every group of processes.
// Runs the latency and bandwidth benchmarks, and then — unless this group is
// already group MAXGRP — spawns a new group of processes through SLURM and
// hands it its group number.
// Returns the group number of the calling process (0 for the initial group).
int work(int myId, int numP, char **argv, char *job_id) {
  int grp, n_value, aux=0;
  double latency;
  MPI_Comm comm = MPI_COMM_NULL, comm_par= MPI_COMM_NULL;
  int rootBcast = MPI_PROC_NULL;
  if(myId == ROOT) rootBcast = MPI_ROOT;

  // 1.000.000.00 1GB
  n_value = 400000000; // bytes exchanged per bandwidth trial
  grp = 0;

  // Ask the parent group (if any) which group number this one is
  MPI_Comm_get_parent(&comm_par);
  if(comm_par != MPI_COMM_NULL) {
    MPI_Bcast(&grp, 1, MPI_INT, ROOT, comm_par);
    grp+=1;
    // Handshake with the parents before disconnecting from them
    MPI_Barrier(comm_par);
    MPI_Bcast(&aux, 1, MPI_INT, rootBcast, comm_par);
    //MPI_Comm_free(&comm_par);
    MPI_Comm_disconnect(&comm_par);
  }

  // Split the per-process output into separate files
  //create_out_file(myId, numP, grp, job_id);

  /*----- PERFORMANCE TESTS -----*/
  // Warm-up round to make sure MPI communication is initialised
  ping_pong(myId, 0);
  MPI_Barrier(MPI_COMM_WORLD);
  // Measure the network latency
  latency = ping_pong(myId, 1);
  // Measure the bandwidth
  bandwidth(myId, latency, n_value);

  /*----- PROCESS CREATION -----*/
  // Creation of a new group of processes.
  // To prevent further groups from being created, set MAXGRP to 0.
  if(grp != MAXGRP) {
    // Initialise the communication with SLURM
    int aux = numP; // NOTE(review): deliberately shadows the outer 'aux'? confirm
    init_slurm_comm(argv, myId, aux, ROOT, TYPE_D, COMM_SPAWN_SERIAL);

    // Busy-wait until the communication and process creation have finished
    int test = -1;
    while(test != MPI_SUCCESS) {
      test = check_slurm_comm(myId, ROOT, MPI_COMM_WORLD, &comm);
    }

    // Tell the children which group of processes they are
    MPI_Bcast(&grp, 1, MPI_INT, rootBcast, comm);
    // Mirror of the handshake the children perform before disconnecting
    MPI_Barrier(comm);
    MPI_Bcast(&aux, 1, MPI_INT, ROOT, comm);

    // Disconnect the intercommunicator with the children
    MPI_Comm_disconnect(&comm);
    //MPI_Comm_free(&comm);
  } //IF GRP

  // Sanity check: both communicators should be null after disconnecting
  if(comm != MPI_COMM_NULL || comm_par != MPI_COMM_NULL) {
    printf("GRP=%d || El comunicador no esta a NULO\n", grp);
    fflush(stdout);
  }

  return grp;
}
/*
 * Prints basic information about every process (rank, node name, PID)
 * and delegates the benchmark / process-spawning work to work().
 */
int main(int argc, char ** argv) {
  int rank, numP, grp, len, pid;
  char *tmp;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &numP);
  pid = getpid();

  // SLURM job id, used (by the currently commented-out create_out_file call
  // inside work()) to name per-process output files.
  // NOTE(review): may be NULL outside a SLURM allocation — confirm
  tmp = getenv("SLURM_JOB_ID");
  if(rank == ROOT) {
    //system("printenv"); // Prints every environment variable
    printf("DATA\n");
    //print_Info(MPI_COMM_WORLD);
  }

  // Print the name of the node this process runs on
  char *name = malloc(MPI_MAX_PROCESSOR_NAME * sizeof(char));
  MPI_Get_processor_name(name,&len);
  printf("ID=%d Name %s PID=%d\n", rank, name, pid);
  fflush(stdout);
  MPI_Barrier(MPI_COMM_WORLD);

  // Hand the work over to this group (and possibly spawn children)
  grp = work(rank, numP, argv, tmp);
  fflush(stdout);
  free(name); // was leaked before

  MPI_Barrier(MPI_COMM_WORLD);
  MPI_Finalize();
  return 0;
}
...@@ -28,6 +28,9 @@ void recv_sync_arrays(struct Dist_data dist_data, char *array, int root, int num ...@@ -28,6 +28,9 @@ void recv_sync_arrays(struct Dist_data dist_data, char *array, int root, int num
void send_async_arrays(struct Dist_data dist_data, char *array, int root, int numP_child, int idI, int idE, struct Counts counts, MPI_Request *comm_req); void send_async_arrays(struct Dist_data dist_data, char *array, int root, int numP_child, int idI, int idE, struct Counts counts, MPI_Request *comm_req);
void recv_async_arrays(struct Dist_data dist_data, char *array, int root, int numP_parents, int idI, int idE, struct Counts counts, MPI_Request *comm_req); void recv_async_arrays(struct Dist_data dist_data, char *array, int root, int numP_parents, int idI, int idE, struct Counts counts, MPI_Request *comm_req);
void send_async_point_arrays(struct Dist_data dist_data, char *array, int rootBcast, int numP_child, int idI, int idE, struct Counts counts, MPI_Request *comm_req);
void recv_async_point_arrays(struct Dist_data dist_data, char *array, int root, int numP_parents, int idI, int idE, struct Counts counts, MPI_Request *comm_req);
// DIST FUNCTIONS // DIST FUNCTIONS
void get_dist(int qty, int id, int numP, struct Dist_data *dist_data); void get_dist(int qty, int id, int numP, struct Dist_data *dist_data);
void set_counts(int id, int numP, struct Dist_data data_dist, int *sendcounts); void set_counts(int id, int numP, struct Dist_data data_dist, int *sendcounts);
...@@ -63,7 +66,7 @@ void malloc_comm_array(char **array, int qty, int myId, int numP) { ...@@ -63,7 +66,7 @@ void malloc_comm_array(char **array, int qty, int myId, int numP) {
//================================================================================ //================================================================================
//================================================================================ //================================================================================
//========================SINCHRONOUS FUNCTIONS=================================== //========================SYNCHRONOUS FUNCTIONS===================================
//================================================================================ //================================================================================
//================================================================================ //================================================================================
...@@ -136,7 +139,6 @@ void recv_sync(char **array, int qty, int myId, int numP, int root, MPI_Comm int ...@@ -136,7 +139,6 @@ void recv_sync(char **array, int qty, int myId, int numP, int root, MPI_Comm int
void send_sync_arrays(struct Dist_data dist_data, char *array, int rootBcast, int numP_child, int idI, int idE, struct Counts counts) { void send_sync_arrays(struct Dist_data dist_data, char *array, int rootBcast, int numP_child, int idI, int idE, struct Counts counts) {
int i; int i;
// PREPARAR ENVIO DEL VECTOR // PREPARAR ENVIO DEL VECTOR
if(idI == 0) { if(idI == 0) {
set_counts(0, numP_child, dist_data, counts.counts); set_counts(0, numP_child, dist_data, counts.counts);
...@@ -147,10 +149,8 @@ void send_sync_arrays(struct Dist_data dist_data, char *array, int rootBcast, in ...@@ -147,10 +149,8 @@ void send_sync_arrays(struct Dist_data dist_data, char *array, int rootBcast, in
counts.displs[i] = counts.displs[i-1] + counts.counts[i-1]; counts.displs[i] = counts.displs[i-1] + counts.counts[i-1];
} }
//print_counts(dist_data, counts.counts, counts.displs, numP_child, "Padres"); //print_counts(dist_data, counts.counts, counts.displs, numP_child, "Padres");
/* COMUNICACION DE DATOS */ /* COMUNICACION DE DATOS */
MPI_Alltoallv(array, counts.counts, counts.displs, MPI_CHAR, NULL, counts.zero_arr, counts.zero_arr, MPI_CHAR, dist_data.intercomm); MPI_Alltoallv(array, counts.counts, counts.displs, MPI_CHAR, NULL, counts.zero_arr, counts.zero_arr, MPI_CHAR, dist_data.intercomm);
} }
/* /*
...@@ -161,7 +161,7 @@ void send_sync_arrays(struct Dist_data dist_data, char *array, int rootBcast, in ...@@ -161,7 +161,7 @@ void send_sync_arrays(struct Dist_data dist_data, char *array, int rootBcast, in
void recv_sync_arrays(struct Dist_data dist_data, char *array, int root, int numP_parents, int idI, int idE, struct Counts counts) { void recv_sync_arrays(struct Dist_data dist_data, char *array, int root, int numP_parents, int idI, int idE, struct Counts counts) {
int i; int i;
char *aux = malloc(1); char aux;
// Ajustar los valores de recepcion // Ajustar los valores de recepcion
if(idI == 0) { if(idI == 0) {
...@@ -175,14 +175,13 @@ void recv_sync_arrays(struct Dist_data dist_data, char *array, int root, int num ...@@ -175,14 +175,13 @@ void recv_sync_arrays(struct Dist_data dist_data, char *array, int root, int num
//print_counts(dist_data, counts.counts, counts.displs, numP_parents, "Hijos"); //print_counts(dist_data, counts.counts, counts.displs, numP_parents, "Hijos");
/* COMUNICACION DE DATOS */ /* COMUNICACION DE DATOS */
MPI_Alltoallv(aux, counts.zero_arr, counts.zero_arr, MPI_CHAR, array, counts.counts, counts.displs, MPI_CHAR, dist_data.intercomm); MPI_Alltoallv(&aux, counts.zero_arr, counts.zero_arr, MPI_CHAR, array, counts.counts, counts.displs, MPI_CHAR, dist_data.intercomm);
free(aux);
} }
//================================================================================ //================================================================================
//================================================================================ //================================================================================
//========================ASINCHRONOUS FUNCTIONS================================== //========================ASYNCHRONOUS FUNCTIONS==================================
//================================================================================ //================================================================================
//================================================================================ //================================================================================
...@@ -196,7 +195,7 @@ void recv_sync_arrays(struct Dist_data dist_data, char *array, int root, int num ...@@ -196,7 +195,7 @@ void recv_sync_arrays(struct Dist_data dist_data, char *array, int root, int num
* El vector array no se modifica en esta funcion. * El vector array no se modifica en esta funcion.
*/ */
int send_async(char *array, int qty, int myId, int numP, int root, MPI_Comm intercomm, int numP_child, MPI_Request **comm_req, int parents_wait) { int send_async(char *array, int qty, int myId, int numP, int root, MPI_Comm intercomm, int numP_child, MPI_Request **comm_req, int parents_wait) {
int rootBcast = MPI_PROC_NULL; int i, rootBcast = MPI_PROC_NULL;
int *idS = NULL; int *idS = NULL;
struct Counts counts; struct Counts counts;
struct Dist_data dist_data; struct Dist_data dist_data;
...@@ -211,17 +210,25 @@ int send_async(char *array, int qty, int myId, int numP, int root, MPI_Comm inte ...@@ -211,17 +210,25 @@ int send_async(char *array, int qty, int myId, int numP, int root, MPI_Comm inte
getIds_intercomm(dist_data, numP_child, &idS); // Obtener rango de Id hijos a los que este proceso manda datos getIds_intercomm(dist_data, numP_child, &idS); // Obtener rango de Id hijos a los que este proceso manda datos
// MAL_USE_THREAD sigue el camino sincrono
if(parents_wait == MAL_USE_NORMAL) { if(parents_wait == MAL_USE_NORMAL) {
*comm_req = (MPI_Request *) malloc(sizeof(MPI_Request)); //*comm_req = (MPI_Request *) malloc(sizeof(MPI_Request));
*comm_req[0] = MPI_REQUEST_NULL; *comm_req[0] = MPI_REQUEST_NULL;
send_async_arrays(dist_data, array, rootBcast, numP_child, idS[0], idS[1], counts, &(*comm_req[0])); send_async_arrays(dist_data, array, rootBcast, numP_child, idS[0], idS[1], counts, &(*comm_req[0]));
} else { } else if (parents_wait == MAL_USE_IBARRIER){
*comm_req = (MPI_Request *) malloc(2 * sizeof(MPI_Request)); //*comm_req = (MPI_Request *) malloc(2 * sizeof(MPI_Request));
(*comm_req)[0] = MPI_REQUEST_NULL; *comm_req[0] = MPI_REQUEST_NULL;
(*comm_req)[1] = MPI_REQUEST_NULL; *comm_req[1] = MPI_REQUEST_NULL;
send_async_arrays(dist_data, array, rootBcast, numP_child, idS[0], idS[1], counts, &((*comm_req)[1])); send_async_arrays(dist_data, array, rootBcast, numP_child, idS[0], idS[1], counts, &((*comm_req)[1]));
MPI_Ibarrier(intercomm, &((*comm_req)[0]) ); MPI_Ibarrier(intercomm, &((*comm_req)[0]) );
} else if (parents_wait == MAL_USE_POINT){
//*comm_req = (MPI_Request *) malloc(numP_child * sizeof(MPI_Request));
for(i=0; i<numP_child; i++){
(*comm_req)[i] = MPI_REQUEST_NULL;
}
send_async_point_arrays(dist_data, array, rootBcast, numP_child, idS[0], idS[1], counts, *comm_req);
} else if (parents_wait == MAL_USE_THREAD) { //TODO
} }
freeCounts(&counts); freeCounts(&counts);
...@@ -242,15 +249,14 @@ int send_async(char *array, int qty, int myId, int numP, int root, MPI_Comm inte ...@@ -242,15 +249,14 @@ int send_async(char *array, int qty, int myId, int numP, int root, MPI_Comm inte
*/ */
void recv_async(char **array, int qty, int myId, int numP, int root, MPI_Comm intercomm, int numP_parents, int parents_wait) { void recv_async(char **array, int qty, int myId, int numP, int root, MPI_Comm intercomm, int numP_parents, int parents_wait) {
int *idS = NULL; int *idS = NULL;
int wait_err; int wait_err, i;
struct Counts counts; struct Counts counts;
struct Dist_data dist_data; struct Dist_data dist_data;
MPI_Request comm_req, aux; MPI_Request *comm_req, aux;
// Obtener distribución para este hijo // Obtener distribución para este hijo
get_dist(qty, myId, numP, &dist_data); get_dist(qty, myId, numP, &dist_data);
*array = malloc(dist_data.tamBl * sizeof(char)); *array = malloc(dist_data.tamBl * sizeof(char));
//(*array)[dist_data.tamBl] = '\0';
dist_data.intercomm = intercomm; dist_data.intercomm = intercomm;
/* PREPARAR DATOS DE RECEPCION SOBRE VECTOR*/ /* PREPARAR DATOS DE RECEPCION SOBRE VECTOR*/
...@@ -258,14 +264,28 @@ void recv_async(char **array, int qty, int myId, int numP, int root, MPI_Comm in ...@@ -258,14 +264,28 @@ void recv_async(char **array, int qty, int myId, int numP, int root, MPI_Comm in
getIds_intercomm(dist_data, numP_parents, &idS); // Obtener el rango de Ids de padres del que este proceso recibira datos getIds_intercomm(dist_data, numP_parents, &idS); // Obtener el rango de Ids de padres del que este proceso recibira datos
recv_async_arrays(dist_data, *array, root, numP_parents, idS[0], idS[1], counts, &comm_req); // MAL_USE_THREAD sigue el camino sincrono
if(parents_wait == MAL_USE_POINT) {
comm_req = (MPI_Request *) malloc(numP_parents * sizeof(MPI_Request));
for(i=0; i<numP_parents; i++){
comm_req[i] = MPI_REQUEST_NULL;
}
recv_async_point_arrays(dist_data, *array, root, numP_parents, idS[0], idS[1], counts, comm_req);
wait_err = MPI_Waitall(numP_parents, comm_req, MPI_STATUSES_IGNORE);
} else if (parents_wait == MAL_USE_NORMAL || parents_wait == MAL_USE_IBARRIER) {
comm_req = (MPI_Request *) malloc(sizeof(MPI_Request));
*comm_req = MPI_REQUEST_NULL;
recv_async_arrays(dist_data, *array, root, numP_parents, idS[0], idS[1], counts, comm_req);
wait_err = MPI_Wait(comm_req, MPI_STATUS_IGNORE);
} else if (parents_wait == MAL_USE_THREAD) { //TODO
}
wait_err = MPI_Wait(&comm_req, MPI_STATUS_IGNORE);
if(wait_err != MPI_SUCCESS) { if(wait_err != MPI_SUCCESS) {
MPI_Abort(MPI_COMM_WORLD, wait_err); MPI_Abort(MPI_COMM_WORLD, wait_err);
} }
if(parents_wait == MAL_USE_IBARRIER) { if(parents_wait == MAL_USE_IBARRIER) { //MAL USE IBARRIER END
MPI_Ibarrier(intercomm, &aux); MPI_Ibarrier(intercomm, &aux);
MPI_Wait(&aux, MPI_STATUS_IGNORE); //Es necesario comprobar que la comunicación ha terminado para desconectar los grupos de procesos MPI_Wait(&aux, MPI_STATUS_IGNORE); //Es necesario comprobar que la comunicación ha terminado para desconectar los grupos de procesos
} }
...@@ -273,12 +293,15 @@ void recv_async(char **array, int qty, int myId, int numP, int root, MPI_Comm in ...@@ -273,12 +293,15 @@ void recv_async(char **array, int qty, int myId, int numP, int root, MPI_Comm in
//printf("S%d Tam %d String: %s END\n", myId, dist_data.tamBl, *array); //printf("S%d Tam %d String: %s END\n", myId, dist_data.tamBl, *array);
freeCounts(&counts); freeCounts(&counts);
free(idS); free(idS);
free(comm_req);
} }
/* /*
* Envia a los hijos un vector que es redistribuido a los procesos * Envia a los hijos un vector que es redistribuido a los procesos
* hijos. Antes de realizar la comunicacion, cada proceso padre calcula sobre que procesos * hijos. Antes de realizar la comunicacion, cada proceso padre calcula sobre que procesos
* del otro grupo se transmiten elementos. * del otro grupo se transmiten elementos.
*
* El envio se realiza a partir de una comunicación colectiva.
*/ */
void send_async_arrays(struct Dist_data dist_data, char *array, int rootBcast, int numP_child, int idI, int idE, struct Counts counts, MPI_Request *comm_req) { void send_async_arrays(struct Dist_data dist_data, char *array, int rootBcast, int numP_child, int idI, int idE, struct Counts counts, MPI_Request *comm_req) {
int i; int i;
...@@ -298,10 +321,36 @@ void send_async_arrays(struct Dist_data dist_data, char *array, int rootBcast, i ...@@ -298,10 +321,36 @@ void send_async_arrays(struct Dist_data dist_data, char *array, int rootBcast, i
MPI_Ialltoallv(array, counts.counts, counts.displs, MPI_CHAR, NULL, counts.zero_arr, counts.zero_arr, MPI_CHAR, dist_data.intercomm, comm_req); MPI_Ialltoallv(array, counts.counts, counts.displs, MPI_CHAR, NULL, counts.zero_arr, counts.zero_arr, MPI_CHAR, dist_data.intercomm, comm_req);
} }
/*
 * Sends to the children a vector that is redistributed among the child
 * processes. Before communicating, each parent process computes to which
 * processes of the other group it must send elements.
 *
 * The send is performed through several point-to-point communications,
 * one non-blocking MPI_Isend per destination child; comm_req must have
 * room for one request per child in [idI, idE).
 */
void send_async_point_arrays(struct Dist_data dist_data, char *array, int rootBcast, int numP_child, int idI, int idE, struct Counts counts, MPI_Request *comm_req) {
    int i;
    // Prepare the send of the vector: [idI, idE) is the range of child
    // ranks this parent sends data to.
    if(idI == 0) {
      set_counts(0, numP_child, dist_data, counts.counts);
      idI++;
      // Child 0 receives from displacement 0
      MPI_Isend(array, counts.counts[0], MPI_CHAR, 0, 99, dist_data.intercomm, &(comm_req[0]));
    }
    for(i=idI; i<idE; i++) {
      set_counts(i, numP_child, dist_data, counts.counts);
      // NOTE(review): assumes counts[i-1]/displs[i-1] are already valid when
      // the initial idI > 0 — confirm against getIds_intercomm/prepare_comm_alltoall
      counts.displs[i] = counts.displs[i-1] + counts.counts[i-1];
      MPI_Isend(array+counts.displs[i], counts.counts[i], MPI_CHAR, i, 99, dist_data.intercomm, &(comm_req[i]));
    }
    //print_counts(dist_data, counts.counts, counts.displs, numP_child, "Padres");
}
/* /*
* Recibe de los padres un vector que es redistribuido a los procesos * Recibe de los padres un vector que es redistribuido a los procesos
* de este grupo. Antes de realizar la comunicacion cada hijo calcula sobre que procesos * de este grupo. Antes de realizar la comunicacion cada hijo calcula sobre que procesos
* del otro grupo se transmiten elementos. * del otro grupo se transmiten elementos.
*
* La recepcion se realiza a partir de una comunicacion colectiva.
*/ */
void recv_async_arrays(struct Dist_data dist_data, char *array, int root, int numP_parents, int idI, int idE, struct Counts counts, MPI_Request *comm_req) { void recv_async_arrays(struct Dist_data dist_data, char *array, int root, int numP_parents, int idI, int idE, struct Counts counts, MPI_Request *comm_req) {
int i; int i;
...@@ -323,6 +372,30 @@ void recv_async_arrays(struct Dist_data dist_data, char *array, int root, int nu ...@@ -323,6 +372,30 @@ void recv_async_arrays(struct Dist_data dist_data, char *array, int root, int nu
free(aux); free(aux);
} }
/*
 * Receives from the parents a vector that is redistributed among the
 * processes of this group. Before communicating, each child computes from
 * which processes of the other group it receives elements.
 *
 * The reception is performed through several point-to-point communications,
 * one non-blocking MPI_Irecv per source parent; comm_req must have room for
 * one request per parent in [idI, idE).
 */
void recv_async_point_arrays(struct Dist_data dist_data, char *array, int root, int numP_parents, int idI, int idE, struct Counts counts, MPI_Request *comm_req) {
    int i;
    // Adjust the reception counts: [idI, idE) is the range of parent ranks
    // this child receives data from.
    if(idI == 0) {
      set_counts(0, numP_parents, dist_data, counts.counts);
      idI++;
      MPI_Irecv(array, counts.counts[0], MPI_CHAR, 0, 99, dist_data.intercomm, &(comm_req[0])); //FIXME BUffer recv
    }
    for(i=idI; i<idE; i++) {
      set_counts(i, numP_parents, dist_data, counts.counts);
      // NOTE(review): assumes counts[i-1]/displs[i-1] are already valid when
      // the initial idI > 0 — confirm against getIds_intercomm
      counts.displs[i] = counts.displs[i-1] + counts.counts[i-1];
      MPI_Irecv(array+counts.displs[i], counts.counts[i], MPI_CHAR, i, 99, dist_data.intercomm, &(comm_req[i])); //FIXME BUffer recv
    }
    //print_counts(dist_data, counts.counts, counts.displs, numP_parents, "Hijos");
}
/* /*
* ======================================================================================== * ========================================================================================
* ======================================================================================== * ========================================================================================
...@@ -477,8 +550,8 @@ void print_counts(struct Dist_data data_dist, int *xcounts, int *xdispls, int si ...@@ -477,8 +550,8 @@ void print_counts(struct Dist_data data_dist, int *xcounts, int *xdispls, int si
int i; int i;
for(i=0; i < size; i++) { for(i=0; i < size; i++) {
if(xcounts[i] != 0) { //if(xcounts[i] != 0) {
printf("P%d of %d | %scounts[%d]=%d disp=%d\n", data_dist.myId, data_dist.numP, name, i, xcounts[i], xdispls[i]); printf("P%d of %d | %scounts[%d]=%d disp=%d\n", data_dist.myId, data_dist.numP, name, i, xcounts[i], xdispls[i]);
} //}
} }
} }
...@@ -2,13 +2,16 @@ ...@@ -2,13 +2,16 @@
#include <stdlib.h> #include <stdlib.h>
#include <mpi.h> #include <mpi.h>
#include <string.h> #include <string.h>
#include "malleabilityStates.h"
#define MAL_COMM_COMPLETED 0 //#define MAL_COMM_COMPLETED 0
#define MAL_COMM_UNINITIALIZED 2 //#define MAL_COMM_UNINITIALIZED 2
#define MAL_ASYNC_PENDING 1 //#define MAL_ASYNC_PENDING 1
#define MAL_USE_NORMAL 0 //#define MAL_USE_NORMAL 0
#define MAL_USE_IBARRIER 1 //#define MAL_USE_IBARRIER 1
//#define MAL_USE_POINT 2
//#define MAL_USE_THREAD 3
int send_sync(char *array, int qty, int myId, int numP, int root, MPI_Comm intercomm, int numP_child); int send_sync(char *array, int qty, int myId, int numP, int root, MPI_Comm intercomm, int numP_child);
void recv_sync(char **array, int qty, int myId, int numP, int root, MPI_Comm intercomm, int numP_parents); void recv_sync(char **array, int qty, int myId, int numP, int root, MPI_Comm intercomm, int numP_parents);
......
This diff is collapsed.
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment