Iker Martín Álvarez / Proteo / Commits

Commit eed0d373
Authored Sep 13, 2021 by iker_martin

Fixed a race condition in the loop communications

Parent: 03f566b9
Changes: 3 files
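The race condition named in the commit message concerns the asynchronous redistribution carried out by a communication thread: in the thread_check hunk below, the local commAsync flag is combined with a collective MPI_Allreduce so that no rank joins its thread before every rank has finished. The following is a minimal, self-contained sketch of that pattern only; it is not Proteo code, and the names worker, done, and the MPI_MIN reduction over a 0/1 flag are invented for illustration (the project uses its own MAL_COMM_* constants and MPI_MAX).

/* Sketch: one worker thread per rank simulates an asynchronous redistribution
 * and raises a flag when it finishes. The main loop joins the thread only once
 * EVERY rank reports completion, learned through a collective MPI_Allreduce. */
#include <mpi.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static _Atomic int done = 0;

static void *worker(void *arg) {
  (void)arg;
  usleep(100000);               /* stand-in for the background communication */
  atomic_store(&done, 1);       /* signal completion to the main thread */
  return NULL;
}

int main(int argc, char **argv) {
  int provided, rank;
  MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  pthread_t th;
  pthread_create(&th, NULL, worker, NULL);

  int all_done = 0;
  while (!all_done) {                   /* iterative loop of the application */
    int local = atomic_load(&done);     /* local view of the worker's progress */
    /* Collective agreement: proceed only when the slowest rank is done too. */
    MPI_Allreduce(&local, &all_done, 1, MPI_INT, MPI_MIN, MPI_COMM_WORLD);
  }

  pthread_join(th, NULL);               /* safe: no rank is still communicating */
  if (rank == 0) printf("redistribution finished on all ranks\n");
  MPI_Finalize();
  return 0;
}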
Codes/Main/Main.c
@@ -169,6 +169,7 @@ int checkpoint(int iter, int state, MPI_Request **comm_req) {
    group->numS = config_file->procs[group->grp + 1];

    results->spawn_start = MPI_Wtime();
    if(group->myId == ROOT) { printf("Malleability\n"); }
    TC(group->numS);
    results->spawn_time[group->grp] = MPI_Wtime() - results->spawn_start;
@@ -180,6 +181,8 @@ int checkpoint(int iter, int state, MPI_Request **comm_req) {
    } else {
      state = check_redistribution(iter, comm_req);
    }

    printf("P%d/%d Malleability END state=%d\n", group->myId, group->numP, state);
  }
  return state;
@@ -255,16 +258,18 @@ int thread_creation() {
 * El estado de la comunicación es devuelto al finalizar la función.
 */
int thread_check(int iter) {
  if(group->commAsync == MAL_COMM_COMPLETED) {
    if(pthread_join(async_thread, NULL)) {
      printf("Error al esperar al hilo\n");
      MPI_Abort(MPI_COMM_WORLD, -1);
      return -2;
    }
    return end_redistribution(iter);
  }

  int all_completed = 0;
  return MAL_ASYNC_PENDING;

  // Comprueba que todos los hilos han terminado la distribucion (Mismo valor en commAsync)
  MPI_Allreduce(&group->commAsync, &all_completed, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
  if(all_completed != MAL_COMM_COMPLETED) return MAL_ASYNC_PENDING; // Continue only if asynchronous send has ended

  if(pthread_join(async_thread, NULL)) {
    printf("Error al esperar al hilo\n");
    MPI_Abort(MPI_COMM_WORLD, -1);
    return -2;
  }
  return end_redistribution(iter);
}
/*
@@ -430,6 +435,7 @@ void iterate(double *matrix, int n, int async_comm) {
  }

  if(config_file->comm_tam) {
    printf("P%d/%d Bcast\n", group->myId, group->numP);
    MPI_Bcast(group->compute_comm_array, config_file->comm_tam, MPI_CHAR, ROOT, MPI_COMM_WORLD);
  }
@@ -566,8 +572,8 @@ void obtain_op_times() {
  for(i=0; i<qty; i++) {
    result += computePiSerial(config_file->matrix_tam);
  }

  printf("Creado Top con valor %lf\n", result);
  fflush(stdout);
  //printf("Creado Top con valor %lf\n", result);
  //fflush(stdout);

  config_file->Top = (MPI_Wtime() - start_time) / qty; //Tiempo de una operacion
  MPI_Bcast(&(config_file->Top), 1, MPI_DOUBLE, ROOT, MPI_COMM_WORLD);
Codes/compila.sh
@@ -6,6 +6,7 @@ if [ $# -gt 0 ]
then
  if [ $1 = "-e" ]
  then
    echo "Creado ejecutable para ejecuciones"
    cp a.out bench.out
  fi
fi
Codes/malleability/CommDist.c
@@ -150,14 +150,7 @@ void send_sync_arrays(struct Dist_data dist_data, char *array, int rootBcast, in
  }

  //print_counts(dist_data, counts.counts, counts.displs, numP_child, "Padres");

  /* COMUNICACION DE DATOS */
  //int myId;
  //MPI_Comm_rank(MPI_COMM_WORLD, &myId);
  //if(myId == 0) { printf("TEST PREALL SEND\n"); fflush(stdout); }
  //MPI_Barrier(dist_data.intercomm);
  MPI_Alltoallv(array, counts.counts, counts.displs, MPI_CHAR, NULL, counts.zero_arr, counts.zero_arr, MPI_CHAR, dist_data.intercomm);
  //MPI_Barrier(dist_data.intercomm);
  //if(myId == 0) { printf("TEST POSTALL SEND\n"); fflush(stdout); }
}
/*
@@ -182,13 +175,7 @@ void recv_sync_arrays(struct Dist_data dist_data, char *array, int root, int num
  //print_counts(dist_data, counts.counts, counts.displs, numP_parents, "Hijos");

  /* COMUNICACION DE DATOS */
  //int myId;
  //MPI_Comm_rank(MPI_COMM_WORLD, &myId);
  //if(myId == 0) { printf("TEST PREALL RECV\n"); fflush(stdout); }
  //MPI_Barrier(dist_data.intercomm);
  MPI_Alltoallv(&aux, counts.zero_arr, counts.zero_arr, MPI_CHAR, array, counts.counts, counts.displs, MPI_CHAR, dist_data.intercomm);
  //MPI_Barrier(dist_data.intercomm);
  //if(myId == 0) { printf("TEST POSTALL RECV\n"); fflush(stdout); }
}
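The two hunks above implement a one-directional redistribution with a single collective: the parent side calls MPI_Alltoallv with real send counts and all-zero receive counts, while the child side does the opposite, so data only flows from parents to children over the intercommunicator. The sketch below is not Proteo code; it fakes the two groups by splitting MPI_COMM_WORLD in half (run with at least two ranks), and names such as CHUNK, dummy, and the leader ranks are invented for illustration.

/* Sketch: one-directional MPI_Alltoallv across an intercommunicator, in the
 * spirit of send_sync_arrays / recv_sync_arrays above. */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CHUNK 10   /* chars sent from every "parent" to every "child" */

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  int is_parent = rank < size / 2;
  MPI_Comm local;
  MPI_Comm_split(MPI_COMM_WORLD, is_parent, rank, &local);

  /* Leaders: world rank 0 for the parents, world rank size/2 for the children. */
  MPI_Comm intercomm;
  MPI_Intercomm_create(local, 0, MPI_COMM_WORLD, is_parent ? size / 2 : 0, 99, &intercomm);

  int remote;                       /* number of processes in the other group */
  MPI_Comm_remote_size(intercomm, &remote);

  int *counts = calloc(remote, sizeof(int));
  int *displs = calloc(remote, sizeof(int));
  int *zeros  = calloc(remote, sizeof(int));
  for (int i = 0; i < remote; i++) { counts[i] = CHUNK; displs[i] = i * CHUNK; }

  char *buf = malloc(remote * CHUNK);
  char dummy = 0;                   /* stand-in buffer for the empty direction */

  if (is_parent) {
    memset(buf, 'a' + rank, remote * CHUNK);
    /* Send CHUNK chars to every child, receive nothing back. */
    MPI_Alltoallv(buf, counts, displs, MPI_CHAR,
                  &dummy, zeros, zeros, MPI_CHAR, intercomm);
  } else {
    /* Send nothing, receive CHUNK chars from every parent. */
    MPI_Alltoallv(&dummy, zeros, zeros, MPI_CHAR,
                  buf, counts, displs, MPI_CHAR, intercomm);
    printf("child %d received %.*s...\n", rank, CHUNK, buf);
  }

  free(buf); free(counts); free(displs); free(zeros);
  MPI_Comm_free(&intercomm);
  MPI_Comm_free(&local);
  MPI_Finalize();
  return 0;
}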
@@ -223,6 +210,7 @@ int send_async(char *array, int qty, int myId, int numP, int root, MPI_Comm inte
  getIds_intercomm(dist_data, numP_child, &idS); // Obtener rango de Id hijos a los que este proceso manda datos

  // MAL_USE_THREAD sigue el camino sincrono
  if(parents_wait == MAL_USE_NORMAL) {
    *comm_req = (MPI_Request *) malloc(sizeof(MPI_Request));
    *comm_req[0] = MPI_REQUEST_NULL;
@@ -275,6 +263,7 @@ void recv_async(char **array, int qty, int myId, int numP, int root, MPI_Comm in
  getIds_intercomm(dist_data, numP_parents, &idS); // Obtener el rango de Ids de padres del que este proceso recibira datos

  // MAL_USE_THREAD sigue el camino sincrono
  if(parents_wait == MAL_USE_POINT) {
    comm_req = (MPI_Request *) malloc(numP_parents * sizeof(MPI_Request));
    for(i=0; i<numP_parents; i++){
@@ -394,12 +383,12 @@ void recv_async_point_arrays(struct Dist_data dist_data, char *array, int root,
  if(idI == 0) {
    set_counts(0, numP_parents, dist_data, counts.counts);
    idI++;
    MPI_Isend(array, counts.counts[0], MPI_CHAR, 0, 99, dist_data.intercomm, &(comm_req[0]));
    MPI_Irecv(array, counts.counts[0], MPI_CHAR, 0, 99, dist_data.intercomm, &(comm_req[0]));
  }
  for(i=idI; i<idE; i++) {
    set_counts(i, numP_parents, dist_data, counts.counts);
    counts.displs[i] = counts.displs[i-1] + counts.counts[i-1];
    MPI_Isend(array+counts.displs[i], counts.counts[0], MPI_CHAR, i, 99, dist_data.intercomm, &(comm_req[0]));
    MPI_Irecv(array+counts.displs[i], counts.counts[0], MPI_CHAR, i, 99, dist_data.intercomm, &(comm_req[0]));
  }

  //print_counts(dist_data, counts.counts, counts.displs, numP_parents, "Hijos");
}
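The hunk above shows the point-to-point path of the redistribution: a nonblocking MPI_Isend/MPI_Irecv is posted per peer at a displacement derived from the per-rank counts, with the requests collected in comm_req. A minimal, self-contained sketch of that pattern follows, reduced to a single communicator and with invented names (CHUNK, sendbuf, recvbuf, reqs); it is not the project's code, and it waits on all requests with MPI_Waitall rather than the project's own completion check.

/* Sketch: chunked nonblocking exchange, one Isend and one Irecv per peer,
 * each at its displacement, followed by a single wait on all requests. */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CHUNK 8   /* chars exchanged between every pair of ranks */

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  char *sendbuf = malloc(size * CHUNK);
  char *recvbuf = malloc(size * CHUNK);
  memset(sendbuf, 'a' + rank, size * CHUNK);

  /* Two requests per peer: one send, one receive. */
  MPI_Request *reqs = malloc(2 * size * sizeof(MPI_Request));

  for (int i = 0; i < size; i++) {
    int displ = i * CHUNK;   /* running displacement, like counts.displs[i] */
    MPI_Isend(sendbuf + displ, CHUNK, MPI_CHAR, i, 99, MPI_COMM_WORLD, &reqs[2 * i]);
    MPI_Irecv(recvbuf + displ, CHUNK, MPI_CHAR, i, 99, MPI_COMM_WORLD, &reqs[2 * i + 1]);
  }
  MPI_Waitall(2 * size, reqs, MPI_STATUSES_IGNORE);

  printf("P%d received %.*s... from rank 0\n", rank, CHUNK, recvbuf);

  free(sendbuf); free(recvbuf); free(reqs);
  MPI_Finalize();
  return 0;
}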