Iker Martín Álvarez / Proteo

Commit 8bf3566f
Authored Nov 11, 2024 by Iker Martín Álvarez

Merge branch 'dev' into 'DMR_Adaptation'

Update for minor fixes

See merge request martini/malleability_benchmark!7

Parents: 6a71bbf2, 16e76bb7
Changes: 2 files
Codes/MaM/spawn_methods/ProcessDist.c
@@ -41,7 +41,7 @@ void fill_str_hosts_slurm(char *nodelist, int *qty, size_t used_nodes, char **ho
 void generate_info_hostfile_slurm(char *nodelist, int *qty, size_t used_nodes, Spawn_data *spawn_data);
 void fill_hostfile_slurm(char* file_name, size_t used_nodes, int *qty, hostlist_t *hostlist);
-size_t fill_multiple_hostfile_slurm(char* file_name, int *qty, hostlist_t *hostlist, char **line, size_t *len_line);
+void fill_multiple_hostfile_slurm(char* file_name, int *qty, size_t *index, hostlist_t *hostlist, char **line, size_t *len_line);
 #endif
 //--------------------------------SLURM USAGE-------------------------------------//
@@ -484,7 +484,7 @@ void generate_info_hostfile_slurm(char *nodelist, int *qty, size_t used_nodes, S
   if(spawn_data->spawn_is_multiple || spawn_data->spawn_is_parallel) { // MULTIPLE
     for(; index<spawn_data->total_spawns; index++) { // This strat creates 1 hostfile per spawn
-      qty_index = fill_multiple_hostfile_slurm(hostfile_name, qty+qty_index, &hostlist, &line, &len_line);
+      fill_multiple_hostfile_slurm(hostfile_name, qty, &qty_index, &hostlist, &line, &len_line);
       set_mapping_host(qty[qty_index-1], "hostfile", hostfile_name, index, spawn_data);
       snprintf(hostfile_name+MAM_HOSTFILE_SIZE1, MAM_HOSTFILE_SIZE2, "%03d%s", index+1, MAM_HOSTFILE_NAME3);
     }
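Note: each iteration of this loop rewrites only the numeric suffix of the hostfile name in place; `hostfile_name + MAM_HOSTFILE_SIZE1` points just past the fixed prefix, so every spawn gets its own zero-padded file name. A minimal standalone sketch of that pattern follows; the prefix, lengths, and extension are invented stand-ins for the MAM_HOSTFILE_* constants.

    #include <stdio.h>

    #define PREFIX      "/tmp/mam_hostfile."  /* stand-in for the fixed prefix */
    #define PREFIX_LEN  18                    /* stand-in for MAM_HOSTFILE_SIZE1 */
    #define SUFFIX_ROOM 8                     /* stand-in for MAM_HOSTFILE_SIZE2 */
    #define EXT         ".tmp"                /* stand-in for MAM_HOSTFILE_NAME3 */

    int main(void) {
        char name[PREFIX_LEN + SUFFIX_ROOM];
        snprintf(name, sizeof name, "%s", PREFIX);
        for (int index = 0; index < 3; index++) {
            /* Overwrite only the suffix: zero-padded counter plus extension. */
            snprintf(name + PREFIX_LEN, SUFFIX_ROOM, "%03d%s", index + 1, EXT);
            printf("spawn %d -> %s\n", index, name);
        }
        return 0;
    }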
@@ -523,9 +523,9 @@ void fill_hostfile_slurm(char* file_name, size_t used_nodes, int *qty, hostlist_
   free(line);
 }

-size_t fill_multiple_hostfile_slurm(char* file_name, int *qty, hostlist_t *hostlist, char **line, size_t *len_line) {
+void fill_multiple_hostfile_slurm(char* file_name, int *qty, size_t *index, hostlist_t *hostlist, char **line, size_t *len_line) {
   char *host;
-  size_t i = 0;
+  size_t i = *index;
   int file = open(file_name, O_WRONLY | O_CREAT | O_TRUNC, 0644);
   if(file < 0) {
@@ -533,6 +533,7 @@ size_t fill_multiple_hostfile_slurm(char* file_name, int *qty, hostlist_t *hostl
     exit(EXIT_FAILURE);
   }

+  // The value of i has to continue from where it was
   while((host = slurm_hostlist_shift(*hostlist))) {
     if(qty[i] != 0) {
       write_hostfile_node(file, qty[i], host, line, len_line);
@@ -545,7 +546,7 @@ size_t fill_multiple_hostfile_slurm(char* file_name, int *qty, hostlist_t *hostl
   if(host != NULL) free(host);
   close(file);
-  return i;
+  *index = i;
 }
 #endif
-//--------------------------------SLURM USAGE-------------------------------------//
\ No newline at end of file
+//--------------------------------SLURM USAGE-------------------------------------//
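Note: the change running through this file replaces the old contract, where fill_multiple_hostfile_slurm returned the position it stopped at, with an in/out parameter: the function now resumes from *index and writes the final position back, so consecutive calls walk the qty array without the caller doing pointer arithmetic (the removed `qty+qty_index`). A minimal sketch of the same in/out-index pattern, with an invented next_chunk helper standing in for the real function:

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical helper mirroring the new contract: instead of returning
     * the position it stopped at, it resumes from *index and stores the new
     * position back through *index. */
    static void next_chunk(const int *qty, size_t total, size_t *index) {
        size_t i = *index;                  /* resume where the last call stopped */
        while (i < total && qty[i] != 0) {  /* consume one run of non-zero entries */
            printf("node %zu -> %d procs\n", i, qty[i]);
            i++;
        }
        if (i < total) i++;                 /* skip the zero separator */
        *index = i;                         /* hand the position back to the caller */
    }

    int main(void) {
        int qty[] = {4, 4, 0, 2, 0, 8};
        size_t index = 0, total = sizeof qty / sizeof qty[0];
        while (index < total) {             /* each iteration models one spawn/hostfile */
            next_chunk(qty, total, &index);
            printf("-- chunk done, index = %zu\n", index);
        }
        return 0;
    }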
Codes/MaM/spawn_methods/Strategy_Parallel.c
@@ -165,37 +165,45 @@ void hypercube_spawn(int group_id, int groups, int init_nodes, int init_step,
 }

 void common_synch(Spawn_data spawn_data, int qty_comms, MPI_Comm intercomm, MPI_Comm *spawn_comm) {
-  int i, root, root_other;
+  int i, color;
   char aux;
   MPI_Request *requests = NULL;
+  MPI_Comm involved_procs, aux_comm;

   requests = (MPI_Request *) malloc(qty_comms * sizeof(MPI_Request));
-  root = root_other = 0; //FIXME Magical Number
-  // Upside synchronization
+  aux_comm = intercomm == MPI_COMM_NULL ? spawn_data.comm : mall->comm;
+  color = qty_comms ? 1 : MPI_UNDEFINED;
+  MPI_Comm_split(aux_comm, color, mall->myId, &involved_procs);

+  // Upside synchronization starts
   for(i=0; i<qty_comms; i++) {
-    MPI_Irecv(&aux, 1, MPI_CHAR, root_other, 130, spawn_comm[i], &requests[i]);
+    MPI_Irecv(&aux, 1, MPI_CHAR, MAM_ROOT, 130, spawn_comm[i], &requests[i]);
   }
+  if(qty_comms) { MPI_Waitall(qty_comms, requests, MPI_STATUSES_IGNORE); MPI_Barrier(involved_procs); }
+  // Sources are the only synchronized procs at this point
+  if(intercomm != MPI_COMM_NULL && mall->myId == MAM_ROOT) {
+    MPI_Send(&aux, 1, MPI_CHAR, MAM_ROOT, 130, intercomm);
+    // Upside synchronization ends
+    // Downside synchronization starts
+    MPI_Recv(&aux, 1, MPI_CHAR, MAM_ROOT, 130, intercomm, MPI_STATUS_IGNORE);
+  }
-  if(qty_comms) { MPI_Waitall(qty_comms, requests, MPI_STATUSES_IGNORE); }
-  if(intercomm != MPI_COMM_NULL) { MPI_Barrier(mall->comm); }
-  if(intercomm != MPI_COMM_NULL && mall->myId == root) { MPI_Send(&aux, 1, MPI_CHAR, root_other, 130, intercomm); }
-  // Sources synchronization
-  // TODO Maybe could be used an split comm to reduce overhead of Barrier when not all sources spawn
-  if(intercomm == MPI_COMM_NULL) { MPI_Barrier(spawn_data.comm); }
-  // Downside synchronization
-  if(intercomm != MPI_COMM_NULL && mall->myId == root) { MPI_Recv(&aux, 1, MPI_CHAR, root_other, 130, intercomm, MPI_STATUS_IGNORE); }
   MPI_Barrier(mall->comm); // FIXME This barrier should not be required
+  if(intercomm != MPI_COMM_NULL && qty_comms) { MPI_Barrier(involved_procs); }
   for(i=0; i<qty_comms; i++) {
-    MPI_Isend(&aux, 1, MPI_CHAR, root_other, 130, spawn_comm[i], &requests[i]);
+    MPI_Isend(&aux, 1, MPI_CHAR, MAM_ROOT, 130, spawn_comm[i], &requests[i]);
   }
   if(qty_comms) { MPI_Waitall(qty_comms, requests, MPI_STATUSES_IGNORE); }
   if(requests != NULL) { free(requests); }
+  if(involved_procs != MPI_COMM_NULL) { MPI_Comm_disconnect(&involved_procs); }
 }

 void binary_tree_connection(int groups, int group_id, Spawn_ports *spawn_port, MPI_Comm *newintracomm) {
   int service_id;
   int middle, new_groups, new_group_id, new_rank;
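Note: the rewritten common_synch builds involved_procs with MPI_Comm_split, giving color MPI_UNDEFINED to processes that own no spawn communicators (qty_comms == 0); those get MPI_COMM_NULL back and skip the barriers, which is what the removed TODO about a split comm asked for. A standalone sketch of that split-with-MPI_UNDEFINED idiom, with an invented has_work predicate standing in for qty_comms:

    #include <mpi.h>
    #include <stdio.h>

    int main(int argc, char **argv) {
        int rank, size;
        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);

        /* Made-up predicate standing in for "this process owns spawn comms". */
        int has_work = (rank % 2 == 0);

        /* Processes passing MPI_UNDEFINED as the color get MPI_COMM_NULL back
         * and are excluded from the new communicator, like involved_procs. */
        int color = has_work ? 1 : MPI_UNDEFINED;
        MPI_Comm involved;
        MPI_Comm_split(MPI_COMM_WORLD, color, rank, &involved);

        if (involved != MPI_COMM_NULL) {
            MPI_Barrier(involved);  /* only the involved processes synchronize */
            MPI_Comm_free(&involved);
        }
        MPI_Finalize();
        return 0;
    }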
@@ -245,27 +253,12 @@ void binary_tree_connection(int groups, int group_id, Spawn_ports *spawn_port, M
 }

 void binary_tree_reorder(MPI_Comm *newintracomm, int group_id) {
-  int merge_size, *reorder, *index_reorder;
   int expected_rank;
-  MPI_Group merge_group, aux_group;
   MPI_Comm aux_comm;

-  index_reorder = NULL;
-  reorder = NULL;

   // FIXME Expects all groups having the same size
   expected_rank = mall->numP * group_id + mall->myId;

-  MPI_Comm_group(*newintracomm, &merge_group);
-  MPI_Comm_size(*newintracomm, &merge_size);
-  index_reorder = (int *) malloc(merge_size * sizeof(int));
-  reorder = (int *) malloc(merge_size * sizeof(int));
-  MPI_Allgather(&expected_rank, 1, MPI_INT, index_reorder, 1, MPI_INT, *newintracomm);
-  for(int i=0; i<merge_size; i++) { reorder[index_reorder[i]] = i; }
-  MPI_Group_incl(merge_group, merge_size, reorder, &aux_group);
-  MPI_Comm_create(*newintracomm, aux_group, &aux_comm);
+  MPI_Comm_split(*newintracomm, 0, expected_rank, &aux_comm);

   //int merge_rank, new_rank;
   //MPI_Comm_rank(*newintracomm, &merge_rank);
@@ -273,9 +266,5 @@ void binary_tree_reorder(MPI_Comm *newintracomm, int group_id) {
   //printf("Grupo %d -- Merge rank = %d - New rank = %d\n", group_id, merge_rank, new_rank);
   if(*newintracomm != MPI_COMM_WORLD && *newintracomm != MPI_COMM_NULL) MPI_Comm_disconnect(newintracomm);
-  MPI_Group_free(&merge_group);
-  MPI_Group_free(&aux_group);
   *newintracomm = aux_comm;
-  free(index_reorder);
-  free(reorder);
-}
\ No newline at end of file
+}
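Note: the simplification in binary_tree_reorder relies on a property of MPI_Comm_split: when every process passes the same color, the key argument alone determines the new rank order, so the Allgather / MPI_Group_incl / MPI_Comm_create sequence collapses into a single call with expected_rank as the key. A standalone sketch under an invented rank permutation:

    #include <mpi.h>
    #include <stdio.h>

    int main(int argc, char **argv) {
        int rank, size;
        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);

        /* Invented permutation: pretend the two halves of the ranks swapped
         * places; each process already knows the rank it wants. */
        int expected_rank = (rank + size / 2) % size;

        MPI_Comm reordered;
        /* Same color for everyone; the key decides the new rank ordering. */
        MPI_Comm_split(MPI_COMM_WORLD, 0, expected_rank, &reordered);

        int new_rank;
        MPI_Comm_rank(reordered, &new_rank);
        printf("old rank %d -> new rank %d\n", rank, new_rank);

        MPI_Comm_free(&reordered);
        MPI_Finalize();
        return 0;
    }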