Skip to content
GitLab
Explore
Sign in
Primary navigation
Search or go to…
Project
maestro-core
Manage
Activity
Members
Labels
Plan
Issues
Issue boards
Milestones
Code
Merge requests
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Snippets
Build
Pipelines
Jobs
Pipeline schedules
Artifacts
Deploy
Releases
Container registry
Model registry
Analyze
Contributor analytics
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
GitLab community forum
Contribute to GitLab
Provide feedback
Keyboard shortcuts
?
Snippets
Groups
Projects
Show more breadcrumbs
maestro
maestro-core
Commits
ad07528c
Commit
ad07528c
authored
2 years ago
by
Ali Mohammed
Browse files
Options
Downloads
Patches
Plain Diff
print omp and example thread pinning
parent
0335f5b9
No related branches found
No related tags found
No related merge requests found
Pipeline
#123295
canceled
2 years ago
Stage: build-and-test
Changes
3
Pipelines
1
Show whitespace changes
Inline
Side-by-side
Showing
3 changed files
examples/omp_consumer.c
+7
-4
7 additions, 4 deletions
examples/omp_consumer.c
examples/omp_injector.c
+8
-0
8 additions, 0 deletions
examples/omp_injector.c
examples/run_core_bench.sh
+9
-7
9 additions, 7 deletions
examples/run_core_bench.sh
with
24 additions
and
11 deletions
examples/omp_consumer.c
+
7
−
4
View file @
ad07528c
...
@@ -139,12 +139,16 @@ mstro_status require_CDOs(mstro_cdo *cdos, size_t num_CDOs, int *injector_ids, i
...
@@ -139,12 +139,16 @@ mstro_status require_CDOs(mstro_cdo *cdos, size_t num_CDOs, int *injector_ids, i
int
CDOs_per_inj
=
num_CDOs
/
num_injectors
;
int
CDOs_per_inj
=
num_CDOs
/
num_injectors
;
size_t
cdoidx
,
cdo_gid
,
i
,
j
;
size_t
cdoidx
,
cdo_gid
,
i
,
j
;
int
num
=
atoi
(
getenv
(
"OMP_NUM_THREADS"
));
#pragma omp parallel for schedule(static,1)
for
(
int
i
=
0
;
i
<
num
;
i
++
)
{
INFO
(
"Running on CPU %d
\n
"
,
sched_getcpu
());
}
/*declare CDOs loop */
/*declare CDOs loop */
#pragma omp parallel for private(cdoidx, cdo_gid, i, j) reduction(| :s)
#pragma omp parallel for private(cdoidx, cdo_gid, i, j) reduction(| :s)
for
(
cdo_gid
=
0
;
cdo_gid
<
num_CDOs
;
cdo_gid
++
)
{
for
(
cdo_gid
=
0
;
cdo_gid
<
num_CDOs
;
cdo_gid
++
)
{
i
=
cdo_gid
/
CDOs_per_inj
;
/* injector id */
i
=
cdo_gid
/
CDOs_per_inj
;
/* injector id */
j
=
cdo_gid
%
CDOs_per_inj
;
/* cdo id within this injector */
j
=
cdo_gid
%
CDOs_per_inj
;
/* cdo id within this injector */
...
@@ -192,7 +196,6 @@ mstro_status demand_CDOs(mstro_cdo *cdos, size_t num_CDOs){
...
@@ -192,7 +196,6 @@ mstro_status demand_CDOs(mstro_cdo *cdos, size_t num_CDOs){
#pragma omp parallel for reduction(| :s)
#pragma omp parallel for reduction(| :s)
for
(
size_t
i
=
0
;
i
<
num_CDOs
;
i
++
){
for
(
size_t
i
=
0
;
i
<
num_CDOs
;
i
++
){
mstro_status
s3
,
s4
;
mstro_status
s3
,
s4
;
s3
=
mstro_cdo_demand
(
cdos
[
i
]);
s3
=
mstro_cdo_demand
(
cdos
[
i
]);
DEBUG
(
"Hey, I recieved %s
\n
"
,
mstro_cdo_name
(
cdos
[
i
]));
DEBUG
(
"Hey, I recieved %s
\n
"
,
mstro_cdo_name
(
cdos
[
i
]));
s4
=
mstro_cdo_dispose
(
cdos
[
i
]);
s4
=
mstro_cdo_dispose
(
cdos
[
i
]);
...
...
This diff is collapsed.
Click to expand it.
examples/omp_injector.c
+
8
−
0
View file @
ad07528c
...
@@ -52,6 +52,14 @@ mstro_status inject_CDOs(int injector_id, mstro_cdo *cdos, size_t num_CDOs, size
...
@@ -52,6 +52,14 @@ mstro_status inject_CDOs(int injector_id, mstro_cdo *cdos, size_t num_CDOs, size
size_t
cdoidx
;
size_t
cdoidx
;
int
num
=
atoi
(
getenv
(
"OMP_NUM_THREADS"
));
#pragma omp parallel for schedule(static,1)
for
(
int
i
=
0
;
i
<
num
;
i
++
)
{
INFO
(
"Running on CPU %d
\n
"
,
sched_getcpu
());
}
/* declare CDOs loop */
/* declare CDOs loop */
#pragma omp parallel for private(cdoidx) reduction(| :s)
#pragma omp parallel for private(cdoidx) reduction(| :s)
for
(
size_t
i
=
0
;
i
<
num_CDOs
;
i
++
)
{
for
(
size_t
i
=
0
;
i
<
num_CDOs
;
i
++
)
{
...
...
This diff is collapsed.
Click to expand it.
examples/run_core_bench.sh
+
9
−
7
View file @
ad07528c
...
@@ -19,13 +19,16 @@
...
@@ -19,13 +19,16 @@
# number of producer and consumer threads
# number of producer and consumer threads
export
OMP_NUM_THREADS
=
4
export
OMP_NUM_THREADS
=
4
#OpenMP thread pinning for application threads
#OpenMP thread pinning for application threads
export
OMP_PLACES
=
cores
export
OMP_PLACES
=
"{0,1,2,3}"
export
OMP_PROC_BIND
=
close
export
OMP_PROC_BIND
=
close
#debug omp pinning
export
CRAY_OMP_CHECK_AFFINITY
=
TRUE
#Maestro thread pinning
#Maestro thread pinning
export
MSTRO_BIND_PM_PC
=
10
export
MSTRO_BIND_PM_PC
=
8
export
MSTRO_BIND_TRANSPORT_THREAD
=
11
export
MSTRO_BIND_TRANSPORT_THREAD
=
7
export
MSTRO_BIND_CQ_HANDLER
=
"3-12"
export
MSTRO_BIND_CQ_HANDLER
=
"4-6"
export
MSTRO_LOG_LEVEL
=
0
# FI provider, e.g. sockets, gni, verbs, cxi
# FI provider, e.g. sockets, gni, verbs, cxi
export
FI_PROVIDER
=
cxi
export
FI_PROVIDER
=
cxi
...
@@ -47,8 +50,7 @@ export MSTRO_CONSUMER_MODE=MSTRO_CONSUMER_SINK_ALL
...
@@ -47,8 +50,7 @@ export MSTRO_CONSUMER_MODE=MSTRO_CONSUMER_SINK_ALL
# total number of ranks = number of producer ranks + number of consumer ranks + 1 (pool manager rank)
# total number of ranks = number of producer ranks + number of consumer ranks + 1 (pool manager rank)
# number of producers = total number of ranks - #consumers - 1 (pool manager)
# number of producers = total number of ranks - #consumers - 1 (pool manager)
# srun <options> <core_bench> <#attributes> <attribute size> <#CDOs/thread> <#consumers> <CDO size in bytes>
# srun <options> <core_bench> <#attributes> <attribute size> <#CDOs/thread> <#consumers> <CDO size in byte>
srun
--exclusive
--cpu-bind
=
v
-c
250
--ntasks-per-node
1
-n
3 ./core_bench 0 0 20 1 671088640
srun
--exclusive
-c
128
--cpu-bind
=
v
--ntasks-per-node
1
-N
3 ./core_bench 0 0 20 1 671088640
This diff is collapsed.
Click to expand it.
Preview
0%
Loading
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment