Commit c86a74e

Support Event-based Scheduler in Holoscan Flow Benchmarking (#446)
* Support Event-based Scheduler in Holoscan Flow Benchmarking
* lint issue fixed

Signed-off-by: sohams <sohams@nvidia.com>
1 parent e9ab766 commit c86a74e

File tree: 4 files changed, +41 −14 lines

benchmarks/holoscan_flow_benchmarking/benchmark.hpp

9 additions & 0 deletions

```diff
@@ -77,6 +77,15 @@ class BenchmarkedApplication : public holoscan::Application {
     scheduler->add_arg(holoscan::Arg("stop_on_deadlock", true));
     scheduler->add_arg(holoscan::Arg("check_recession_period_ms", (double)0));
     scheduler->add_arg(holoscan::Arg("max_duration_ms", (int64_t)100000));
+  } else if (scheduler_str && std::string(scheduler_str) == "eventbased") {
+    holoscan::Fragment::scheduler(
+        holoscan::Fragment::make_scheduler<holoscan::EventBasedScheduler>(
+            "event-based-scheduler"));
+    auto scheduler = holoscan::Fragment::scheduler();
+
+    const char* num_threads_str = std::getenv("HOLOSCAN_EVENTBASED_WORKER_THREADS");
+    if (num_threads_str)
+      scheduler->add_arg(holoscan::Arg("worker_thread_number", std::stoi(num_threads_str)));
   } else {
     holoscan::Fragment::scheduler(
         holoscan::Fragment::make_scheduler<holoscan::GreedyScheduler>("greedy-scheduler"));
```
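The C++ branch above is driven entirely by environment variables. As a minimal sketch of how a launcher would exercise the new path (the variable names come from the diff; `./my_benchmarked_app` is a hypothetical binary, not something this commit ships):

```python
import os
import subprocess

# Select the event-based scheduler branch in BenchmarkedApplication and
# pass the worker thread count it reads via std::getenv.
env = os.environ.copy()
env["HOLOSCAN_SCHEDULER"] = "eventbased"
env["HOLOSCAN_EVENTBASED_WORKER_THREADS"] = "5"

# "./my_benchmarked_app" is illustrative only.
subprocess.run(["./my_benchmarked_app"], env=env, check=True)
```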

benchmarks/holoscan_flow_benchmarking/benchmark.py

15 additions & 9 deletions

```diff
@@ -48,8 +48,8 @@ def monitor_gpu(gpu_uuids, filename):
             sum(device.gpu_utilization() for device in devices) / len(devices)
         )
         time.sleep(2)
-    # write average gpu utilization to a file and a new line
 
+    # write average gpu utilization to a file and a new line
     with open(filename, "w") as f:
         # discard first 2 and last 2 values
         average_text = ",".join(map(str, average_gpu_utilizations[2:-2]))
@@ -87,7 +87,7 @@ def main():
     requiredArgument.add_argument(
         "--sched",
         nargs="+",
-        choices=["greedy", "multithread"],
+        choices=["greedy", "multithread", "eventbased"],
         required=True,
         help="scheduler(s) to use",
     )
@@ -171,7 +171,7 @@ def main():
         "--num_worker_threads",
         type=int,
         default=1,
-        help="number of worker threads for multithread scheduler (default: 1)",
+        help="number of worker threads for multithread or eventbased scheduler (default: 1)",
         required=False,
     )
 
@@ -181,8 +181,14 @@ def main():
 
     args = parser.parse_args()
 
-    if "multithread" not in args.sched and args.num_worker_threads != 1:
-        print("Warning: num_worker_threads is ignored as multithread scheduler is not used")
+    if (
+        "multithread" not in args.sched
+        and "eventbased" not in args.sched
+        and args.num_worker_threads != 1
+    ):
+        print(
+            "Warning: num_worker_threads is ignored as multithread or eventbased scheduler is not used"
+        )
 
     log_directory = None
     if args.log_directory is None:
@@ -200,10 +206,6 @@ def main():
         )
         os.mkdir(os.path.abspath(log_directory))
 
-    # if args.not_holohub or args.binary_path is not None:
-    #     print ("Currently non-HoloHub applications are not supported")
-    #     sys.exit(1)
-
     env = os.environ.copy()
     if args.gpu != "all":
         env["CUDA_VISIBLE_DEVICES"] = args.gpu
@@ -222,12 +224,16 @@ def main():
         if scheduler == "multithread":
             env["HOLOSCAN_SCHEDULER"] = scheduler
             env["HOLOSCAN_MULTITHREAD_WORKER_THREADS"] = str(args.num_worker_threads)
+        elif scheduler == "eventbased":
+            env["HOLOSCAN_SCHEDULER"] = scheduler
+            env["HOLOSCAN_EVENTBASED_WORKER_THREADS"] = str(args.num_worker_threads)
         elif scheduler != "greedy":
             print("Unsupported scheduler ", scheduler)
             sys.exit(1)
         # No need to set the scheduler for greedy scheduler
 
         for i in range(1, args.runs + 1):
+            print(f"Run {i} started for {scheduler} scheduler.")
             instance_threads = []
             if args.monitor_gpu:
                 gpu_utilization_logfile_name = (
```
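To summarize the dispatch benchmark.py now performs, here is a minimal sketch (not the benchmark's actual code; `scheduler_env` and `WORKER_THREADS_VAR` are names introduced for illustration) of the per-scheduler environment each `--sched` choice produces:

```python
# Maps each --sched choice to the env var carrying its worker thread count.
WORKER_THREADS_VAR = {
    "multithread": "HOLOSCAN_MULTITHREAD_WORKER_THREADS",
    "eventbased": "HOLOSCAN_EVENTBASED_WORKER_THREADS",
}


def scheduler_env(scheduler: str, num_worker_threads: int) -> dict:
    """Extra environment entries for one scheduler; greedy needs none."""
    if scheduler == "greedy":
        return {}
    if scheduler not in WORKER_THREADS_VAR:
        raise ValueError(f"Unsupported scheduler {scheduler}")
    return {
        "HOLOSCAN_SCHEDULER": scheduler,
        WORKER_THREADS_VAR[scheduler]: str(num_worker_threads),
    }
```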

benchmarks/holoscan_flow_benchmarking/benchmarked_application.py

8 additions & 1 deletion

```diff
@@ -2,7 +2,7 @@
 
 from holoscan.conditions import CountCondition
 from holoscan.core import Application
-from holoscan.schedulers import GreedyScheduler, MultiThreadScheduler
+from holoscan.schedulers import EventBasedScheduler, GreedyScheduler, MultiThreadScheduler
 
 
 class BenchmarkedApplication(Application):
@@ -43,6 +43,13 @@ def run(self):
                 check_recession_period_ms=0,
                 max_duration_ms=100000,
             )
+        elif scheduler_str and scheduler_str == "eventbased":
+            num_threads = os.environ.get("HOLOSCAN_EVENTBASED_WORKER_THREADS", None)
+            scheduler = EventBasedScheduler(
+                self,
+                name="event-based-scheduler",
+                worker_thread_number=int(num_threads) if num_threads is not None else 1,
+            )
         else:
             scheduler = GreedyScheduler(self, name="greedy-scheduler")
         self.scheduler(scheduler)
```
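For the Python path, the same selection can be reproduced in a few lines. A minimal sketch, assuming `MinimalApp` (a no-op `Application` subclass introduced here for illustration, not part of this commit):

```python
import os

from holoscan.core import Application
from holoscan.schedulers import EventBasedScheduler


class MinimalApp(Application):
    """Hypothetical empty app, used only to demonstrate scheduler selection."""

    def compose(self):
        pass  # a real app would add operators and flows here


os.environ["HOLOSCAN_SCHEDULER"] = "eventbased"
os.environ["HOLOSCAN_EVENTBASED_WORKER_THREADS"] = "5"

app = MinimalApp()
num_threads = os.environ.get("HOLOSCAN_EVENTBASED_WORKER_THREADS")
# Same construction BenchmarkedApplication.run() uses in the diff above.
app.scheduler(EventBasedScheduler(
    app,
    name="event-based-scheduler",
    worker_thread_number=int(num_threads) if num_threads is not None else 1,
))
app.run()
```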

benchmarks/holoscan_flow_benchmarking/flow_benchmarking_tutorial.md

9 additions & 4 deletions

````diff
@@ -319,23 +319,28 @@ your metrics options> -g endoscopy_outputs_<GPU Name>/logger* "GPU1" -g endoscop
 It is also possible to compare different schedulers using Holoscan flow
 benchmarking. The following commands can be used to evaluate performance
 of the endoscopy tool tracking sample application under the [Greedy
-scheduler](https://docs.nvidia.com/holoscan/sdk-user-guide/components/schedulers.html#greedy-scheduler)
-and the [Multithread scheduler](https://docs.nvidia.com/holoscan/sdk-user-guide/components/schedulers.html#multithreadscheduler):
+scheduler](https://docs.nvidia.com/holoscan/sdk-user-guide/components/schedulers.html#greedy-scheduler),
+the [Multithread
+scheduler](https://docs.nvidia.com/holoscan/sdk-user-guide/components/schedulers.html#multithreadscheduler)
+and the [Event-based Scheduler](https://docs.nvidia.com/holoscan/sdk-user-guide/components/schedulers.html#event-based-scheduler):
 
 ```
 $ python benchmarks/holoscan_flow_benchmarking/benchmark.py -a
 endoscopy_tool_tracking -r 10 -i 1 -m 1000 --sched greedy -d endoscopy_greedy_outputs
 
 $ python benchmarks/holoscan_flow_benchmarking/benchmark.py -a
-endoscopy_tool_tracking -r 10 -i 1 -m 1000 --sched multithread -w 5 -d ndoscopy_multithread_outputs
+endoscopy_tool_tracking -r 10 -i 1 -m 1000 --sched multithread -w 5 -d endoscopy_multithread_outputs
+
+$ python benchmarks/holoscan_flow_benchmarking/benchmark.py -a
+endoscopy_tool_tracking -r 10 -i 1 -m 1000 --sched eventbased -w 5 -d endoscopy_eventbased_outputs
 ```
 
 Then, the results can be analyzed and compared using the following
 command:
 
 ```
 $ python benchmarks/holoscan_flow_benchmarking/analyze.py <insert
-your metrics options> -g endoscopy_greedy_outputs/logger* "Endoscopy (Greedy)" -g endoscopy_multithread_outputs/logger* "Endoscopy (Multithread)"
+your metrics options> -g endoscopy_greedy_outputs/logger* "Endoscopy (Greedy)" -g endoscopy_multithread_outputs/logger* "Endoscopy (Multithread)" -g endoscopy_eventbased_outputs/logger* "Endoscopy (Event-based)"
 ```
 
 ## Tail and Flatness of the CDF Curve
````
