7 changes: 3 additions & 4 deletions .gitlab/benchmarks/microbenchmarks.yml
@@ -31,7 +31,7 @@ variables:

 capture-hardware-software-info.sh
 
-if [[ $SCENARIO =~ ^flask_* || $SCENARIO =~ ^django_* ]];
+if [[ $SCENARIO =~ ^flask_* || $SCENARIO =~ ^django_* || $SCENARIO =~ ^startup* ]];
 then
 BP_SCENARIO=$SCENARIO bp-runner "${CI_PROJECT_DIR:-.}/.gitlab/benchmarks/bp-runner.yml" --debug -t
 else
@@ -153,6 +153,7 @@ microbenchmarks:
- "core_api"
- "otel_span"
- "otel_sdk_span"
- "startup"
- "appsec_iast_aspects"
- "appsec_iast_aspects_ospath"
- "appsec_iast_aspects_re_module"
@@ -174,9 +175,7 @@ microbenchmarks:
- "packages_update_imported_dependencies"
- "recursive_computation"
- "telemetry_add_metric"
# They take a long time to run, and now need the agent running
# TODO: Make benchmarks faster, or run less frequently, or as macrobenchmarks
# - "startup"


benchmarks-pr-comment:
image: $MICROBENCHMARKS_CI_IMAGE
16 changes: 14 additions & 2 deletions benchmarks/bm/_scenario.py
@@ -25,7 +25,7 @@ def add_cmdline_args(cmd, args):
     cmd = runner.argparser
 
     for _field in dataclasses.fields(scenario_cls):
-        if _field.name == "cprofile_loops":
+        if _field.name in ("cprofile_loops", "inner_loops"):
             continue
 
         cmd.add_argument("--{}".format(_field.name), type=_field.type if _field.type is not bool else str_to_bool)
@@ -50,7 +50,7 @@ def add_cmdline_args(cmd, args):
         finally:
             pr.dump_stats(pstats_output)
 
-    runner.bench_time_func(scenario.scenario_name, scenario._pyperf)
+    runner.bench_time_func(scenario.scenario_name, scenario._pyperf, inner_loops=scenario._inner_loops)
 
 
 @dataclasses.dataclass
@@ -81,6 +81,18 @@ def _cprofile_loops(self) -> int:
"""
return getattr(self, "cprofile_loops", 200)

@property
def _inner_loops(self) -> typing.Optional[int]:
"""Returns the number of inner loops to run for each pyperf iteration.

This is useful for scenarios that have a very long execution time per operation.

This can be set in the scenario class as a class variable, "inner_loops", or defaults to None.

If None, pyperf will determine the number of inner loops automatically.
"""
return getattr(self, "inner_loops", None)

@abc.abstractmethod
def run(self) -> typing.Generator[typing.Callable[[int], None], None, None]:
"""Returns a context manager that yields a function to be run for performance testing."""
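For context on the plumbing above: pyperf's Runner.bench_time_func() accepts an inner_loops hint declaring that the timed function performs that many operations per outer loop, and it divides the measured time by loops * inner_loops when reporting per-operation results. A minimal, self-contained sketch of that contract (illustrative only; the scenario name and workload here are made up, not part of this PR):

    import pyperf

    INNER_LOOPS = 10  # hypothetical value, mirroring the startup scenario below

    def time_func(loops):
        # pyperf passes the calibrated outer-loop count; each outer iteration
        # runs INNER_LOOPS operations, and pyperf divides the elapsed time by
        # loops * INNER_LOOPS to report a per-operation timing.
        t0 = pyperf.perf_counter()
        for _ in range(loops):
            for _ in range(INNER_LOOPS):
                sum(range(100))  # stand-in for the operation under test
        return pyperf.perf_counter() - t0

    runner = pyperf.Runner()
    runner.bench_time_func("example_scenario", time_func, inner_loops=INNER_LOOPS)

Note also that the first hunk excludes inner_loops from the auto-generated command-line arguments, alongside cprofile_loops: both are fixed attributes of a scenario rather than knobs a CI run should override.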
3 changes: 3 additions & 0 deletions benchmarks/startup/scenario.py
@@ -17,6 +17,9 @@ class Startup(bm.Scenario):
     # Not helpful for subprocess benchmarks
     cprofile_loops: int = 0
 
+    # This benchmark takes a long time to run, so reduce the inner loops
+    inner_loops: int = 10
+
     def run(self):
         env = os.environ.copy()
         if self.env:
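Putting the three files together: the inner_loops dataclass field declared on Startup is resolved by the _inner_loops property on the base class and forwarded to bench_time_func. A minimal sketch of that resolution (simplified; the real base class lives in benchmarks/bm/_scenario.py):

    import dataclasses
    import typing

    @dataclasses.dataclass
    class Scenario:
        @property
        def _inner_loops(self) -> typing.Optional[int]:
            # None lets pyperf determine inner loops automatically,
            # per the docstring added in this PR.
            return getattr(self, "inner_loops", None)

    @dataclasses.dataclass
    class Startup(Scenario):
        inner_loops: int = 10

    assert Startup()._inner_loops == 10  # explicit override, as in this PR
    assert Scenario()._inner_loops is None  # default: let pyperf decide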