diff --git a/src/fuzz_introspector/analysis.py b/src/fuzz_introspector/analysis.py
index 0b95ca031..c28955c40 100644
--- a/src/fuzz_introspector/analysis.py
+++ b/src/fuzz_introspector/analysis.py
@@ -88,23 +88,31 @@ def load_data_files(self,
         logger.info("[+] Accummulating profiles")
         logger.info("Accummulating using multiprocessing")
 
-        manager = multiprocessing.Manager()
-        semaphore = multiprocessing.Semaphore(10)
-        return_dict = manager.dict()
+        result_dict: Dict[Any, Any] = dict()
+        if parallelise:
+            manager = multiprocessing.Manager()
+            semaphore = multiprocessing.Semaphore(10)
 
-        jobs = []
-        idx = 0
-        for profile in self.profiles:
-            p = multiprocessing.Process(
-                target=fuzzer_profile.FuzzerProfile.accummulate_profile,
-                args=(profile, self.base_folder, return_dict, f"uniq-{idx}",
-                      semaphore))
-            jobs.append(p)
-            idx += 1
-            p.start()
-        for proc in jobs:
-            proc.join()
+            return_dict = manager.dict()
+
+            jobs = []
+            idx = 0
+            for profile in self.profiles:
+                p = multiprocessing.Process(
+                    target=fuzzer_profile.FuzzerProfile.accummulate_profile,
+                    args=(profile, self.base_folder, return_dict, f"uniq-{idx}",
+                          semaphore))
+                jobs.append(p)
+                idx += 1
+                p.start()
+            for proc in jobs:
+                proc.join()
+            result_dict = return_dict.copy()
+        else:
+            for idx, profile in enumerate(self.profiles):
+                fuzzer_profile.FuzzerProfile.accummulate_profile(
+                    profile, self.base_folder, result_dict, f"uniq-{idx}")
 
         new_profiles = []
-        for idx in return_dict:
+        for idx in result_dict:
diff --git a/src/fuzz_introspector/commands.py b/src/fuzz_introspector/commands.py
index ad13daec4..90a05ef52 100644
--- a/src/fuzz_introspector/commands.py
+++ b/src/fuzz_introspector/commands.py
@@ -113,6 +113,7 @@ def analyse_end_to_end(arg_language,
     correlation_file = ''
 
     try:
+        is_parallel = os.getenv('FUZZ_INTROSPECTOR_PARALLEL', "true").lower() == "true"
         exit_code, return_values2 = run_analysis_on_dir(
             target_folder=out_dir,
             coverage_url=coverage_url,
@@ -123,7 +124,8 @@
             language=language,
             out_dir=out_dir,
dump_files=dump_files, - harness_lists=harness_lists) + harness_lists=harness_lists, + parallelise=is_parallel) for k, v in return_values2.items(): return_values[k] = v except DataLoaderError: