From 42daa841cab944f143e43a9c9029778f9ec39996 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=A5vard=20Berland?= Date: Wed, 11 Dec 2024 12:32:26 +0100 Subject: [PATCH] Use memoryconservative mode for WF CSV_MERGE When run as a workflow job/ERT plugin, this has been observed through logging to fail due to lack of memory. When run as a workflow job, it is probable that there is one CSV for every realization that is to be loaded, so the memory-conservative option makes sense as the default. The faster option is kept as the default for usage as a forward model step. --- src/subscript/csv_merge/csv_merge.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/subscript/csv_merge/csv_merge.py b/src/subscript/csv_merge/csv_merge.py index a95f1b03a..e9fa6f7ee 100755 --- a/src/subscript/csv_merge/csv_merge.py +++ b/src/subscript/csv_merge/csv_merge.py @@ -68,7 +68,9 @@ def run(self, *args): args = parser.parse_args(args) logger.setLevel(logging.INFO) globbedfiles = glob_patterns(args.csvfiles) - csv_merge_main(csvfiles=globbedfiles, output=args.output) + csv_merge_main( + csvfiles=globbedfiles, output=args.output, memoryconservative=True + ) def get_parser() -> argparse.ArgumentParser: