diff --git a/.pylintrc b/.pylintrc index ca5c203d4..3cce1929c 100644 --- a/.pylintrc +++ b/.pylintrc @@ -13,7 +13,7 @@ ignore=CVS # regex matches against base names, not paths. Note that pre-commit runs # pylint on individual files, so if some patterns should be ignored, put that # in .pre-commit-config.yaml -ignore-patterns= +ignore-patterns=version.py # Python code to execute, usually for sys.path manipulation such as # pygtk.require(). @@ -54,37 +54,21 @@ confidence= # --disable=W" disable= duplicate-code, - wrong-import-order, - unused-import, + import-outside-toplevel, + invalid-name, + logging-format-interpolation, + missing-class-docstring, missing-function-docstring, missing-module-docstring, - protected-access, - invalid-name, - redefined-outer-name, - unused-argument, - bad-continuation, no-member, - singleton-comparison, - import-outside-toplevel, - line-too-long, - too-many-arguments, - too-many-locals, - misplaced-comparison-constant, - missing-class-docstring, - no-else-return, - too-many-instance-attributes, - inconsistent-return-statements, - logging-format-interpolation, - useless-object-inheritance, - unused-variable, no-self-use, - consider-merging-isinstance, - consider-using-in, - deprecated-method, - method-hidden, not-callable, + redefined-outer-name, + too-many-arguments, + too-many-locals, + unused-argument, unspecified-encoding - +# # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where @@ -169,7 +153,7 @@ function-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ function-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ # Good variable names which should always be accepted, separated by a comma -good-names=i,j,k,ex,Run,_ +good-names=i,j,k,ex,Run,_,description,category,short_description,examples # Include a hint for the correct naming format with invalid-name include-naming-hint=no diff --git a/semeio/_docs_utils/_json_schema_2_rst.py b/semeio/_docs_utils/_json_schema_2_rst.py index 3d60906b1..8cae1767d 100644 --- a/semeio/_docs_utils/_json_schema_2_rst.py +++ b/semeio/_docs_utils/_json_schema_2_rst.py @@ -2,13 +2,13 @@ def _insert_ref(schema): - for k, v in schema.items(): - if isinstance(v, dict): - _insert_ref(v) - elif isinstance(v, list): - for index, val in enumerate(v.copy()): + for value in schema.values(): + if isinstance(value, dict): + _insert_ref(value) + elif isinstance(value, list): + for index, val in enumerate(value.copy()): if "$ref" in val: - v[index] = val["$ref"] + "_" + value[index] = val["$ref"] + "_" def _remove_key(schema, del_key): diff --git a/semeio/communication/reporter.py b/semeio/communication/reporter.py index 389b28844..7c6cda1c1 100644 --- a/semeio/communication/reporter.py +++ b/semeio/communication/reporter.py @@ -2,7 +2,7 @@ import os -class FileReporter(object): +class FileReporter: def __init__(self, output_dir): self._output_dir = output_dir @@ -19,19 +19,19 @@ def publish(self, namespace, data): output_file = self._prepare_output_file(namespace) + ".json" if os.path.exists(output_file): - with open(output_file) as f: - all_data = json.load(f) + with open(output_file, encoding="utf-8") as f_handle: + all_data = json.load(f_handle) else: all_data = [] all_data.append(data) - with open(output_file, "w") as f: - json.dump(all_data, f) + with open(output_file, "w", encoding="utf-8") as f_handle: + json.dump(all_data, f_handle) def publish_msg(self, 
namespace, msg): output_file = self._prepare_output_file(namespace) - with open(output_file, "a") as f: - f.write(f"{msg}\n") + with open(output_file, "a", encoding="utf-8") as f_handle: + f_handle.write(f"{msg}\n") def _prepare_output_file(self, namespace): if not os.path.exists(self._output_dir): diff --git a/semeio/communication/semeio_script.py b/semeio/communication/semeio_script.py index a449bf6bd..9272d1a04 100644 --- a/semeio/communication/semeio_script.py +++ b/semeio/communication/semeio_script.py @@ -13,7 +13,7 @@ SEMEIOSCRIPT_LOG_FILE = "workflow-log.txt" -class _LogHandlerContext(object): +class _LogHandlerContext: def __init__(self, log, handler): self._log = log self._handler = handler @@ -34,16 +34,17 @@ def __init__(self, output_dir, thread_id): def flush(self): for log_record in self.buffer: - self._reporter.publish_msg(self._namespace, self._format_record(log_record)) + self._reporter.publish_msg(self._namespace, _format_record(log_record)) super().flush() - def _format_record(self, log_record): - return ( - f"{log_record.levelname} " - f"[{datetime.datetime.fromtimestamp(log_record.created)}]: " - f"{log_record.message}" - ) + +def _format_record(log_record): + return ( + f"{log_record.levelname} " + f"[{datetime.datetime.fromtimestamp(log_record.created)}]: " + f"{log_record.message}" + ) class SemeioScript(ErtScript): # pylint: disable=too-few-public-methods @@ -93,10 +94,12 @@ def reporter(self): @property def _reports_dir(self): + # pylint: disable=protected-access return self.reporter._output_dir @_reports_dir.setter def _reports_dir(self, output_dir): + # pylint: disable=protected-access output_dir = Path(output_dir) if not output_dir.is_absolute(): res_config = self.ert().resConfig() diff --git a/semeio/hook_implementations/jobs.py b/semeio/hook_implementations/jobs.py index d5eb4441d..abd4630f8 100644 --- a/semeio/hook_implementations/jobs.py +++ b/semeio/hook_implementations/jobs.py @@ -20,14 +20,15 @@ def _get_jobs_from_directory(directory): for f in os.listdir(resource_directory) if os.path.isfile(os.path.join(resource_directory, f)) ] - # libres will look for an executable in the same folder as the job configuration - # file is located. If the name of the config is the same as the name of the executable, - # libres will be confused. The usual standard in ERT would be to capitalize the config - # file. On OSX systems, which are case-insensitive, this isn't viable. The job config - # files are therefore appended with "_CONFIG". - # The jobs will be installed as JOB_NAME JOB_NAME_CONFIG, and the JOB_NAME_CONFIG will - # point to an executable named job_name - which we install with entry-points. The user - # can use the forward model job as normal: + # libres will look for an executable in the same folder as the job + # configuration file is located. If the name of the config is the same as + # the name of the executable, libres will be confused. The usual standard in + # ERT would be to capitalize the config file. On OSX systems, which are + # case-insensitive, this isn't viable. The job config files are therefore + # appended with "_CONFIG". The jobs will be installed as JOB_NAME + # JOB_NAME_CONFIG, and the JOB_NAME_CONFIG will point to an executable named + # job_name - which we install with entry-points. 
The user can use the + # forward model job as normal: # SIMULATION_JOB JOB_NAME return { _remove_suffix(os.path.basename(path), "_CONFIG"): path for path in all_files @@ -62,18 +63,16 @@ def job_documentation(job_name): insert = verb == "insert" return { "description": ( - "{} NOSIM {} the ECLIPSE data file. " # pylint: disable=consider-using-f-string - "This will {} simulation in ECLIPSE. NB: the job does not currently work on osx systems" - ).format( - verb.capitalize(), - "into" if insert else "from", - "disable" if insert else "enable", + f"{verb.capitalize()} NOSIM " + f"{'into' if insert else 'from'} the ECLIPSE data file. " + f"This will {'disable' if insert else 'enable'} simulation in ECLIPSE. " + "NB: the job does not currently work on osx systems" ), "examples": "", "category": "utility.eclipse", } - if job_name == "STEA" or job_name == "PYSCAL": + if job_name in ["STEA", "PYSCAL"]: module_name = f"semeio.jobs.scripts.fm_{job_name.lower()}" elif job_name == "OTS": module_name = "semeio.jobs.scripts.overburden_timeshift" diff --git a/semeio/jobs/design2params/design2params.py b/semeio/jobs/design2params/design2params.py index 7657f0495..284fac695 100644 --- a/semeio/jobs/design2params/design2params.py +++ b/semeio/jobs/design2params/design2params.py @@ -34,6 +34,7 @@ def run( parametersfilename="parameters.txt", log_level=None, ): + # pylint: disable=too-many-arguments """ Reads out all file content from different files and create dataframes """ @@ -73,6 +74,7 @@ def run( def _complete_parameters_file( realization, parameters, parametersfilename, design_matrix_sheet, default_sheet ): + # pylint: disable=too-many-locals """ Pick key / values from chosen realization in design matrix Append those key / values if not present into parameters.txt @@ -176,7 +178,7 @@ def _complete_parameters_file( ) # if all ok - write the ok file - with open(_TARGET_FILE_TXT, "w") as target_file: + with open(_TARGET_FILE_TXT, "w", encoding="utf-8") as target_file: target_file.write("OK\n") @@ -188,7 +190,7 @@ def _read_excel(file_name, sheet_name, header=0, usecols=None, engine=None): :raises: SystemExit if file not loaded correctly """ try: - df = pd.read_excel( + dframe = pd.read_excel( file_name, sheet_name, header=header, @@ -214,13 +216,13 @@ def _read_excel(file_name, sheet_name, header=0, usecols=None, engine=None): ) ) from err - p = Path(file_name) - if p.suffix == ".xls": + file_path = Path(file_name) + if file_path.suffix == ".xls": warnings.warn( "Support for XLS files is deprecated. 
Use XLSX", DeprecationWarning ) - return df.dropna(axis=1, how="all") + return dframe.dropna(axis=1, how="all") def _validate_design_matrix_header(design_matrix): diff --git a/semeio/jobs/design_kw/design_kw.py b/semeio/jobs/design_kw/design_kw.py index ffd44557d..d0adaf202 100644 --- a/semeio/jobs/design_kw/design_kw.py +++ b/semeio/jobs/design_kw/design_kw.py @@ -26,18 +26,18 @@ def run( valid = True - with open(parameters_file_name) as parameters_file: + with open(parameters_file_name, encoding="utf-8") as parameters_file: parameters = parameters_file.readlines() key_vals = extract_key_value(parameters) key_vals.update(rm_genkw_prefix(key_vals)) - with open(template_file_name, "r") as template_file: + with open(template_file_name, "r", encoding="utf-8") as template_file: template = template_file.readlines() if valid: - with open(result_file_name, "w") as result_file: + with open(result_file_name, "w", encoding="utf-8") as result_file: for line in template: if not is_comment(line): for key, value in key_vals.items(): @@ -49,7 +49,7 @@ def run( result_file.write(line) if valid: - with open(_STATUS_FILE_NAME, "w") as status_file: + with open(_STATUS_FILE_NAME, "w", encoding="utf-8") as status_file: status_file.write("DESIGN_KW OK\n") @@ -90,8 +90,7 @@ def unmatched_templates(line): bracketpattern = re.compile("<.+?>") if bracketpattern.search(line): return bracketpattern.findall(line) - else: - return [] + return [] def is_comment(line): diff --git a/semeio/jobs/overburden_timeshift/ots.py b/semeio/jobs/overburden_timeshift/ots.py index 8ebdf6657..afe302404 100644 --- a/semeio/jobs/overburden_timeshift/ots.py +++ b/semeio/jobs/overburden_timeshift/ots.py @@ -1,27 +1,26 @@ +# pylint: disable=invalid-name import logging import os.path from collections import namedtuple -from itertools import product from datetime import datetime as dt +from itertools import product from pathlib import Path - import configsuite -import yaml -import xtgeo import numpy as np -from scipy.interpolate import CloughTocher2DInterpolator -from ecl.grid import EclGrid -from ecl.gravimetry import EclSubsidence +import xtgeo +import yaml from ecl.eclfile import Ecl3DKW, EclFile +from ecl.gravimetry import EclSubsidence +from ecl.grid import EclGrid +from scipy.interpolate import CloughTocher2DInterpolator -from semeio.jobs.overburden_timeshift.ots_vel_surface import OTSVelSurface -from semeio.jobs.overburden_timeshift.ots_res_surface import OTSResSurface -from semeio.jobs.overburden_timeshift.ots_config import build_schema from semeio._exceptions.exceptions import ConfigurationError +from semeio.jobs.overburden_timeshift.ots_config import build_schema +from semeio.jobs.overburden_timeshift.ots_res_surface import OTSResSurface +from semeio.jobs.overburden_timeshift.ots_vel_surface import OTSVelSurface -# pylint: disable=consider-using-enumerate def extract_ots_context(configuration): rstfile_path = Path(f"{configuration.eclbase}.UNRST") if not rstfile_path.exists(): @@ -63,7 +62,7 @@ def write_surface(vintage_pairs, ts, output_dir, type_str, file_format="irap_bin def ots_run(parameter_file): - + # pylint: disable=too-many-locals parms = ots_load_params(parameter_file) vintage_pairs = parms.vintages @@ -156,7 +155,8 @@ def ots_run(parameter_file): ) -class OverburdenTimeshift(object): +class OverburdenTimeshift: + # pylint: disable=too-many-instance-attributes def __init__( self, eclbase, @@ -169,6 +169,7 @@ def __init__( above, velocity_model, ): + # pylint: disable=too-many-arguments """ The OTS class manages the 
information required to calculate overburden timeshift. @@ -207,6 +208,7 @@ def _create_surface(self, z=None): :param z: replace z values of surface """ + # pylint: disable=too-many-locals nx = self._surface.nx ny = self._surface.ny x = self._surface.x @@ -270,9 +272,9 @@ def add_survey(self, name, date): @staticmethod def _divide_negative_shift(ts, div_val=5.0): - for i in range(len(ts)): - if ts[i] < 0: - ts[i] /= div_val + for index, value in enumerate(ts): + if value < 0: + ts[index] = value / div_val def geertsma_ts_rporv(self, vintage_pairs): """ @@ -306,7 +308,7 @@ def _geertsma_ts_custom(self, vintage_pairs, subsidence_func, method_name): :param subsidence_func: specify subsidence method to be used :param method_name: string representing the subsidence func name """ - + # pylint: disable=too-many-locals if len(vintage_pairs) < 1: return 0, [] @@ -377,6 +379,7 @@ def geertsma_ts(self, vintage_pairs): :param vintage_pairs: """ + # pylint: disable=too-many-locals if len(vintage_pairs) < 1: return 0, [] @@ -497,6 +500,7 @@ def dpv(self, vintage_pairs): :param vintage_pairs: list of pairs of vintages :return: """ + # pylint: disable=too-many-locals if len(vintage_pairs) < 1: return 0, [] diff --git a/semeio/jobs/overburden_timeshift/ots_config.py b/semeio/jobs/overburden_timeshift/ots_config.py index b372bc029..9b7fc362b 100644 --- a/semeio/jobs/overburden_timeshift/ots_config.py +++ b/semeio/jobs/overburden_timeshift/ots_config.py @@ -1,11 +1,10 @@ -# -*- coding: utf-8 -*- -import configsuite import datetime +from copy import deepcopy from pathlib import Path +import configsuite from configsuite import MetaKeys as MK from configsuite import types -from copy import deepcopy @configsuite.validator_msg("List needs to be of size 2") @@ -15,7 +14,7 @@ def _is_length_equal_2(value): @configsuite.validator_msg("Only value of 1 or -1 is allowed") def _is_int_one(value): - return value == 1 or value == -1 + return value in (1, -1) @configsuite.validator_msg("Vintages must contain at least an entry!") @@ -42,11 +41,11 @@ def _unrst_exists(value): def _str2dates(value): value = deepcopy(value) dates = [] - for x in value: - if isinstance(x, str): - dates.append(datetime.datetime.strptime(x, "%Y-%m-%d").date()) + for date in value: + if isinstance(date, str): + dates.append(datetime.datetime.strptime(date, "%Y-%m-%d").date()) else: - dates.append(x) + dates.append(date) return dates diff --git a/semeio/jobs/overburden_timeshift/ots_res_surface.py b/semeio/jobs/overburden_timeshift/ots_res_surface.py index 94a930989..c7b0fa918 100644 --- a/semeio/jobs/overburden_timeshift/ots_res_surface.py +++ b/semeio/jobs/overburden_timeshift/ots_res_surface.py @@ -1,7 +1,8 @@ +# pylint: disable=invalid-name import numpy as np -class OTSResSurface(object): +class OTSResSurface: def __init__(self, grid, above=0): """ Create a surface from a reservoir grid. 
@@ -54,6 +55,7 @@ def cell_corners(self): def _calculate_surface(self, grid, above): # calculate average from top face vertices # from unstructured grid as an interface between active and inactive cells + # pylint: disable=too-many-locals nx = grid.getNX() ny = grid.getNY() nz = grid.getNZ() diff --git a/semeio/jobs/overburden_timeshift/ots_vel_surface.py b/semeio/jobs/overburden_timeshift/ots_vel_surface.py index c1fb851da..53ce70f18 100644 --- a/semeio/jobs/overburden_timeshift/ots_vel_surface.py +++ b/semeio/jobs/overburden_timeshift/ots_vel_surface.py @@ -1,3 +1,4 @@ +# pylint: disable=invalid-name import numpy as np from scipy.interpolate import CloughTocher2DInterpolator @@ -6,7 +7,7 @@ from segyio import TraceField -class OTSVelSurface(object): +class OTSVelSurface: def __init__(self, res_surface, vcube): """ Create a surface where the timeshift can be calculated. @@ -32,6 +33,7 @@ def x(self): @property def y(self): + # pylint: disable=invalid-name return self._y @property @@ -143,7 +145,9 @@ def _upscaling_size_stepping(res_corners, axis, vel_axis): return nn, ups def _upscale_velocity(self, res_corners, x_vel, y_vel, traces, nt, dt): - # resample to a new grid size based on the grid size + """resample to a new grid size based on the grid size""" + # pylint: disable=too-many-arguments,too-many-locals + nxx, upsx = self._upscaling_size_stepping(res_corners[:, :, 0], 0, x_vel) nyy, upsy = self._upscaling_size_stepping(res_corners[:, :, 1], 1, y_vel) @@ -173,9 +177,7 @@ def _map_reservoir_surface_to_velocity(self, res_surface, vcube): """ # downsample segy if segy resolution of CDP and # sample rate is higher than that of the Eclgrid - x, y, traces, nt, self._dt = self._read_velocity( - vcube, res_surface.cell_corners - ) + x, y, traces, _, self._dt = self._read_velocity(vcube, res_surface.cell_corners) # this is some clever integration of traces using # averaging between two samples? vel_t_int = np.zeros_like(traces) @@ -185,11 +187,11 @@ def _map_reservoir_surface_to_velocity(self, res_surface, vcube): self._z3d = np.cumsum(vel_t_int * self._dt / 2, 2).reshape( -1, vel_t_int.shape[-1] ) - # this creates interploation functions over existing surface / grid + # this creates interpolation functions over existing surface / grid ip = CloughTocher2DInterpolator((res_surface.x, res_surface.y), res_surface.z) # interpolate over our vel field given by pos x and y z = ip(x, y) - # z gives a new depth for segy CDP pos given the grid inerface surface + # z gives a new depth for segy CDP pos given the grid interface surface # So far we have downsampled the segy file to correspond # with the resolution of the grid. diff --git a/semeio/jobs/rft/gendata_rft.py b/semeio/jobs/rft/gendata_rft.py index 34e2b6c52..ec23a3131 100644 --- a/semeio/jobs/rft/gendata_rft.py +++ b/semeio/jobs/rft/gendata_rft.py @@ -1,13 +1,14 @@ # pylint: disable=logging-fstring-interpolation +import logging import os -import logging import pandas as pd logger = logging.getLogger(__name__) def _write_gen_data_files(trajectory_df, directory, well, report_step): + # pylint: disable=line-too-long """Generate three files with the information GEN_DATA needs from the trajectory dataframe. @@ -62,9 +63,9 @@ def _write_simdata(fname, dataname, trajectory_df): """Write pressure value, one pr line for all points, -1 is used where there is no pressure information. 
""" - with open(fname + "", "w+", encoding="utf-8") as fh: + with open(fname + "", "w+", encoding="utf-8") as file_handle: if dataname in trajectory_df: - fh.write( + file_handle.write( "\n".join( trajectory_df.sort_values("order")[dataname] .fillna(value=-1) @@ -74,15 +75,15 @@ def _write_simdata(fname, dataname, trajectory_df): + "\n" ) else: - fh.write("\n".join(["-1"] * len(trajectory_df)) + "\n") + file_handle.write("\n".join(["-1"] * len(trajectory_df)) + "\n") logger.info(f"Forward model script gendata_rft.py: Wrote file {fname}") def _write_active(fname, trajectory_df): """Write a file with "1" pr row if a point is active, "0" if not""" - with open(fname, "w+", encoding="utf-8") as fh: - fh.write( + with open(fname, "w+", encoding="utf-8") as file_handle: + file_handle.write( "\n".join( trajectory_df.sort_values("order")["is_active"] .astype(int) @@ -94,11 +95,11 @@ def _write_active(fname, trajectory_df): def _write_inactive_info(fname, trajectory_df): """Write a file with explanations to users for inactive points""" - with open(fname, "w+", encoding="utf-8") as fh: + with open(fname, "w+", encoding="utf-8") as file_handle: if "inactive_info" not in trajectory_df: - fh.write("") + file_handle.write("") else: - fh.write( + file_handle.write( "\n".join( trajectory_df[~trajectory_df["is_active"]] .sort_values("order")["inactive_info"] @@ -111,6 +112,7 @@ def _write_inactive_info(fname, trajectory_df): def _populate_trajectory_points( well, date, trajectory_points, ecl_grid, ecl_rft, zonemap=None ): + # pylint: disable=too-many-arguments """ Populate a list of trajectory points, that only contain UTM coordinates for a well-path, with (i,j,k) indices corresponding to a given Eclipse grid, @@ -160,6 +162,7 @@ def run( csvfile=None, outputdirectory=".", ): + # pylint: disable=too-many-arguments dframes = [] if not well_times: diff --git a/semeio/jobs/rft/trajectory.py b/semeio/jobs/rft/trajectory.py index bd9cf060a..b698c5f1e 100644 --- a/semeio/jobs/rft/trajectory.py +++ b/semeio/jobs/rft/trajectory.py @@ -27,7 +27,9 @@ class TrajectoryPoint: zone (str) """ + # pylint: disable=too-many-instance-attributes def __init__(self, utm_x, utm_y, measured_depth, true_vertical_depth, zone=None): + # pylint: disable=too-many-arguments self.utm_x = utm_x self.utm_y = utm_y self.measured_depth = measured_depth @@ -97,6 +99,7 @@ def inactive_info(self, zonemap=None): f"{self.grid_ijk[2]} " f"{zonemap[self.grid_ijk[2]]}" ) + return None def get_pressure(self): """Returns the simulated pressure for the point, or -1 if @@ -281,8 +284,8 @@ def load_from_file(cls, filepath): if not os.path.isfile(filename): raise IOError(f"Trajectory file {filename} not found!") - with open(filename, "r", encoding="utf8") as f: - trajectory_lines = f.readlines() + with open(filename, "r", encoding="utf8") as file_handle: + trajectory_lines = file_handle.readlines() trajectory_lines = [strip_comments(line) for line in trajectory_lines] diff --git a/semeio/jobs/rft/utility.py b/semeio/jobs/rft/utility.py index 821529a19..935ff1533 100644 --- a/semeio/jobs/rft/utility.py +++ b/semeio/jobs/rft/utility.py @@ -33,6 +33,7 @@ def load_and_parse_well_time_file(filename): [Tuple] Returns a list of tuples with (well, datetime, report_step) """ + # pylint: disable=too-many-locals if not os.path.isfile(filename): raise argparse.ArgumentTypeError(f"The path {filename} does not exist") diff --git a/semeio/jobs/rft/zonemap.py b/semeio/jobs/rft/zonemap.py index 9c420ae70..31c3d9375 100644 --- a/semeio/jobs/rft/zonemap.py +++ 
b/semeio/jobs/rft/zonemap.py @@ -44,8 +44,8 @@ def load_and_parse_zonemap_file(cls, filename): zones_at_k_value = {} - with open(filename, "r") as f: - zonemap_lines = f.readlines() + with open(filename, "r", encoding="utf-8") as file_handle: + zonemap_lines = file_handle.readlines() zonemap_lines = [ (strip_comments(l), i + 1) for i, l in enumerate(zonemap_lines) @@ -88,14 +88,14 @@ def load_and_parse_zonemap_file(cls, filename): def __contains__(self, item): if isinstance(item, int): return item in self._zones_at_k_value - elif isinstance(item, str): + if isinstance(item, str): return item in self._k_values_at_zone return False def __getitem__(self, item): if isinstance(item, int): return self._zones_at_k_value[item] - elif isinstance(item, str): + if isinstance(item, str): return self._k_values_at_zone[item] raise KeyError(f"{item} is neither a k value nor a zone") diff --git a/semeio/jobs/scripts/design2params.py b/semeio/jobs/scripts/design2params.py index 9e7d7f8d1..aa1d19007 100755 --- a/semeio/jobs/scripts/design2params.py +++ b/semeio/jobs/scripts/design2params.py @@ -57,6 +57,7 @@ def create_parser(): "--parametersfilename", "-p", required=False, + # pylint: disable=protected-access default=design2params._PARAMETERS_TXT, type=str, ) diff --git a/semeio/jobs/scripts/fm_pyscal.py b/semeio/jobs/scripts/fm_pyscal.py index 531eb1064..b1b31989a 100755 --- a/semeio/jobs/scripts/fm_pyscal.py +++ b/semeio/jobs/scripts/fm_pyscal.py @@ -1,13 +1,13 @@ """Forward model for connecting ERT with the Pyscal command line client""" +import argparse import logging -import sys import os -import argparse - -from semeio.jobs.design_kw.design_kw import extract_key_value, rm_genkw_prefix +import sys from pyscal import pyscalcli +from semeio.jobs.design_kw.design_kw import extract_key_value, rm_genkw_prefix + _logger = logging.getLogger("FM_PYSCAL") # The string used here must match what is used as the DEFAULT @@ -164,6 +164,7 @@ def run( family, # int: 1 or 2, default 1 parameters_file_name="parameters.txt", ): + # pylint: disable=too-many-arguments """This function is a wrapper around the Pyscal command line tool. The command line tool is designed around argparse and this function wraps around that design. 
@@ -176,7 +177,7 @@ def run( sys.exit(1) # Determine which interpolation scenario the user has requested: - if int_param_wo_name != MAGIC_NONE and int_param_go_name != MAGIC_NONE: + if MAGIC_NONE not in (int_param_wo_name, int_param_go_name): # Separate interpolation parameter for WaterOil and GasOil do_interpolation = True elif int_param_wo_name != MAGIC_NONE and int_param_go_name == MAGIC_NONE: @@ -254,7 +255,7 @@ def _get_interpolation_values( # Read all key-value pairs from parameters.txt if not os.path.exists(parameters_file_name): raise FileNotFoundError(f"{parameters_file_name} does not exist") - with open(parameters_file_name) as parameters_file: + with open(parameters_file_name, encoding="utf-8") as parameters_file: parameters = parameters_file.readlines() parameter_dict = extract_key_value(parameters) parameter_dict.update(rm_genkw_prefix(parameter_dict)) diff --git a/semeio/jobs/scripts/fm_stea.py b/semeio/jobs/scripts/fm_stea.py index 3d3ee308f..cb0ce3738 100755 --- a/semeio/jobs/scripts/fm_stea.py +++ b/semeio/jobs/scripts/fm_stea.py @@ -1,6 +1,8 @@ +import argparse import json + import stea -import argparse + from semeio import valid_file description = ( @@ -49,7 +51,7 @@ def main_entry_point(): stea_input = stea.SteaInput([options.config, "--ecl_case", options.ecl_case]) result = stea.calculate(stea_input) for res, value in result.results(stea.SteaKeys.CORPORATE).items(): - with open(f"{res}_0", "w") as ofh: + with open(f"{res}_0", "w", encoding="utf-8") as ofh: ofh.write(f"{value}\n") profiles = _get_profiles( stea_input.stea_server, @@ -60,7 +62,7 @@ def main_entry_point(): full_response = _build_full_response( result.data[stea.SteaKeys.KEY_VALUES], profiles ) - with open(options.response_file, "w") as fout: + with open(options.response_file, "w", encoding="utf-8") as fout: json.dump(full_response, fout, indent=4) diff --git a/semeio/jobs/scripts/gendata_rft.py b/semeio/jobs/scripts/gendata_rft.py index 11da5aa8f..e9506016a 100755 --- a/semeio/jobs/scripts/gendata_rft.py +++ b/semeio/jobs/scripts/gendata_rft.py @@ -202,8 +202,8 @@ def main_entry_point(): csvfile=options.csvfile, outputdirectory=options.outputdirectory, ) - with open("GENDATA_RFT.OK", "w") as fh: - fh.write("GENDATA RFT completed OK") + with open("GENDATA_RFT.OK", "w", encoding="utf-8") as file_handle: + file_handle.write("GENDATA RFT completed OK") logger.info("Completed!") except ValueError as exception: logger.error(str(exception)) diff --git a/semeio/jobs/scripts/replace_string.py b/semeio/jobs/scripts/replace_string.py index fcbc0d844..a1f0db2bd 100644 --- a/semeio/jobs/scripts/replace_string.py +++ b/semeio/jobs/scripts/replace_string.py @@ -1,8 +1,6 @@ import argparse - from pathlib import Path - description = """ Performs inplace string replacement in a file. 
@@ -50,4 +48,7 @@ def main_entry_point(): parser = _get_args_parser() options = parser.parse_args() file = Path(options.file) - file.write_text(file.read_text().replace(options.original, options.new)) + file.write_text( + file.read_text(encoding="utf-8").replace(options.original, options.new), + encoding="utf-8", + ) diff --git a/semeio/workflows/ahm_analysis/ahmanalysis.py b/semeio/workflows/ahm_analysis/ahmanalysis.py index fa59f5356..942712907 100644 --- a/semeio/workflows/ahm_analysis/ahmanalysis.py +++ b/semeio/workflows/ahm_analysis/ahmanalysis.py @@ -1,37 +1,25 @@ import collections -import tempfile import glob -import os import itertools +import os +import tempfile import warnings +from pathlib import Path + import numpy as np import pandas as pd - -from pathlib import Path +import xtgeo +from ert_shared.libres_facade import LibresFacade +from ert_shared.plugins.plugin_manager import hook_implementation +from res.enkf import EnkfNode, ErtImplType, ErtRunContext, ESUpdate +from res.enkf.export import GenKwCollector, MisfitCollector from scipy.stats.stats import ks_2samp from sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler - -import xtgeo from xtgeo.grid3d import GridProperty -from semeio.communication import SemeioScript -from semeio._exceptions.exceptions import ValidationError -from res.enkf import ( - ErtImplType, - EnkfNode, -) -from res.enkf import ( - ESUpdate, - ErtRunContext, -) -from res.enkf.export import ( - GenKwCollector, - MisfitCollector, -) - -from ert_shared.plugins.plugin_manager import hook_implementation -from ert_shared.libres_facade import LibresFacade +from semeio._exceptions.exceptions import ValidationError +from semeio.communication import SemeioScript DESCRIPTION = """ AHM_ANALYSIS will calculate the degree of update (using Kolmogorov Smirnov test) @@ -126,6 +114,11 @@ def run( group_by="data_key", output_dir=None, ): + # pylint: disable=method-hidden + # (SemeioScript wraps this run method) + + # pylint: disable=too-many-locals + """Perform analysis of parameters change per obs group prior to posterior of ahm""" if output_dir is not None: @@ -416,11 +409,6 @@ def count_active_observations(df_update_log): return len(df_active) -def create_path(runpathdf, target_name): - """to create path for output data""" - return tempfile.mkdtemp() - - def calc_observationsgroup_misfit(obs_keys, df_update_log, misfit_df): """To get the misfit for total observations (active/inactive).""" @@ -513,8 +501,8 @@ def check_names(ert_currentname, prior_name, target_name): def raise_if_empty(dataframes, messages): """Check input ensemble prior is not empty and if ensemble contains parameters for hm""" - for df, msg in zip(dataframes, messages): - if df.empty: + for dframe in dataframes: + if dframe.empty: raise ValidationError(f"{messages}") diff --git a/semeio/workflows/correlated_observations_scaling/cos.py b/semeio/workflows/correlated_observations_scaling/cos.py index cadd30462..d2d8074d7 100755 --- a/semeio/workflows/correlated_observations_scaling/cos.py +++ b/semeio/workflows/correlated_observations_scaling/cos.py @@ -38,6 +38,7 @@ ============================================= {} """.format( + # pylint: disable=protected-access configsuite.docs.generate(job_config._CORRELATED_OBSERVATIONS_SCHEMA) ) @@ -134,12 +135,13 @@ def _get_example(config_example): ) -class CorrelatedObservationsScalingJob( - SemeioScript -): # pylint: disable=too-few-public-methods - def run(self, job_config): +class CorrelatedObservationsScalingJob(SemeioScript): + 
# pylint: disable=too-few-public-methods + def run(self, job_configuration): + # pylint: disable=method-hidden + # (SemeioScript wraps this run method) facade = LibresFacade(self.ert()) - user_config = load_yaml(job_config) + user_config = load_yaml(job_configuration) user_config = _insert_default_group(user_config) obs = facade.get_observations() @@ -183,12 +185,11 @@ def _get_measured_data( return measured_data -def load_yaml(job_config): - # Allow job_config to be both list and dict. - if isinstance(job_config, dict) or isinstance(job_config, list): - return job_config +def load_yaml(job_configuration): + if isinstance(job_configuration, (dict, list)): + return job_configuration - with open(job_config, "r", encoding="utf-8") as fin: + with open(job_configuration, "r", encoding="utf-8") as fin: return yaml.safe_load(fin) diff --git a/semeio/workflows/correlated_observations_scaling/job_config.py b/semeio/workflows/correlated_observations_scaling/job_config.py index 64de35259..4a8f85672 100644 --- a/semeio/workflows/correlated_observations_scaling/job_config.py +++ b/semeio/workflows/correlated_observations_scaling/job_config.py @@ -1,8 +1,5 @@ -# -*- coding: utf-8 -*- from copy import deepcopy -import six - import configsuite from configsuite import MetaKeys as MK from configsuite import types @@ -27,15 +24,15 @@ def _min_value(value): return value >= 0 -_num_convert_msg = "Will go through the input and try to convert to list of int" +_NUM_CONVERT_MSG = "Will go through the input and try to convert to list of int" -@configsuite.transformation_msg(_num_convert_msg) +@configsuite.transformation_msg(_NUM_CONVERT_MSG) def _to_int_list(value): value = deepcopy(value) - if isinstance(value, six.integer_types): + if isinstance(value, int): return [value] - elif isinstance(value, (list, tuple)): + if isinstance(value, (list, tuple)): value = ",".join([str(x) for x in value]) return _realize_list(value) @@ -78,10 +75,10 @@ def _realize_list(input_string): return real_list -_num_convert_msg = "Create UPDATE_KEYS from CALCULATE_KEYS as it was not specified" +_NUM_CONVERT_MSG = "Create UPDATE_KEYS from CALCULATE_KEYS as it was not specified" -@configsuite.transformation_msg(_num_convert_msg) +@configsuite.transformation_msg(_NUM_CONVERT_MSG) def _expand_input(input_value): expanded_values = deepcopy(input_value) if "CALCULATE_KEYS" in expanded_values and "UPDATE_KEYS" not in expanded_values: @@ -98,6 +95,7 @@ def _min_max_value(value): @configsuite.validator_msg("keys must be provided for CALCULATE_KEYS") def _CALCULATE_KEYS_key_not_empty_list(content): + # pylint: disable=invalid-name return len(content) > 0 diff --git a/semeio/workflows/correlated_observations_scaling/obs_utils.py b/semeio/workflows/correlated_observations_scaling/obs_utils.py index 528fcad3f..c38c4ccf2 100644 --- a/semeio/workflows/correlated_observations_scaling/obs_utils.py +++ b/semeio/workflows/correlated_observations_scaling/obs_utils.py @@ -15,8 +15,7 @@ def _wildcard_to_dict_list(matching_keys, entry): """ if "index" in entry: return [{"key": key, "index": entry["index"]} for key in matching_keys] - else: - return [{"key": key} for key in matching_keys] + return [{"key": key} for key in matching_keys] def _expand_wildcard(obs_list, wildcard_key, entry): @@ -122,7 +121,7 @@ def keys_with_data(observations, keys, ensemble_size, storage): def _data_index_to_obs_index(obs, obs_key, data_index_list): if obs[obs_key].getImplementationType().name != "GEN_OBS": return data_index_list - elif data_index_list is None: + if 
data_index_list is None: return data_index_list for timestep in obs[obs_key].getStepList(): diff --git a/semeio/workflows/correlated_observations_scaling/observation_scale_factor.py b/semeio/workflows/correlated_observations_scaling/observation_scale_factor.py index 437c21824..4d6b7761c 100644 --- a/semeio/workflows/correlated_observations_scaling/observation_scale_factor.py +++ b/semeio/workflows/correlated_observations_scaling/observation_scale_factor.py @@ -1,7 +1,7 @@ from semeio.workflows.correlated_observations_scaling.scaled_matrix import DataMatrix -class ObservationScaleFactor(object): +class ObservationScaleFactor: def __init__( self, reporter, @@ -27,7 +27,7 @@ def get_scaling_factor(self, threshold): Collects data performs pca, and returns scaling factor, assumes validated input. """ nr_observations = self._measured_data.data.shape[1] - nr_components, singular_values = self.perform_pca(threshold) + nr_components, _ = self.perform_pca(threshold) scale_factor = DataMatrix.get_scaling_factor(nr_observations, nr_components) self._reporter.publish("scale_factor", scale_factor) diff --git a/semeio/workflows/correlated_observations_scaling/scaled_matrix.py b/semeio/workflows/correlated_observations_scaling/scaled_matrix.py index 0c018ea37..4b2c7b712 100644 --- a/semeio/workflows/correlated_observations_scaling/scaled_matrix.py +++ b/semeio/workflows/correlated_observations_scaling/scaled_matrix.py @@ -8,7 +8,7 @@ ) -class DataMatrix(object): +class DataMatrix: def __init__(self, input_data): """ Takes input data in the form of a Pandas multi index dataframe with @@ -91,6 +91,8 @@ def get_nr_primary_components(self, threshold): """ data_matrix = self.get_data_matrix() data_matrix = data_matrix - data_matrix.mean(axis=0) - _, s, _ = np.linalg.svd(data_matrix.astype(np.float), full_matrices=False) - variance_ratio = np.cumsum(s**2) / np.sum(s**2) - return len([1 for i in variance_ratio[:-1] if i < threshold]) + 1, s + _, singulars, _ = np.linalg.svd( + data_matrix.astype(np.float), full_matrices=False + ) + variance_ratio = np.cumsum(singulars**2) / np.sum(singulars**2) + return len([1 for i in variance_ratio[:-1] if i < threshold]) + 1, singulars diff --git a/semeio/workflows/localisation/local_config_script.py b/semeio/workflows/localisation/local_config_script.py index 8ad287b0f..6ee655df9 100644 --- a/semeio/workflows/localisation/local_config_script.py +++ b/semeio/workflows/localisation/local_config_script.py @@ -8,6 +8,8 @@ class LocalisationConfigJob(SemeioScript): def run(self, *args): + # pylint: disable=method-hidden + # (SemeioScript wraps this run method) ert = self.ert() facade = LibresFacade(self.ert()) # Read yml file with specifications diff --git a/semeio/workflows/localisation/local_script_lib.py b/semeio/workflows/localisation/local_script_lib.py index 69bc6fe89..5904101dd 100644 --- a/semeio/workflows/localisation/local_script_lib.py +++ b/semeio/workflows/localisation/local_script_lib.py @@ -1,23 +1,22 @@ -# pylint: disable=W0201 -import math -import yaml -import cwrap -import numpy as np -from numpy import ma -import logging +# pxylint: disable=attribute-defined-outside-init import itertools - +import logging +import math from collections import defaultdict -from typing import List from dataclasses import dataclass, field +from typing import List -from ecl.util.geometry import Surface -from ecl.eclfile import Ecl3DKW +import cwrap +import numpy as np +import yaml from ecl.ecl_type import EclDataType +from ecl.eclfile import Ecl3DKW from ecl.grid.ecl_grid import 
EclGrid -from res.enkf.row_scaling import RowScaling -from res.enkf.enums.ert_impl_type_enum import ErtImplType +from ecl.util.geometry import Surface +from numpy import ma from res.enkf.enums.enkf_var_type_enum import EnkfVarType +from res.enkf.enums.ert_impl_type_enum import ErtImplType +from res.enkf.row_scaling import RowScaling from semeio.workflows.localisation.localisation_debug_settings import ( LogLevel, @@ -34,8 +33,7 @@ class Parameter: def to_list(self): if self.parameters: return [f"{self.name}:{parameter}" for parameter in self.parameters] - else: - return [f"{self.name}"] + return [f"{self.name}"] def to_dict(self): return {self.name: self.parameters} @@ -46,17 +44,21 @@ class Parameters: parameters: List[Parameter] = field(default_factory=list) def append(self, new): + # pylint: disable=no-member + # (false positive) self.parameters.append(new) def to_list(self): - # pylint: disable=E1133 + # pylint: disable=not-an-iterable + # (false positive) result = [] for parameter in self.parameters: result.extend(parameter.to_list()) return result def to_dict(self): - # pylint: disable=E1133 + # pylint: disable=not-an-iterable + # (false positive) result = {} for parameter in self.parameters: if parameter.name in result: @@ -346,6 +348,9 @@ def define_look_up_index(user_defined_active_region_list, max_region_number): def calculate_scaling_factors_in_regions( grid, region_parameter, active_segment_list, scaling_value_list, smooth_range_list ): + # pylint: disable=unused-argument + # ('grid' and 'smooth-range-list' are not currently used) + min_region_number = region_parameter.min() max_region_number = region_parameter.max() @@ -503,7 +508,7 @@ def apply_segment( def read_region_files_for_all_correlation_groups(user_config, grid): - # pylint: disable-msg=R1702 + # pylint: disable=too-many-nested-blocks,too-many-locals,invalid-name if grid is None: # No grid is defined. Not relevant to look for region files to read. 
return None @@ -513,7 +518,7 @@ def read_region_files_for_all_correlation_groups(user_config, grid): nx = grid.get_nx() ny = grid.get_ny() nz = grid.get_nz() - for count, corr_spec in enumerate(user_config.correlations): + for _, corr_spec in enumerate(user_config.correlations): region_param_dict[corr_spec.name] = None if corr_spec.field_scale is not None: if corr_spec.field_scale.method == "segment": @@ -567,8 +572,8 @@ def add_ministeps( grid_for_field, ): # pylint: disable-msg=too-many-branches - # pylint: disable-msg=R0915 - # pylint: disable-msg=R1702 + # pylint: disable-msg=too-many-statements + # pylint: disable-msg=too-many-nested-blocks debug_print("Add all ministeps:", LogLevel.LEVEL1, user_config.log_level) ScalingValues.initialize() # Read all region files used in correlation groups, @@ -578,7 +583,7 @@ def add_ministeps( user_config, grid_for_field ) update_steps = [] - for count, corr_spec in enumerate(user_config.correlations): + for corr_spec in user_config.correlations: debug_print( f"Define ministep: {corr_spec.name}", LogLevel.LEVEL1, user_config.log_level ) @@ -808,6 +813,7 @@ class Decay: grid: object def __post_init__(self): + # pylint: disable=attribute-defined-outside-init angle = (90.0 - self.azimuth) * math.pi / 180.0 self.cosangle = math.cos(angle) self.sinangle = math.sin(angle) @@ -818,6 +824,7 @@ def get_dx_dy(self, data_index): x, y, _ = self.grid.get_xyz(active_index=data_index) except AttributeError: # Assume the grid is a 2D Surface grid + # pylint: disable=no-member x, y = self.grid.getXY(data_index) x_unrotated = x - self.obs_pos[0] y_unrotated = y - self.obs_pos[1] diff --git a/semeio/workflows/localisation/localisation_config.py b/semeio/workflows/localisation/localisation_config.py index badd2ea71..9bdd5a7ac 100644 --- a/semeio/workflows/localisation/localisation_config.py +++ b/semeio/workflows/localisation/localisation_config.py @@ -309,13 +309,11 @@ def validate_field_scale(cls, value): "from_file": ScalingFromFileConfig, "segment": ScalingForSegmentsConfig, } - if method in _valid_methods: - return _valid_methods[method](**value) - else: - valid_list = list(_valid_methods.keys()) + if method not in _valid_methods: raise ValueError( - f"Unknown method: {method}, valid methods are: {valid_list}" + f"Unknown method: {method}, valid methods are: {_valid_methods.keys()}" ) + return _valid_methods[method](**value) @validator("surface_scale", pre=True) def validate_surface_scale(cls, value): @@ -346,13 +344,11 @@ def validate_surface_scale(cls, value): "const_exponential_decay": ConstWithExponentialTaperingConfig, } - if method in _valid_methods: - return _valid_methods[method](**value) - else: - valid_list = list(_valid_methods.keys()) + if method not in _valid_methods: raise ValueError( - f"Unknown method: {method}, valid methods are: {valid_list}" + f"Unknown method: {method}, valid methods are: {_valid_methods.keys()}" ) + return _valid_methods[method](**value) class LocalisationConfig(BaseModel): diff --git a/semeio/workflows/misfit_preprocessor/config.py b/semeio/workflows/misfit_preprocessor/config.py index a9ac1b6d3..a4e2626d1 100644 --- a/semeio/workflows/misfit_preprocessor/config.py +++ b/semeio/workflows/misfit_preprocessor/config.py @@ -1,8 +1,10 @@ import fnmatch -from typing import List, Dict, Any +from typing import Any, Dict, List + +import pydantic + from semeio.workflows.misfit_preprocessor.exceptions import ValidationError from semeio.workflows.misfit_preprocessor.workflow_config import MisfitConfig -import pydantic def 
_observations_present(observations, context) -> List[Dict[str, Any]]: diff --git a/semeio/workflows/misfit_preprocessor/hierarchical_config.py b/semeio/workflows/misfit_preprocessor/hierarchical_config.py index 344ad23b6..56e3b2bb2 100644 --- a/semeio/workflows/misfit_preprocessor/hierarchical_config.py +++ b/semeio/workflows/misfit_preprocessor/hierarchical_config.py @@ -2,23 +2,25 @@ from typing import Literal except ImportError: from typing_extensions import Literal + +import collections + +# pylint: disable=ungrouped-imports from typing import Union + from pydantic import ( BaseModel, - root_validator, - validator, - conint, Extra, + PrivateAttr, + PyObject, StrictFloat, StrictInt, - PyObject, - PrivateAttr, + conint, + root_validator, + validator, ) -import collections -from semeio.workflows.spearman_correlation_job.cluster_analysis import ( - fcluster_analysis, -) +from semeio.workflows.spearman_correlation_job.cluster_analysis import fcluster_analysis # pylint: disable=too-few-public-methods,no-self-argument diff --git a/semeio/workflows/misfit_preprocessor/job.py b/semeio/workflows/misfit_preprocessor/job.py index 5f0a20e9c..a50122208 100644 --- a/semeio/workflows/misfit_preprocessor/job.py +++ b/semeio/workflows/misfit_preprocessor/job.py @@ -14,6 +14,7 @@ def run(config, measured_data, reporter): elif workflow.clustering.type == "limited_kmeans": sconfig["n_clusters"] = nr_components + # pylint: disable=protected-access scaling_configs = spearman_job( measured_data, reporter, diff --git a/semeio/workflows/misfit_preprocessor/misfit_preprocessor.py b/semeio/workflows/misfit_preprocessor/misfit_preprocessor.py index 02e2470d3..e38d160ba 100644 --- a/semeio/workflows/misfit_preprocessor/misfit_preprocessor.py +++ b/semeio/workflows/misfit_preprocessor/misfit_preprocessor.py @@ -19,6 +19,7 @@ class MisfitPreprocessorJob(SemeioScript): + # pylint: disable=method-hidden def run(self, *args): facade = LibresFacade(self.ert()) config_record = _fetch_config_record(args) @@ -40,6 +41,7 @@ def run(self, *args): # to run. try: + # pylint: disable=not-callable CorrelatedObservationsScalingJob(self.ert()).run(scaling_configs) except EmptyDatasetException: pass @@ -48,16 +50,15 @@ def run(self, *args): def _fetch_config_record(args): if len(args) == 0: return {} - elif len(args) == 1: + if len(args) == 1: with open(args[0], encoding="utf8") as f: return yaml.safe_load(f) - else: - raise ValueError( - ( - "Expected at most one argument, namely the path to a " - f"configuration file. Received {len(args)} arguments: {args}" - ) + raise ValueError( + ( + "Expected at most one argument, namely the path to a " + f"configuration file. 
Received {len(args)} arguments: {args}" ) + ) def _load_measured_record(facade, obs_keys): diff --git a/semeio/workflows/misfit_preprocessor/workflow_config.py b/semeio/workflows/misfit_preprocessor/workflow_config.py index ca750817a..d5c5dc9b1 100644 --- a/semeio/workflows/misfit_preprocessor/workflow_config.py +++ b/semeio/workflows/misfit_preprocessor/workflow_config.py @@ -79,7 +79,6 @@ def validate_workflow(cls, value): workflow = value.get("type") if workflow == "auto_scale": return AutoScaleConfig(**value) - elif workflow == "custom_scale": + if workflow == "custom_scale": return CustomScaleConfig(**value) - else: - raise ValueError(f"Unknown workflow {workflow}") + raise ValueError(f"Unknown workflow {workflow}") diff --git a/semeio/workflows/spearman_correlation_job/job.py b/semeio/workflows/spearman_correlation_job/job.py index 244058d8f..e39c0de24 100644 --- a/semeio/workflows/spearman_correlation_job/job.py +++ b/semeio/workflows/spearman_correlation_job/job.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # pylint: disable=logging-fstring-interpolation import itertools import logging @@ -78,7 +77,7 @@ def _remove_singular_obs(clusters): def _config_creation(clusters): config = [] - for cluster_nr, cluster in clusters.items(): + for cluster in clusters.values(): config.append( { "CALCULATE_KEYS": { diff --git a/semeio/workflows/spearman_correlation_job/spearman_correlation.py b/semeio/workflows/spearman_correlation_job/spearman_correlation.py index cae261408..a26458290 100755 --- a/semeio/workflows/spearman_correlation_job/spearman_correlation.py +++ b/semeio/workflows/spearman_correlation_job/spearman_correlation.py @@ -15,6 +15,8 @@ class SpearmanCorrelationJob(SemeioScript): def run(self, *args): + # pylint: disable=method-hidden + # (SemeioScript wraps this run method) facade = LibresFacade(self.ert()) obs_keys = [ @@ -33,6 +35,7 @@ def run(self, *args): if not args.dry_run: try: + # pylint: disable=not-callable CorrelatedObservationsScalingJob(self.ert()).run(scaling_configs) except EmptyDatasetException: pass diff --git a/setup.py b/setup.py index 8ca93b72a..f5b45a148 100755 --- a/setup.py +++ b/setup.py @@ -1,6 +1,8 @@ -from setuptools import setup, find_packages from pathlib import Path +from setuptools import find_packages, setup + +# pylint: disable=line-too-long setup( name="semeio", long_description=Path("README.md").read_text(), diff --git a/tests/communication/test-data/logging_test_workflow_job.py b/tests/communication/test-data/logging_test_workflow_job.py index 60fce63e0..20c0c5236 100755 --- a/tests/communication/test-data/logging_test_workflow_job.py +++ b/tests/communication/test-data/logging_test_workflow_job.py @@ -5,6 +5,7 @@ class TestWorkflowJob(SemeioScript): + # pylint: disable=method-hidden def run(self, *args): self.reporter.publish("test_data", list(range(10))) diff --git a/tests/communication/test_integration.py b/tests/communication/test_integration.py index 8ccd27aa3..d05bb6c62 100644 --- a/tests/communication/test_integration.py +++ b/tests/communication/test_integration.py @@ -1,11 +1,12 @@ import json import os -import pytest import shutil import subprocess import sys -from semeio.communication import SEMEIOSCRIPT_LOG_FILE +import pytest + +from semeio.communication import SEMEIOSCRIPT_LOG_FILE TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "test-data") ERT_INSTALLED = shutil.which("ert") is not None diff --git a/tests/communication/unit/test_semeio_script.py b/tests/communication/unit/test_semeio_script.py index 28ec3d6e9..e614c4dab 
100644 --- a/tests/communication/unit/test_semeio_script.py +++ b/tests/communication/unit/test_semeio_script.py @@ -1,12 +1,14 @@ import json import logging import os -import pytest from threading import Thread +from unittest.mock import Mock + +import pytest from semeio.communication import SEMEIOSCRIPT_LOG_FILE, SemeioScript -from unittest.mock import Mock +# pylint: disable=method-hidden def _ert_mock(ensemble_path="storage", user_case_name="case_name"): diff --git a/tests/hook_implementations/test_hook_implementations.py b/tests/hook_implementations/test_hook_implementations.py index c39e95cb3..044838c6b 100644 --- a/tests/hook_implementations/test_hook_implementations.py +++ b/tests/hook_implementations/test_hook_implementations.py @@ -1,15 +1,16 @@ # pylint: disable=missing-docstring import os -import semeio.hook_implementations.jobs -from semeio.workflows.misfit_preprocessor import misfit_preprocessor -from semeio.workflows.spearman_correlation_job import spearman_correlation +from ert_shared.plugins.plugin_manager import ErtPluginManager + +import semeio.hook_implementations.jobs +from semeio.workflows.ahm_analysis import ahmanalysis from semeio.workflows.correlated_observations_scaling import cos -from semeio.workflows.localisation import local_config_script from semeio.workflows.csv_export2 import csv_export2 -from semeio.workflows.ahm_analysis import ahmanalysis -from ert_shared.plugins.plugin_manager import ErtPluginManager +from semeio.workflows.localisation import local_config_script +from semeio.workflows.misfit_preprocessor import misfit_preprocessor +from semeio.workflows.spearman_correlation_job import spearman_correlation def test_hook_implementations(): diff --git a/tests/jobs/ahm_analysis/test_ahm_analysis.py b/tests/jobs/ahm_analysis/test_ahm_analysis.py index 970fc84fe..29ad75b8a 100644 --- a/tests/jobs/ahm_analysis/test_ahm_analysis.py +++ b/tests/jobs/ahm_analysis/test_ahm_analysis.py @@ -1,16 +1,15 @@ -import pytest -import pandas as pd -import numpy as np - -from semeio.workflows.ahm_analysis import ahmanalysis -from semeio._exceptions.exceptions import ValidationError - -from unittest.mock import MagicMock from itertools import product from pathlib import Path +from unittest.mock import MagicMock -from scipy import stats +import numpy as np +import pandas as pd +import pytest from ecl.grid import EclGridGenerator +from scipy import stats + +from semeio._exceptions.exceptions import ValidationError +from semeio.workflows.ahm_analysis import ahmanalysis def test_make_update_log_df(test_data_root): @@ -160,6 +159,7 @@ def flatten(regular_list): grid_prop("PORO", iens + 5, grid.getGlobalSize(), f"{iens}_PORO_field.grdecl") files = [f"{ens_nr}_{input_parameter}_field.grdecl" for ens_nr in range(5)] + # pylint: disable=protected-access result = ahmanalysis._import_field_param("MY_GRID", input_parameter, files) assert flatten(result) == flatten(expected_result) @@ -429,6 +429,7 @@ def side_effect(key): ) def test_group_obs_data_key(keys, expected_result): facade = create_facade(keys) + # pylint: disable=protected-access result = ahmanalysis._group_observations(facade, keys.keys(), group_by="data_key") assert result == expected_result @@ -448,6 +449,7 @@ def test_group_obs_data_key(keys, expected_result): ) def test_group_obs_obs_key(obs_keys, expected_result): facade = MagicMock() + # pylint: disable=protected-access result = ahmanalysis._group_observations(facade, obs_keys, group_by="obs_key") assert result == expected_result facade.assert_not_called() diff --git 
diff --git a/tests/jobs/ahm_analysis/test_integration.py b/tests/jobs/ahm_analysis/test_integration.py
index f86319177..e3ca04494 100644
--- a/tests/jobs/ahm_analysis/test_integration.py
+++ b/tests/jobs/ahm_analysis/test_integration.py
@@ -1,18 +1,16 @@
 # pylint: disable=unsubscriptable-object  # pylint issue
 import os
 import shutil
-import pytest
-import pandas as pd
-from semeio.workflows.ahm_analysis import ahmanalysis
-from semeio._exceptions.exceptions import ValidationError
-
 from pathlib import Path
-from res.enkf import EnKFMain, ResConfig
-from res.enkf.export import (
-    SummaryObservationCollector,
-    GenDataObservationCollector,
-)
+
+import pandas as pd
+import pytest
 from ecl.grid import EclGridGenerator
+from res.enkf import EnKFMain, ResConfig
+from res.enkf.export import GenDataObservationCollector, SummaryObservationCollector
+
+from semeio._exceptions.exceptions import ValidationError
+from semeio.workflows.ahm_analysis import ahmanalysis
 
 
 @pytest.mark.usefixtures("setup_tmpdir")
diff --git a/tests/jobs/correlated_observations_scaling/test_integration.py b/tests/jobs/correlated_observations_scaling/test_integration.py
index 2bc21feb5..298692eba 100644
--- a/tests/jobs/correlated_observations_scaling/test_integration.py
+++ b/tests/jobs/correlated_observations_scaling/test_integration.py
@@ -1,9 +1,12 @@
+import json
 import os
 import shutil
-import json
+from unittest.mock import MagicMock
+
 import numpy as np
 import pytest
 import yaml
+from ert_shared.plugins.plugin_manager import ErtPluginManager
 from res.enkf import EnKFMain, ResConfig
 
 from semeio.workflows.correlated_observations_scaling import cos
@@ -11,8 +14,6 @@
     CorrelatedObservationsScalingJob,
 )
 from tests.jobs.conftest import TEST_DATA_DIR
-from ert_shared.plugins.plugin_manager import ErtPluginManager
-from unittest.mock import MagicMock
 
 
 def get_std_from_obs_vector(vector):
diff --git a/tests/jobs/correlated_observations_scaling/unit/test_examples.py b/tests/jobs/correlated_observations_scaling/unit/test_examples.py
index 82180fc42..329f04f63 100644
--- a/tests/jobs/correlated_observations_scaling/unit/test_examples.py
+++ b/tests/jobs/correlated_observations_scaling/unit/test_examples.py
@@ -4,6 +4,8 @@
 
 from semeio.workflows.correlated_observations_scaling import cos, job_config
 
+# pylint: disable=protected-access
+
 
 @pytest.mark.parametrize(
     "input_config",
diff --git a/tests/jobs/correlated_observations_scaling/unit/test_obs_util.py b/tests/jobs/correlated_observations_scaling/unit/test_obs_util.py
index 011b01732..89f81c686 100644
--- a/tests/jobs/correlated_observations_scaling/unit/test_obs_util.py
+++ b/tests/jobs/correlated_observations_scaling/unit/test_obs_util.py
@@ -135,6 +135,7 @@ def test_create_observation_vectors(setup_ert):
         "CALCULATE_KEYS": {"keys": [{"key": "WPR_DIFF_1"}]},
         "UPDATE_KEYS": {"keys": [{"key": "WPR_DIFF_1"}]},
     }
+    # pylint: disable=protected-access
     config = configsuite.ConfigSuite(
         valid_config_data,
         job_config._CORRELATED_OBSERVATIONS_SCHEMA,
@@ -158,6 +159,7 @@ def test_add_observation_vectors(test_data_root):
 
     valid_config_data = {"UPDATE_KEYS": {"keys": [{"key": "WOPR_OP1_108"}]}}
 
+    # pylint: disable=protected-access
     schema = job_config._CORRELATED_OBSERVATIONS_SCHEMA
     config = configsuite.ConfigSuite(valid_config_data, schema, deduce_required=True)
diff --git a/tests/jobs/correlated_observations_scaling/unit/test_scaling_job.py b/tests/jobs/correlated_observations_scaling/unit/test_scaling_job.py
index 2408a7b86..56aee90f1 100644
--- a/tests/jobs/correlated_observations_scaling/unit/test_scaling_job.py
+++ b/tests/jobs/correlated_observations_scaling/unit/test_scaling_job.py
@@ -10,6 +10,7 @@ def test_filter_on_column_index():
     matrix = np.random.rand(10, 10)
 
     index_lists = [[0, 1], [1, 2, 3], [1, 2, 3, 4, 5]]
+    # pylint: disable=protected-access
     for index_list in index_lists:
         result = measured.MeasuredData._filter_on_column_index(
             pd.DataFrame(matrix), index_list
diff --git a/tests/jobs/csv_export2/conftest.py b/tests/jobs/csv_export2/conftest.py
index 319298775..c35034e4b 100644
--- a/tests/jobs/csv_export2/conftest.py
+++ b/tests/jobs/csv_export2/conftest.py
@@ -14,7 +14,7 @@ def find_available_test_data():
     if TEST_DATA_DIR is None:
         if os.path.isdir(TEST_DATA_DIR_STAVANGER):
             return TEST_DATA_DIR_STAVANGER
-        elif os.path.isdir(TEST_DATA_DIR_BERGEN):
+        if os.path.isdir(TEST_DATA_DIR_BERGEN):
             return TEST_DATA_DIR_BERGEN
     return TEST_DATA_DIR
diff --git a/tests/jobs/design2params/test_design2params.py b/tests/jobs/design2params/test_design2params.py
index cdbb17480..e6704e79e 100644
--- a/tests/jobs/design2params/test_design2params.py
+++ b/tests/jobs/design2params/test_design2params.py
@@ -24,6 +24,7 @@ def input_data(tmpdir):
 
     os.chdir(cwd)
 
 
+# pylint: disable=protected-access
 @pytest.mark.usefixtures("input_data")
 @pytest.mark.parametrize(
     "test_file, expected_file",
diff --git a/tests/jobs/design_kw/test_design_kw.py b/tests/jobs/design_kw/test_design_kw.py
index 844a28d5f..3510a21fe 100644
--- a/tests/jobs/design_kw/test_design_kw.py
+++ b/tests/jobs/design_kw/test_design_kw.py
@@ -245,6 +245,7 @@ def test_run(input_data, filenames):
     with open(reference_filename, "r", encoding="utf-8") as reference_file:
         reference = reference_file.read()
 
+    # pylint: disable=protected-access
     with open(design_kw._STATUS_FILE_NAME, "r", encoding="utf-8") as status_file:
         status = status_file.read()
@@ -262,6 +263,7 @@ def test_run_unmatched(input_data):
         parameters_file_name="parameters.txt",
     )
 
+    # pylint: disable=protected-access
     assert not os.path.isfile(design_kw._STATUS_FILE_NAME)
@@ -276,4 +278,5 @@ def test_run_duplicate_keys(input_data):
         parameters_file_name="parameters_w_duplicates.txt",
     )
 
+    # pylint: disable=protected-access
     assert not os.path.isfile(design_kw._STATUS_FILE_NAME)
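
The hunks above add `# pylint: disable=...` pragmas at two different levels,
and the distinction matters. A sketch of the scoping rules being relied on
(standard pylint behaviour; the function below is invented for illustration):

    # pylint: disable=protected-access
    # ^ module level: suppressed from here to the end of the file, the
    #   pattern used in test_design2params.py and unit/test_examples.py.


    def test_status_file():
        # pylint: disable=line-too-long
        # ^ block level: suppressed only for the rest of this function, the
        #   pattern used in test_design_kw.py and test_obs_util.py.
        assert True
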
diff --git a/tests/jobs/localisation/test_configs/test_config.py b/tests/jobs/localisation/test_configs/test_config.py
index 76ecbe949..2563d3b9d 100644
--- a/tests/jobs/localisation/test_configs/test_config.py
+++ b/tests/jobs/localisation/test_configs/test_config.py
@@ -1,15 +1,14 @@
 from unittest.mock import MagicMock
 
-import pytest
 import pydantic
+import pytest
+from res.enkf.enums.ert_impl_type_enum import ErtImplType
 
 from semeio.workflows.localisation.localisation_config import (
     LocalisationConfig,
-    expand_wildcards,
     check_for_duplicated_correlation_specifications,
+    expand_wildcards,
 )
-from res.enkf.enums.ert_impl_type_enum import ErtImplType
-
 
 ERT_OBS = ["OBS1", "OBS2", "OBS11", "OBS22", "OBS12", "OBS13", "OBS14", "OBS3"]
 ERT_PARAM = [
diff --git a/tests/jobs/localisation/test_configs/test_field_config.py b/tests/jobs/localisation/test_configs/test_field_config.py
index 6306d6788..638ac236f 100644
--- a/tests/jobs/localisation/test_configs/test_field_config.py
+++ b/tests/jobs/localisation/test_configs/test_field_config.py
@@ -1,13 +1,16 @@
-import pytest
-from hypothesis import strategies as st, given
-import pydantic
 import pathlib
+from unittest.mock import MagicMock
+
+import pydantic
+import pytest
+from hypothesis import given
+from hypothesis import strategies as st
+
 from semeio.workflows.localisation.localisation_config import (
-    GaussianConfig,
     CorrelationConfig,
     ExponentialConfig,
+    GaussianConfig,
 )
-from unittest.mock import MagicMock
 
 
 @given(
diff --git a/tests/jobs/localisation/test_integration.py b/tests/jobs/localisation/test_integration.py
index 75273757c..8ea2bd385 100644
--- a/tests/jobs/localisation/test_integration.py
+++ b/tests/jobs/localisation/test_integration.py
@@ -1,13 +1,14 @@
-# pylint: disable=R0915
-import yaml
+# pylint: disable=too-many-statements
+import itertools
+
+import numpy as np
 import pytest
+import xtgeo
+import yaml
 from res.enkf import EnKFMain, ResConfig
-from semeio.workflows.localisation.local_config_script import LocalisationConfigJob
-
 from xtgeo.surface.regular_surface import RegularSurface
-import xtgeo
-import numpy as np
-import itertools
+
+from semeio.workflows.localisation.local_config_script import LocalisationConfigJob
 
 
 @pytest.mark.parametrize(
diff --git a/tests/jobs/localisation/test_methods.py b/tests/jobs/localisation/test_methods.py
index 6975d8eeb..fd611eda0 100644
--- a/tests/jobs/localisation/test_methods.py
+++ b/tests/jobs/localisation/test_methods.py
@@ -1,18 +1,19 @@
-import numpy as np
-from numpy import ma
 import itertools
-import pytest
+
 import cwrap
+import numpy as np
+import pytest
+from ecl.grid.ecl_grid_generator import EclGridGenerator
+from numpy import ma
 
 from semeio.workflows.localisation.local_script_lib import (
-    smooth_parameter,
-    calculate_scaling_factors_in_regions,
-    GaussianDecay,
-    ExponentialDecay,
-    ConstGaussianDecay,
     ConstExponentialDecay,
+    ConstGaussianDecay,
+    ExponentialDecay,
+    GaussianDecay,
+    calculate_scaling_factors_in_regions,
+    smooth_parameter,
 )
-from ecl.grid.ecl_grid_generator import EclGridGenerator
 
 
 def create_box_grid(
@@ -268,7 +269,7 @@ def test_calculate_scaling_factors_in_regions(snapshot):
     (
         scaling_factor_param,
         active_region_values_used_masked,
-        regions_in_param,
+        _,
     ) = calculate_scaling_factors_in_regions(
         grid,
         region_param_masked,
@@ -309,7 +310,7 @@ def test_smooth_parameter(snapshot):
 
 
 def test_decay_function_with_new_options(snapshot):
-    grid, nx, ny, nz = create_box_grid(
+    grid, _, _, _ = create_box_grid(
         nx=25,
         ny=25,
         nz=10,
diff --git a/tests/jobs/localisation/test_valid_rst.py b/tests/jobs/localisation/test_valid_rst.py
index 2dd664635..28d73a52e 100644
--- a/tests/jobs/localisation/test_valid_rst.py
+++ b/tests/jobs/localisation/test_valid_rst.py
@@ -1,6 +1,7 @@
-from semeio.workflows.localisation.local_config_script import DESCRIPTION, EXAMPLES
-import rstcheck
 import pytest
+import rstcheck
+
+from semeio.workflows.localisation.local_config_script import DESCRIPTION, EXAMPLES
 
 
 @pytest.mark.parametrize("rst_text", [DESCRIPTION, EXAMPLES])
diff --git a/tests/jobs/misfit_preprocessor/test_integration.py b/tests/jobs/misfit_preprocessor/test_integration.py
index 36298cceb..52c5a7303 100644
--- a/tests/jobs/misfit_preprocessor/test_integration.py
+++ b/tests/jobs/misfit_preprocessor/test_integration.py
@@ -1,18 +1,18 @@
 # pylint: disable=not-callable
 import os
 import shutil
-import yaml
-import pytest
+from unittest.mock import MagicMock, Mock
 
+import pytest
+import yaml
 from res.enkf import EnKFMain, ResConfig
 
 import semeio
-from semeio.workflows.misfit_preprocessor import misfit_preprocessor
+from semeio.workflows.correlated_observations_scaling import cos
 from semeio.workflows.correlated_observations_scaling.exceptions import (
     EmptyDatasetException,
 )
-
-from unittest.mock import Mock
+from semeio.workflows.misfit_preprocessor import misfit_preprocessor
 
 
 @pytest.mark.usefixtures("setup_tmpdir")
@@ -224,8 +224,6 @@ def test_misfit_preprocessor_invalid_config(test_data_root):
 
 @pytest.mark.usefixtures("setup_tmpdir")
 def test_misfit_preprocessor_all_obs(test_data_root, monkeypatch):
-    from unittest.mock import MagicMock
-    from semeio.workflows.correlated_observations_scaling import cos
 
     test_data_dir = os.path.join(test_data_root, "snake_oil")
diff --git a/tests/jobs/misfit_preprocessor/test_misfit_preprocessor.py b/tests/jobs/misfit_preprocessor/test_misfit_preprocessor.py
index 95b44d457..f88108823 100644
--- a/tests/jobs/misfit_preprocessor/test_misfit_preprocessor.py
+++ b/tests/jobs/misfit_preprocessor/test_misfit_preprocessor.py
@@ -1,15 +1,15 @@
+import random
+from unittest.mock import Mock
+
 import numpy as np
 import pandas as pd
 import pytest
-import random
 
-from semeio.workflows import misfit_preprocessor
-
-from unittest.mock import Mock
+from semeio.workflows import misfit_preprocessor
 from semeio.workflows.misfit_preprocessor import assemble_config
 
 
-class MockedMeasuredData(object):
+class MockedMeasuredData:
     def __init__(self, observations, responses):
         self._data = self._build_data(observations, responses)
 
@@ -78,7 +78,7 @@ def generate_simulated_responses(
     ensemble_size,
 ):
     simulated = {}
-    for poly_idx, (poly_fm, states) in enumerate(zip(forward_polynomials, poly_states)):
+    for poly_idx, states in enumerate(poly_states):
         new_parameters = np.random.uniform(0, 10, 3 * ensemble_size)
         new_parameters.resize(ensemble_size, 3)
         simulated[f"poly_{poly_idx}"] = {
diff --git a/tests/jobs/misfit_preprocessor/unit/test_cluster_functions.py b/tests/jobs/misfit_preprocessor/unit/test_cluster_functions.py
index c7bd71a38..75d1d6d57 100644
--- a/tests/jobs/misfit_preprocessor/unit/test_cluster_functions.py
+++ b/tests/jobs/misfit_preprocessor/unit/test_cluster_functions.py
@@ -1,10 +1,11 @@
 # pylint: disable=unbalanced-tuple-unpacking
+import pytest
+from sklearn.datasets import make_blobs
+
 from semeio.workflows.spearman_correlation_job.cluster_analysis import (
-    kmeans_analysis,
     fcluster_analysis,
+    kmeans_analysis,
 )
-from sklearn.datasets import make_blobs
-import pytest
 
 
 @pytest.mark.parametrize(
@@ -16,9 +17,7 @@ def test_same_format(func, kwargs):
     # but rather their format. Scipy clusters (labels) are 1-indexed while
     # sklearn are 0-indexed. We therefore set up a very simple dataset with
    # clearly defined clusters so the result will be the same for all functions.
-    features, true_labels = make_blobs(
-        n_samples=200, centers=3, cluster_std=0.1, random_state=42
-    )
+    features, _ = make_blobs(n_samples=200, centers=3, cluster_std=0.1, random_state=42)
     cluster_result = func(features, 3, **kwargs)
     # The clusters are typically the same, but the labels vary so we perform the
     # simplest test, just checking that the desired labels are present.
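
The comment in the hunk above rests on a library detail worth pinning down:
scipy's flat cluster labels start at 1, scikit-learn's at 0. A standalone
sketch of that behaviour against scipy and sklearn directly (not the
`fcluster_analysis`/`kmeans_analysis` wrappers the test exercises), using the
same blob setup as the test:

    from scipy.cluster.hierarchy import fcluster, linkage
    from sklearn.cluster import KMeans
    from sklearn.datasets import make_blobs

    features, _ = make_blobs(n_samples=200, centers=3, cluster_std=0.1, random_state=42)

    # scipy: flat clusters cut from a hierarchical linkage; labels start at 1
    scipy_labels = fcluster(linkage(features, method="average"), t=3, criterion="maxclust")
    assert set(scipy_labels) == {1, 2, 3}

    # scikit-learn: k-means labels start at 0
    sklearn_labels = KMeans(n_clusters=3, random_state=42).fit(features).labels_
    assert set(sklearn_labels) == {0, 1, 2}
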
diff --git a/tests/jobs/nosim/test_nosim.py b/tests/jobs/nosim/test_nosim.py
index b9d43b506..4e95b8b41 100644
--- a/tests/jobs/nosim/test_nosim.py
+++ b/tests/jobs/nosim/test_nosim.py
@@ -1,12 +1,13 @@
-import sys
 import os
 import shutil
 import subprocess
-import pytest
+import sys
 
+import ert_shared.hook_implementations
+import pytest
 from ert_shared.plugins.plugin_manager import ErtPluginContext
+
 import semeio.hook_implementations.jobs
-import ert_shared.hook_implementations
 
 
 @pytest.mark.skipif(
diff --git a/tests/jobs/overburden_timeshift/conftest.py b/tests/jobs/overburden_timeshift/conftest.py
index 4dc41cb1b..0fe2fe3d3 100644
--- a/tests/jobs/overburden_timeshift/conftest.py
+++ b/tests/jobs/overburden_timeshift/conftest.py
@@ -1,7 +1,8 @@
 import os
-import pytest
 import shutil
 
+import pytest
+
 from tests import test_data
 
 TEST_NORNE_DIR = os.path.realpath(os.path.join(test_data.__path__[0], "norne"))
diff --git a/tests/jobs/overburden_timeshift/ots_util.py b/tests/jobs/overburden_timeshift/ots_util.py
index e626059dc..27bc78a19 100644
--- a/tests/jobs/overburden_timeshift/ots_util.py
+++ b/tests/jobs/overburden_timeshift/ots_util.py
@@ -1,10 +1,11 @@
-from ecl.eclfile import EclKW, openFortIO, FortIO
-from ecl import EclDataType
-from ecl.grid import EclGrid
 import os
+
+import numpy as np
 import segyio
+from ecl import EclDataType
+from ecl.eclfile import EclKW, FortIO, openFortIO
+from ecl.grid import EclGrid
 from segyio import TraceField
-import numpy as np
 
 
 def create_init(grid, case):
diff --git a/tests/jobs/overburden_timeshift/test_ots.py b/tests/jobs/overburden_timeshift/test_ots.py
index d4f96d465..4ed0998d0 100644
--- a/tests/jobs/overburden_timeshift/test_ots.py
+++ b/tests/jobs/overburden_timeshift/test_ots.py
@@ -1,14 +1,15 @@
-import os
-import pytest
 import datetime
+import os
 from collections import namedtuple
 
-from ecl.grid import EclGridGenerator
+import pytest
 import segyio
+from ecl.grid import EclGridGenerator
+from xtgeo import surface_from_file
 
 from semeio.jobs.overburden_timeshift.ots import OverburdenTimeshift
+
 from .ots_util import create_init, create_restart, create_segy_file
-from xtgeo import surface_from_file
 
 parms = namedtuple(
     "Parms",
@@ -65,7 +66,7 @@ def set_up():
 )
 @pytest.mark.usefixtures("setup_tmpdir")
 def test_create_missing_ecl_file(set_up, missing_file, expected_error):
-    spec, actnum, params = set_up
+    _, _, params = set_up
     grid = EclGridGenerator.createRectangular(dims=(10, 10, 10), dV=(1, 1, 1))
     grid.save_EGRID("TEST.EGRID")
 
@@ -88,7 +89,7 @@ def test_create_missing_ecl_file(set_up, missing_file, expected_error):
 
 
 def test_create_invalid_input_missing_segy(set_up):
-    spec, actnum, parms = set_up
+    _, _, parms = set_up
     grid = EclGridGenerator.createRectangular(dims=(10, 10, 10), dV=(1, 1, 1))
     grid.save_EGRID("TEST.EGRID")
 
@@ -117,7 +118,7 @@ def test_create_invalid_input_missing_segy(set_up):
 )
 @pytest.mark.usefixtures("setup_tmpdir")
 def test_create_valid(set_up, config_item, value):
-    spec, actnum, params = set_up
+    spec, _, params = set_up
     grid = EclGridGenerator.createRectangular(dims=(10, 10, 10), dV=(1, 1, 1))
     grid.save_EGRID("TEST.EGRID")
 
@@ -348,7 +349,7 @@ def test_geertsma_TS(set_up):
 
 @pytest.mark.usefixtures("setup_tmpdir")
 def test_dPV(set_up):
-    spec, actnum, parms = set_up
+    _, actnum, parms = set_up
     grid = EclGridGenerator.createRectangular(
         dims=(2, 2, 2), dV=(100, 100, 100), actnum=actnum
     )
@@ -426,6 +427,7 @@ def test_irap_surface(set_up):
     )
     f_name = "irap.txt"
+    # pylint: disable=protected-access
     s = ots._create_surface()
     s.to_file(f_name)
 
     s = surface_from_file(f_name, fformat="irap_binary")
diff --git a/tests/jobs/overburden_timeshift/test_ots_config.py b/tests/jobs/overburden_timeshift/test_ots_config.py
index c6a016f10..f02b8cfa1 100644
--- a/tests/jobs/overburden_timeshift/test_ots_config.py
+++ b/tests/jobs/overburden_timeshift/test_ots_config.py
@@ -1,10 +1,11 @@
 import shutil
-import yaml
+from pathlib import Path
+
 import pytest
+import yaml
+
 from semeio._exceptions.exceptions import ConfigurationError
 from semeio.jobs.overburden_timeshift.ots import ots_load_params
-from pathlib import Path
-
 
 TEST_NORNE_DIR = Path(__file__).parent / ".." / ".." / "test_data" / "norne"
diff --git a/tests/jobs/overburden_timeshift/test_ots_integration.py b/tests/jobs/overburden_timeshift/test_ots_integration.py
index 8412433af..037d3d0cb 100644
--- a/tests/jobs/overburden_timeshift/test_ots_integration.py
+++ b/tests/jobs/overburden_timeshift/test_ots_integration.py
@@ -1,10 +1,13 @@
-from semeio.jobs.overburden_timeshift.ots import ots_run
+import os
+
+import pytest
 import xtgeo
+import yaml
 from ecl.grid import EclGrid
-import pytest
+
+from semeio.jobs.overburden_timeshift.ots import ots_run
+
 from .ots_util import mock_segy
-import yaml
-import os
 
 # pylint: disable=too-many-statements
diff --git a/tests/jobs/overburden_timeshift/test_ots_reservoir.py b/tests/jobs/overburden_timeshift/test_ots_reservoir.py
index a9ddca55d..ca47398ed 100644
--- a/tests/jobs/overburden_timeshift/test_ots_reservoir.py
+++ b/tests/jobs/overburden_timeshift/test_ots_reservoir.py
@@ -1,8 +1,10 @@
-import numpy as np
-from semeio.jobs.overburden_timeshift.ots_res_surface import OTSResSurface
-from ecl.grid import EclGrid, EclGridGenerator
 import os
+
+import numpy as np
 import pytest
+from ecl.grid import EclGrid, EclGridGenerator
+
+from semeio.jobs.overburden_timeshift.ots_res_surface import OTSResSurface
 
 
 def get_source_ert(grid):
diff --git a/tests/jobs/overburden_timeshift/test_ots_velocity.py b/tests/jobs/overburden_timeshift/test_ots_velocity.py
index e12e49a16..afae01a51 100644
--- a/tests/jobs/overburden_timeshift/test_ots_velocity.py
+++ b/tests/jobs/overburden_timeshift/test_ots_velocity.py
@@ -1,9 +1,10 @@
 import pytest
-
+import segyio
 from ecl.grid import EclGridGenerator
-from semeio.jobs.overburden_timeshift.ots_vel_surface import OTSVelSurface
+
 from semeio.jobs.overburden_timeshift.ots_res_surface import OTSResSurface
-import segyio
+from semeio.jobs.overburden_timeshift.ots_vel_surface import OTSVelSurface
+
 from .ots_util import create_segy_file
diff --git a/tests/jobs/rft/conftest.py b/tests/jobs/rft/conftest.py
index b88746fb4..4e7741dae 100644
--- a/tests/jobs/rft/conftest.py
+++ b/tests/jobs/rft/conftest.py
@@ -2,7 +2,6 @@
 import copy
 from distutils.dir_util import copy_tree
 
-import numpy
 import pytest
 
 ECL_BASE_NORNE = os.path.join(
@@ -41,23 +40,6 @@ def get_mock_data_content_norne():
     return copy.deepcopy(MOCK_DATA_CONTENT_NORNE)
 
 
-def _assert_almost_equal_line_by_line(file1, file2):
-    with open(file1, encoding="utf-8") as fh:
-        file1_content = fh.readlines()
-
-    with open(file2, encoding="utf-8") as fh:
-        file2_content = fh.readlines()
-
-    assert len(file1_content) == len(file2_content)
-
-    for line1, line2 in zip(file1_content, file2_content):
-        try:
-            line1, line2 = float(line1), float(line2)
-        except ValueError:
-            continue
-        numpy.testing.assert_almost_equal(line1, line2, decimal=7)
-
-
 def _generate_mock_data_norne(write_directory):
     for fname, content in MOCK_DATA_CONTENT_NORNE.items():
         with open(os.path.join(write_directory, fname), "w+", encoding="utf-8") as fh:
diff --git a/tests/jobs/rft/test_gendata_rft.py b/tests/jobs/rft/test_gendata_rft.py
index 063b9dd45..18e7bf427 100644
--- a/tests/jobs/rft/test_gendata_rft.py
+++ b/tests/jobs/rft/test_gendata_rft.py
@@ -10,6 +10,7 @@
 import pandas as pd
 import pytest
 
+import semeio.jobs
 from semeio.jobs.scripts.gendata_rft import _build_parser, main_entry_point
 from tests.jobs.rft import conftest
 
@@ -182,7 +183,7 @@ def test_gendata_rft_entry_point(tmpdir, norne_data, monkeypatch):
         if not filename.endswith("inactive_info"):
             result_file = os.path.join(tmpdir.strpath, filename)
 
-            conftest._assert_almost_equal_line_by_line(expected_file, result_file)
+            _assert_almost_equal_line_by_line(expected_file, result_file)
 
 
 def test_multiple_report_steps(tmpdir, reek_data, monkeypatch):
@@ -454,7 +455,6 @@ def test_defaults():
     To avoid confusion, these should be in sync, enforced by this test"""
     # Navigate to the JOB_DESCRIPTION in the source code tree:
-    import semeio.jobs
 
     job_description_file = os.path.join(
         os.path.dirname(semeio.jobs.__file__), "config_jobs", "GENDATA_RFT_CONFIG"
     )
@@ -518,6 +518,7 @@ def test_ert_setup_one_well_one_rft_point(tmpdir):
     Path("parameter_prior").write_text("PARAM_1 UNIFORM 0 1\n")
     Path("parameter_template").write_text('{\n"a": \n}')
     # Write an ERT config file
+    # pylint: disable=line-too-long
     Path("config.ert").write_text(
         """
 RUNPATH realization-%d/iter-%d
@@ -624,6 +625,7 @@ def test_ert_setup_one_well_two_points_different_time_and_depth(tmpdir):
     Path("parameter_template").write_text('{\n"a": \n}')
 
     # Write an ERT config file
+    # pylint: disable=line-too-long
     Path("config.ert").write_text(
         dedent(
             """
@@ -679,3 +681,20 @@ def test_ert_setup_one_well_two_points_different_time_and_depth(tmpdir):
 
     assert Path("realization-0/iter-0/gendata_rft/RFT_OP_1_1").is_file()
     assert Path("realization-0/iter-0/OK").is_file()
+
+
+def _assert_almost_equal_line_by_line(file1, file2):
+    with open(file1, encoding="utf-8") as fh:
+        file1_content = fh.readlines()
+
+    with open(file2, encoding="utf-8") as fh:
+        file2_content = fh.readlines()
+
+    assert len(file1_content) == len(file2_content)
+
+    for line1, line2 in zip(file1_content, file2_content):
+        try:
+            line1, line2 = float(line1), float(line2)
+        except ValueError:
+            continue
+        numpy.testing.assert_almost_equal(line1, line2, decimal=7)
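
The helper moved to the bottom of test_gendata_rft.py skips any line that
does not parse as a float and compares the rest numerically to 7 decimals. A
hypothetical usage sketch (file names and contents invented; assumes the
helper and its `numpy` dependency are in scope):

    from pathlib import Path

    Path("expected.txt").write_text("-- pressure --\n212.1000000004\n", encoding="utf-8")
    Path("result.txt").write_text("-- PRESSURE --\n212.1\n", encoding="utf-8")

    # Passes: the header lines do not parse as floats and are skipped,
    # while the two numbers agree to 7 decimal places.
    _assert_almost_equal_line_by_line("expected.txt", "result.txt")
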
diff --git a/tests/jobs/spearman_correlation_job/test_integration.py b/tests/jobs/spearman_correlation_job/test_integration.py
index 660914fc9..897a172ed 100644
--- a/tests/jobs/spearman_correlation_job/test_integration.py
+++ b/tests/jobs/spearman_correlation_job/test_integration.py
@@ -1,18 +1,19 @@
 # pylint: disable=not-callable
+import json
 import os
 import shutil
-import json
-import pytest
+from unittest.mock import Mock
+
 import pandas as pd
-import semeio.workflows.spearman_correlation_job.spearman_correlation as sc
-from scipy import stats
+import pytest
 from res.enkf import EnKFMain, ResConfig
+from scipy import stats
+
+import semeio.workflows.spearman_correlation_job.spearman_correlation as sc
 from semeio.workflows.correlated_observations_scaling.exceptions import (
     EmptyDatasetException,
 )
-from unittest.mock import Mock
-
 
 @pytest.mark.usefixtures("setup_tmpdir")
 def test_main_entry_point_gen_data(monkeypatch, test_data_root):
@@ -37,6 +38,7 @@ def test_main_entry_point_gen_data(monkeypatch, test_data_root):
     # again is a tuple containing the configuration which is a list of configs.
     assert len(list(run_mock.call_args)[0][0]) == 47, "wrong number of clusters"
 
+    # pylint: disable=protected-access
     cor_matrix_file = os.path.join(
         job._output_dir,
         "correlation_matrix.csv",
@@ -149,6 +151,7 @@ def test_main_entry_point_syn_data(monkeypatch, facade, measured_data):
     job = sc.SpearmanCorrelationJob(ert_mock)
     job.run(*["-t", "1.0"])
 
     cor_matrix_file = os.path.join(
+        # pylint: disable=protected-access
        job._output_dir,
        "correlation_matrix.csv",
    )
@@ -168,6 +171,7 @@ def test_main_entry_point_syn_data(monkeypatch, facade, measured_data):
     assert (expected_corr_matrix == corr_matrix.values).all()
 
     clusters_file = os.path.join(
+        # pylint: disable=protected-access
         job._output_dir,
         "clusters.json",
     )
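
The `list(run_mock.call_args)[0][0]` expression asserted above unpacks
`unittest.mock` plumbing; a minimal sketch of the shape it relies on (the
config strings are invented):

    from unittest.mock import Mock

    run_mock = Mock()
    run_mock(["config_a", "config_b"])  # one call, one positional argument

    args, kwargs = run_mock.call_args  # call_args behaves like an (args, kwargs) pair
    assert args[0] == ["config_a", "config_b"]
    assert len(list(run_mock.call_args)[0][0]) == 2  # the pattern used in the test
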
diff --git a/tests/jobs/spearman_correlation_job/unit/test_spearman_job.py b/tests/jobs/spearman_correlation_job/unit/test_spearman_job.py
index 58a201db0..9e5b175d2 100644
--- a/tests/jobs/spearman_correlation_job/unit/test_spearman_job.py
+++ b/tests/jobs/spearman_correlation_job/unit/test_spearman_job.py
@@ -1,9 +1,9 @@
-# -*- coding: utf-8 -*-
 import pytest
 
 from semeio.workflows.spearman_correlation_job import job as spearman
 
+# pylint: disable=protected-access
 @pytest.mark.parametrize(
     "test_input,expected_result",
     [
diff --git a/tests/jobs/stea/test_stea.py b/tests/jobs/stea/test_stea.py
index 2e302da60..8a72b5ab4 100644
--- a/tests/jobs/stea/test_stea.py
+++ b/tests/jobs/stea/test_stea.py
@@ -1,13 +1,14 @@
 import json
+import os
+import shutil
 import sys
 from unittest import mock
+
 import pytest
-from semeio.jobs.scripts import fm_stea
-import shutil
-import os
-from stea import SteaResult, SteaKeys
 import stea
+from stea import SteaKeys, SteaResult
 
+from semeio.jobs.scripts import fm_stea
 
 TEST_STEA_PATH, _ = os.path.split(os.path.abspath(__file__))
diff --git a/tests/jobs/test_scale_observations/test_scale_observations.py b/tests/jobs/test_scale_observations/test_scale_observations.py
index 497ea6de3..edbdb1f3e 100644
--- a/tests/jobs/test_scale_observations/test_scale_observations.py
+++ b/tests/jobs/test_scale_observations/test_scale_observations.py
@@ -1,8 +1,9 @@
+import pytest
 from res.enkf import EnKFMain
+
 from semeio.workflows.correlated_observations_scaling.update_scaling import (
     scale_observations,
 )
-import pytest
 
 
 class Config:  # pylint: disable=too-few-public-methods
diff --git a/tests/legacy_test_data/failed_runs_in_storage/jobs/perlin.py b/tests/legacy_test_data/failed_runs_in_storage/jobs/perlin.py
index 567a68e91..9322e61ee 100755
--- a/tests/legacy_test_data/failed_runs_in_storage/jobs/perlin.py
+++ b/tests/legacy_test_data/failed_runs_in_storage/jobs/perlin.py
@@ -397,7 +397,7 @@
 ]
 
 
-class PerlinNoise(object):
+class PerlinNoise:
     def __init__(
         self,
         persistence=0.5,
diff --git a/tests/test_ert_integration.py b/tests/test_ert_integration.py
index 50e1f057b..a4eb8ed8d 100644
--- a/tests/test_ert_integration.py
+++ b/tests/test_ert_integration.py
@@ -1,10 +1,10 @@
 import subprocess
-import pytest
 
-from ert_shared.plugins.plugin_manager import ErtPluginContext
-import semeio.hook_implementations.jobs
 import ert_shared.hook_implementations
+import pytest
+from ert_shared.plugins.plugin_manager import ErtPluginContext
 
+import semeio.hook_implementations.jobs
 
 default_config = """
 JOBNAME TEST
diff --git a/tox.ini b/tox.ini
index 81ed4a761..67ba69d2f 100644
--- a/tox.ini
+++ b/tox.ini
@@ -30,7 +30,6 @@ per-file-ignores =
     tests/jobs/overburden_timeshift/test_ots_integration.py:E203
    semeio/jobs/design2params/design2params.py:E501
    semeio/jobs/overburden_timeshift/ots.py:E501
-    semeio/hook_implementations/jobs.py:E501
 
 [gh-actions]
 python =