From 64e91c96b00d260e092db0506d88d66d9c403b92 Mon Sep 17 00:00:00 2001 From: Johannes Mueller Date: Wed, 9 Oct 2024 10:38:11 +0200 Subject: [PATCH 1/8] =?UTF-8?q?Fix=20some=20warnings=20and=20refactoring?= =?UTF-8?q?=20in=20W=C3=B6hler=20analysis?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Johannes Mueller --- src/pylife/materialdata/woehler/elementary.py | 2 +- src/pylife/materialdata/woehler/maxlike.py | 35 +++++++------------ tests/materialdata/woehler/test_analyzer.py | 1 + 3 files changed, 14 insertions(+), 24 deletions(-) diff --git a/src/pylife/materialdata/woehler/elementary.py b/src/pylife/materialdata/woehler/elementary.py index df52cf8c..33716a91 100644 --- a/src/pylife/materialdata/woehler/elementary.py +++ b/src/pylife/materialdata/woehler/elementary.py @@ -70,7 +70,7 @@ def analyze(self, **kwargs): Parameters ---------- - \*\*kwargs : kwargs arguments + **kwargs : kwargs arguments Arguments to be passed to the derived class """ if len(self._fd.load.unique()) < 2: diff --git a/src/pylife/materialdata/woehler/maxlike.py b/src/pylife/materialdata/woehler/maxlike.py index 36844700..439398e9 100644 --- a/src/pylife/materialdata/woehler/maxlike.py +++ b/src/pylife/materialdata/woehler/maxlike.py @@ -125,42 +125,31 @@ def warn_and_fix_if_less_than_two_mixed_levels(): warn_and_fix_if_no_runouts() warn_and_fix_if_less_than_two_mixed_levels() - p_opt = initial_wcurve.to_dict() + prms_to_optimize = initial_wcurve.to_dict() for k in fixed_prms: - p_opt.pop(k) + prms_to_optimize.pop(k) - if not p_opt: + if not prms_to_optimize: raise AttributeError('You need to leave at least one parameter empty!') - var_opt = optimize.fmin( - self.__likelihood_wrapper, [*p_opt.values()], - args=([*p_opt], fixed_prms), + optimized_prms = optimize.fmin( + self.__likelihood_wrapper, [*prms_to_optimize.values()], + args=([*prms_to_optimize], fixed_prms), full_output=True, disp=False, maxiter=1e4, maxfun=1e4, - ) - res = {} - res.update(fixed_prms) - res.update(zip([*p_opt], var_opt[0])) + )[0] + result = fixed_prms | dict(zip(prms_to_optimize, optimized_prms)) - return self.__make_parameters(res) + return self.__make_parameters(result) def __make_parameters(self, params): - params['SD'] = np.abs(params['SD']) - params['TS'] = np.abs(params['TS']) - params['k_1'] = np.abs(params['k_1']) - params['ND'] = np.abs(params['ND']) - params['TN'] = np.abs(params['TN']) - return params + return {k: np.abs(v) for k, v in params.items()} def __likelihood_wrapper(self, var_args, var_keys, fix_args): ''' 1) Finds the start values to be optimized. The rest of the paramters are fixed by the user. 2) Calls function mali_sum_lolli to calculate the maximum likelihood of the current variable states. 
''' - args = {} - args.update(fix_args) - args.update(zip(var_keys, var_args)) - args = self.__make_parameters(args) - - return -self._lh.likelihood_total(args['SD'], args['TS'], args['k_1'], args['ND'], args['TN']) + args = self.__make_parameters(fix_args | dict(zip(var_keys, var_args))) + return -self._lh.likelihood_total(**args) diff --git a/tests/materialdata/woehler/test_analyzer.py b/tests/materialdata/woehler/test_analyzer.py index 6dc889f6..554c12be 100644 --- a/tests/materialdata/woehler/test_analyzer.py +++ b/tests/materialdata/woehler/test_analyzer.py @@ -515,6 +515,7 @@ def test_woehler_max_likelihood_full_without_fixed_params(): np.testing.assert_almost_equal(we.bayesian_information_criterion(), bic, decimal=2) +@pytest.mark.filterwarnings("ignore:invalid value encountered in subtract") def test_woehler_max_likelihood_full_without_fixed_params_no_runouts(): expected = pd.Series({ 'SD': 0, From a54f38e9aa39140d844c735b5a73b20be990443c Mon Sep 17 00:00:00 2001 From: Johannes Mueller Date: Wed, 9 Oct 2024 11:19:03 +0200 Subject: [PATCH 2/8] Fix warnings and refactor in surface detector Signed-off-by: Johannes Mueller --- src/pylife/mesh/surface.py | 171 ++++++++++++++++++------------------- tests/mesh/test_surface.py | 68 +++++++++++++-- 2 files changed, 143 insertions(+), 96 deletions(-) diff --git a/src/pylife/mesh/surface.py b/src/pylife/mesh/surface.py index 5e69d3f3..af269409 100644 --- a/src/pylife/mesh/surface.py +++ b/src/pylife/mesh/surface.py @@ -19,12 +19,10 @@ import numpy as np import pandas as pd -import warnings -import numpy as np -import numpy.linalg from .meshsignal import Mesh + @pd.api.extensions.register_dataframe_accessor('surface_3D') class Surface3D(Mesh): '''Determines nodes at the surface in a 3D mesh. @@ -39,52 +37,47 @@ class Surface3D(Mesh): with the names `node_id` and `element_id` ''' - def _solid_angle(self, x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3): + def _solid_angle(self, df): + n = len(df) - p0 = np.array([x0, y0, z0]).T - p1 = np.array([x1, y1, z1]).T - p2 = np.array([x2, y2, z2]).T - p3 = np.array([x3, y3, z3]).T + p0 = np.array([df.x_n0, df.y_n0, df.z_n0]).T + p1 = np.array([df.x_n1, df.y_n1, df.z_n1]).T + p2 = np.array([df.x_n2, df.y_n2, df.z_n2]).T + p3 = np.array([df.x_n3, df.y_n3, df.z_n3]).T # all vectors have shape (n,3) r0 = p1 - p0 r1 = p2 - p0 r2 = p3 - p0 - r0 /= np.linalg.norm(r0, axis=1)[:,None] * np.ones((1,3)) - r1 /= np.linalg.norm(r1, axis=1)[:,None] * np.ones((1,3)) - r2 /= np.linalg.norm(r2, axis=1)[:,None] * np.ones((1,3)) - - - a = np.arccos(np.sum(r0*r1, axis=1)) - b = np.arccos(np.sum(r0*r2, axis=1)) - c = np.arccos(np.sum(r1*r2, axis=1)) - - s = (a+b+c) / 2 - # silence invalid values error, the resulting nan values will be masked out at the end - old_settings = np.geterr() - np.seterr(invalid="ignore") + with np.errstate(invalid="ignore"): + r0 /= np.broadcast_to(np.linalg.norm(r0, axis=1), (3, n)).T + r1 /= np.broadcast_to(np.linalg.norm(r1, axis=1), (3, n)).T + r2 /= np.broadcast_to(np.linalg.norm(r2, axis=1), (3, n)).T + + a = np.arccos(np.sum(r0*r1, axis=1)) + b = np.arccos(np.sum(r0*r2, axis=1)) + c = np.arccos(np.sum(r1*r2, axis=1)) - sinA = np.sqrt( (np.sin(s-b) * np.sin(s-c)) / (np.sin(b)*np.sin(c)) ) - sinB = np.sqrt( (np.sin(s-a) * np.sin(s-c)) / (np.sin(a)*np.sin(c)) ) - sinC = np.sqrt( (np.sin(s-b) * np.sin(s-a)) / (np.sin(b)*np.sin(a)) ) + s = (a+b+c) / 2 - cosA = np.sqrt( (np.sin(s) * np.sin(s-a)) / (np.sin(b)*np.sin(c)) ) - cosB = np.sqrt( (np.sin(s) * np.sin(s-b)) / (np.sin(b)*np.sin(c)) ) - cosC 
= np.sqrt( (np.sin(s) * np.sin(s-c)) / (np.sin(b)*np.sin(c)) ) + sinA = np.sqrt((np.sin(s-b) * np.sin(s-c)) / (np.sin(b)*np.sin(c))) + sinB = np.sqrt((np.sin(s-a) * np.sin(s-c)) / (np.sin(a)*np.sin(c))) + sinC = np.sqrt((np.sin(s-b) * np.sin(s-a)) / (np.sin(b)*np.sin(a))) - # restore silenced warnings - np.seterr(**old_settings) + cosA = np.sqrt((np.sin(s) * np.sin(s-a)) / (np.sin(b)*np.sin(c))) + cosB = np.sqrt((np.sin(s) * np.sin(s-b)) / (np.sin(b)*np.sin(c))) + cosC = np.sqrt((np.sin(s) * np.sin(s-c)) / (np.sin(b)*np.sin(c))) # make large values to 1 - sinA = np.minimum(sinA, np.ones_like(sinA)) - sinB = np.minimum(sinB, np.ones_like(sinB)) - sinC = np.minimum(sinC, np.ones_like(sinC)) + sinA = np.minimum(sinA, 1.0) + sinB = np.minimum(sinB, 1.0) + sinC = np.minimum(sinC, 1.0) - cosA = np.minimum(cosA, np.ones_like(cosA)) - cosB = np.minimum(cosB, np.ones_like(cosB)) - cosC = np.minimum(cosC, np.ones_like(cosC)) + cosA = np.minimum(cosA, 1.0) + cosB = np.minimum(cosB, 1.0) + cosC = np.minimum(cosC, 1.0) A = np.where(np.isnan(sinA), 2 * np.arccos(cosA), 2 * np.arcsin(sinA)) B = np.where(np.isnan(sinB), 2 * np.arccos(cosB), 2 * np.arcsin(sinB)) @@ -95,58 +88,66 @@ def _solid_angle(self, x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3): return E - def _compute_normals(self, x0, y0, z0, x1, y1, z1, x2, y2, z2): - p0 = np.array([x0, y0, z0]).T - p1 = np.array([x1, y1, z1]).T - p2 = np.array([x2, y2, z2]).T + def _compute_normals(self, df): + n = len(df) + + p0 = np.array([df.x_n0_n0, df.y_n0_n0, df.z_n0_n0]).T + p1 = np.array([df.x_p1, df.y_p1, df.z_p1]).T + p2 = np.array([df.x_p2, df.y_p2, df.z_p2]).T # all vectors have shape (n,3) r0 = p1 - p0 r1 = p2 - p0 - r0 /= np.linalg.norm(r0, axis=1)[:,None] * np.ones((1,3)) - r1 /= np.linalg.norm(r1, axis=1)[:,None] * np.ones((1,3)) - normal = np.cross(r0,r1) - normal /= np.linalg.norm(normal, axis=1)[:,None] * np.ones((1,3)) + r0 /= np.broadcast_to(np.linalg.norm(r0, axis=1), (3, n)).T + r1 /= np.broadcast_to(np.linalg.norm(r1, axis=1), (3, n)).T - return normal[:,0], normal[:,1], normal[:,2] + normal = np.cross(r0, r1) + normal /= np.broadcast_to(np.linalg.norm(normal, axis=1), (3, n)).T - def _determine_is_at_surface(self): - df = self._obj[["x", "y", "z"]].reorder_levels(["element_id", "node_id"]).sort_index(level="element_id", sort_remaining=False) - df["node_id"] = df.index.get_level_values("node_id") + return normal + def _determine_is_at_surface(self): + df = ( + self.coordinates + #.reorder_levels(["element_id", "node_id"]) + #.sort_index(level="element_id", sort_remaining=False) + .assign(node_id=self._obj.index.get_level_values("node_id")) + ) # add two other nodes for every node - df_nodes = df.copy() - df_nodes = df_nodes.merge(df_nodes, on="element_id", how="outer", suffixes=["_n0", "_n1"]) - df_nodes = df_nodes[df_nodes["node_id_n0"] != df_nodes["node_id_n1"]] - - df_nodes = df_nodes.merge(df, on="element_id", how="outer").copy() - df_nodes = df_nodes[df_nodes["node_id_n1"] < df_nodes["node_id"]] - df_nodes = df_nodes.rename(columns={"node_id": "node_id_n2", "x": "x_n2", "y": "y_n2", "z": "z_n2"}) - - df_nodes = df_nodes.merge(df, on="element_id", how="outer", suffixes=["_n2", "_n3"]).copy() - df_nodes = df_nodes[df_nodes["node_id_n2"] < df_nodes["node_id"]] - df0 = df_nodes.rename(columns={"node_id": "node_id_n3", "x": "x_n3", "y": "y_n3", "z": "z_n3"}) - - df0.loc[:,"E"] \ - = self._solid_angle(df0.x_n0, df0.y_n0, df0.z_n0, df0.x_n1, df0.y_n1, df0.z_n1, df0.x_n2, df0.y_n2, df0.z_n2, df0.x_n3, df0.y_n3, df0.z_n3) + df0 = ( + 
df.merge(df, on="element_id", how="outer", suffixes=["_n0", "_n1"]) + .query("node_id_n0 != node_id_n1") + .merge(df, on="element_id", how="outer") + .query("node_id_n1 < node_id") + .rename( + columns={"node_id": "node_id_n2", "x": "x_n2", "y": "y_n2", "z": "z_n2"} + ) + .merge(df, on="element_id", how="outer", suffixes=["_n2", "_n3"]) + .query("node_id_n2 < node_id") + .rename( + columns={"node_id": "node_id_n3", "x": "x_n3", "y": "y_n3", "z": "z_n3"} + ) + .pipe(lambda df: df.assign(E=self._solid_angle(df))) + ) max_E = df0.groupby(["element_id", "node_id_n0"]).max()["E"] - df1 = df0.join(max_E, on=["element_id", "node_id_n0"], how="left", rsuffix="_max") - df2 = df1[df1["E"] == df1["E_max"]] - df2 = df2.groupby(["element_id", "node_id_n0"]).first() - - index_columns = df2.reset_index() - index_columns = index_columns[["element_id", "node_id_n0"]] - df2.index = pd.MultiIndex.from_frame(index_columns, names=["element_id", "node_id"]) - - df3 = df2[["x_n0", "y_n0", "z_n0", "E"]] + df1 = ( + df0.join(max_E, on=["element_id", "node_id_n0"], how="left", rsuffix="_max") + .query("E == E_max") + .groupby(["element_id", "node_id_n0"]) + .first() + .reset_index() + .rename(columns={"node_id_n0": "node_id"}) + .set_index(["element_id", "node_id"]) + ) - df3 = df3.join(df3.groupby(["node_id"]).sum().rename(columns={"E":"Esum"})["Esum"]) - df3.loc[:,"is_at_surface"] = df3["Esum"] < 4*np.pi-1e-5 + df3 = df1[["x_n0", "y_n0", "z_n0", "E"]].join( + df1.groupby(["node_id"]).sum().rename(columns={"E": "Esum"})["Esum"] + ) - df3["is_at_surface"].fillna(False, inplace = True) + df3.loc[:, "is_at_surface"] = df3["Esum"] < 4*np.pi-1e-5 return df3 @@ -214,18 +215,15 @@ def is_at_surface_with_normals(self): df = self._determine_is_at_surface() - df_at_surface = df[df["is_at_surface"]].reset_index().set_index("element_id") + df_at_surface = df[df["is_at_surface"]].reset_index("node_id") d = df_at_surface.merge(df_at_surface, on="element_id", how="left", suffixes=["_n0", "_n1"]) d = d[d["node_id_n0"] != d["node_id_n1"]] - p0 = d.groupby(["element_id", "node_id_n0"], group_keys=True).nth(0) - p1 = d.groupby(["element_id", "node_id_n0"], group_keys=True).nth(1) - d2 = d.groupby(["element_id", "node_id_n0"], group_keys=True).first() - - p0 = p0.reset_index().set_index(["element_id", "node_id_n0"]) - p1 = p1.reset_index().set_index(["element_id", "node_id_n0"]) - d2 = d2.reset_index().set_index(["element_id", "node_id_n0"]) + groups = d.groupby(["element_id", "node_id_n0"], group_keys=True) + p0 = groups.first() + p1 = groups.nth(1).set_index("node_id_n0", append=True) + d2 = groups.first() d2["node_id_p1"] = p0["node_id_n1"] d2["x_p1"] = p0["x_n0_n1"] @@ -237,14 +235,13 @@ def is_at_surface_with_normals(self): d2["y_p2"] = p1["y_n0_n1"] d2["z_p2"] = p1["z_n0_n1"] - d2.loc[:,"normal_x"], d2.loc[:,"normal_y"], d2.loc[:,"normal_z"] \ - = self._compute_normals(d2.x_n0_n0, d2.y_n0_n0, d2.z_n0_n0, d2.x_p1, d2.y_p1, d2.z_p1, d2.x_p2, d2.y_p2, d2.z_p2) - - df_with_normals = d2[["normal_x", "normal_y", "normal_z"]] + df_with_normals = pd.DataFrame( + self._compute_normals(d2), + columns=["normal_x", "normal_y", "normal_z"], + index=d2.index, + ) df_with_normals.index.names = ["element_id", "node_id"] df_result = df.join(df_with_normals)[["is_at_surface", "normal_x", "normal_y", "normal_z"]] return df_result - - diff --git a/tests/mesh/test_surface.py b/tests/mesh/test_surface.py index 8b2d475c..515a7aa1 100644 --- a/tests/mesh/test_surface.py +++ b/tests/mesh/test_surface.py @@ -79,15 +79,15 @@ def 
spherical_mesh(n_elements_phi = 6, n_elements_theta = 4, n_elements_r = 3, o
 
 @pytest.mark.parametrize('n_elements_phi, n_elements_theta, n_elements_r, offset', [
-    (3, 3, 3, np.array([0,0,0])),
-    (3, 3, 3, np.array([0,1,0])),
-    (3, 3, 3, np.array([1.2,.3,4.5])),
-    (1, 3, 3, np.array([1.2,.3,4.5])),
-    (3, 1, 3, np.array([1.2,.3,4.5])),
-    (3, 3, 1, np.array([1.2,.3,4.5])),
-    (3, 1, 1, np.array([1.2,.3,4.5])),
-    (1, 1, 1, np.array([1.2,.3,4.5])),
-    (5, 4, 3, np.array([1.2,.3,4.5])),
+    (3, 3, 3, np.array([0, 0, 0])),
+    (3, 3, 3, np.array([0, 1, 0])),
+    (3, 3, 3, np.array([1.2, .3, 4.5])),
+    (1, 3, 3, np.array([1.2, .3, 4.5])),
+    (3, 1, 3, np.array([1.2, .3, 4.5])),
+    (3, 3, 1, np.array([1.2, .3, 4.5])),
+    (3, 1, 1, np.array([1.2, .3, 4.5])),
+    (1, 1, 1, np.array([1.2, .3, 4.5])),
+    (5, 4, 3, np.array([1.2, .3, 4.5])),
 ])
 def test_surface_3D(n_elements_phi, n_elements_theta, n_elements_r, offset):
 
@@ -99,3 +99,53 @@ def test_surface_3D(n_elements_phi, n_elements_theta, n_elements_r, offset):
 
     np.testing.assert_array_equal(is_at_surface_1, expected)
     np.testing.assert_array_equal(is_at_surface_2["is_at_surface"].values, expected)
+
+
+@pytest.mark.parametrize('n_elements_phi, n_elements_theta, n_elements_r, offset', [
+    (3, 3, 3, np.array([0, 0, 0])),
+    (3, 3, 3, np.array([0, 1, 0])),
+    (3, 3, 3, np.array([1.2, .3, 4.5])),
+    (1, 3, 3, np.array([1.2, .3, 4.5])),
+    (3, 1, 3, np.array([1.2, .3, 4.5])),
+    (3, 3, 1, np.array([1.2, .3, 4.5])),
+    (3, 1, 1, np.array([1.2, .3, 4.5])),
+    (1, 1, 1, np.array([1.2, .3, 4.5])),
+    (5, 4, 3, np.array([1.2, .3, 4.5])),
+])
+def test_surface_3D_flipped_indices(n_elements_phi, n_elements_theta, n_elements_r, offset):
+
+    # mesh is a segment of a sphere
+    df_mesh, expected = spherical_mesh(n_elements_phi, n_elements_theta, n_elements_r, offset)
+
+    df_mesh = df_mesh.reorder_levels(["element_id", "node_id"])
+
+    is_at_surface_1 = df_mesh.surface_3D.is_at_surface()
+    is_at_surface_2 = df_mesh.surface_3D.is_at_surface_with_normals()
+
+    np.testing.assert_array_equal(is_at_surface_1, expected)
+    np.testing.assert_array_equal(is_at_surface_2["is_at_surface"].values, expected)
+
+
+@pytest.mark.parametrize('n_elements_phi, n_elements_theta, n_elements_r, offset', [
+    (3, 3, 3, np.array([0, 0, 0])),
+    (3, 3, 3, np.array([0, 1, 0])),
+    (3, 3, 3, np.array([1.2, .3, 4.5])),
+    (1, 3, 3, np.array([1.2, .3, 4.5])),
+    (3, 1, 3, np.array([1.2, .3, 4.5])),
+    (3, 3, 1, np.array([1.2, .3, 4.5])),
+    (3, 1, 1, np.array([1.2, .3, 4.5])),
+    (1, 1, 1, np.array([1.2, .3, 4.5])),
+    (5, 4, 3, np.array([1.2, .3, 4.5])),
+])
+def test_surface_3D_additional_index(n_elements_phi, n_elements_theta, n_elements_r, offset):
+
+    # mesh is a segment of a sphere
+    df_mesh, expected = spherical_mesh(n_elements_phi, n_elements_theta, n_elements_r, offset)
+
+    df_mesh = df_mesh.assign(additional=1).set_index("additional", append=True)
+
+    is_at_surface_1 = df_mesh.surface_3D.is_at_surface()
+    is_at_surface_2 = df_mesh.surface_3D.is_at_surface_with_normals()
+
+    np.testing.assert_array_equal(is_at_surface_1, expected)
+    np.testing.assert_array_equal(is_at_surface_2["is_at_surface"].values, expected)
From 29432e53356005ec2747e1097f9f5d872cd48cce Mon Sep 17 00:00:00 2001
From: Johannes Mueller
Date: Fri, 11 Oct 2024 09:06:24 +0200
Subject: [PATCH 3/8] Fix docstrings in fkm_load_distribution.py

Signed-off-by: Johannes Mueller
---
 src/pylife/strength/fkm_load_distribution.py | 123 +++++++++++--------
 1 file changed, 73 insertions(+), 50 deletions(-)

diff --git a/src/pylife/strength/fkm_load_distribution.py 
b/src/pylife/strength/fkm_load_distribution.py index 4c24e2be..2bf60f17 100644 --- a/src/pylife/strength/fkm_load_distribution.py +++ b/src/pylife/strength/fkm_load_distribution.py @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Scale up a load sequence to incorporate safety factors for FKM +r"""Scale up a load sequence to incorporate safety factors for FKM nonlinear lifetime assessment. Given a pandas Series of load values, return a scaled version where the safety @@ -26,10 +26,10 @@ The FKM nonlinear guideline defines three possible methods to consider the statistical distribution of the load: - * a normal distribution with given standard deviation, $s_L$ - * a logarithmic-normal distribution with given standard deviation $LSD_s$ + * a normal distribution with given standard deviation, :math:`s_L` + * a logarithmic-normal distribution with given standard deviation :math:`LSD_s` * an unknown distribution, use the constant factor :math:`\gamma_L=1.1` for - $P_L = 2.5\%$ + :math:`P_L = 2.5\%` For these three methods, there exist the three accessors `fkm_safety_normal_from_stddev`, `fkm_safety_lognormal_from_stddev`, and @@ -78,9 +78,11 @@ @pd.api.extensions.register_series_accessor("fkm_load_sequence") class FKMLoadSequence(PylifeSignal): """Base class used by the safety scaling method. It is used to compute the beta parameter - and to scale the load sequence by a constant gamma_L. + and to scale the load sequence by a constant ``gamma_L``. + + This class can be used from user code to scale a load sequence, potentially + on a mesh with other fields set for every node. - This class can be used from user code to scale a load sequence, potentially on a mesh with other fields set for every node. In such a case, the other fields are not modified. Example @@ -106,13 +108,16 @@ class FKMLoadSequence(PylifeSignal): """ def scaled_by_constant(self, gamma_L): - """ - Scales the load sequence by the given constant gamma_L. This method basically computes - `gamma_L * self._obj`. The data in `self._obj` is either a pandas.Series, a pandas.DataFrame - with a single column or a pandas.DataFrame with multiple columns (usually two for stress and stress gradient). - In the case of a Series or only one column, it simply scales all values by the factor gamma_L. - In the case of a DataFrame with multiple columns, it only scales the first column by the factor gamma_L - and keeps the other columns unchanged. + """Scale the load sequence by the given constant ``gamma_L``. + + This method basically computes ``gamma_L * self._obj``. The data in + ``self._obj`` is either a :class:`pandas.Series`, a + :class:`pandas.DataFrame` with a single column or a pandas.DataFrame + with multiple columns (usually two for stress and stress gradient). In + the case of a Series or only one column, it simply scales all values by + the factor ``gamma_L``. In the case of a DataFrame with multiple columns, + it only scales the first column by the factor gamma_L and keeps the + other columns unchanged. Returns a scaled copy of the data. @@ -140,21 +145,27 @@ def scaled_by_constant(self, gamma_L): return self._obj * gamma_L def maximum_absolute_load(self, max_load_independently_for_nodes=False): - """ - Get the maximum absolute load over all nodes and load steps. + """Get the maximum absolute load over all nodes and load steps. 
-        This is implemented for pd.Series (where the index is just the index of the load step),
-        pd.DataFrame with one column (where the index is a MultiIndex of load_step and node_id),
-        and pd.DataFrame with multiple columns with the load is given in the first column.
+        This is implemented for pd.Series (where the index is just the index of
+        the load step), pd.DataFrame with one column (where the index is a
+        MultiIndex of load_step and node_id), and pd.DataFrame with multiple
+        columns where the load is given in the first column.
 
         Parameters
         ----------
         max_load_independently_for_nodes : bool, optional
-            If the maximum absolute should be computed separately for every node. If set to False, a single maximum value is computed over all nodes.
-            The default is False, which means the whole mesh will be assessed by the same maximum load. This, however, means that calculating the FKM
-            nonlinear assessment for the whole mesh at once yields a different result than calculating the assessment for every single node one after
-            each other. (When doing it all at once, the maximum absolute load used for the failure probability is the maximum of the loads at every node,
-            when doing it only for a single node, the maximum absolute load value may be lower.)
+            If the maximum absolute load should be computed separately for every node.
+
+            If set to False, a single maximum value is computed over all nodes.
+            The default is False, which means the whole mesh will be assessed
+            by the same maximum load. This, however, means that calculating the
+            FKM nonlinear assessment for the whole mesh at once yields a
+            different result than calculating the assessment for every single
+            node one after each other. (When doing it all at once, the maximum
+            absolute load used for the failure probability is the maximum of
+            the loads at every node, when doing it only for a single node, the
+            maximum absolute load value may be lower.)
 
         Returns
         -------
@@ -202,12 +213,14 @@ def _validate_parameters(self, input_parameters, required_parameters):
             raise ValueError(f"Given parameters have to include \"{required_parameter}\".")
 
     def _get_beta(self, input_parameters):
-        """
-        Compute a scaling factor for assessing a load sequence for a given failure probability,
-        for details, refer to the FKM nonlinear document.
+        """Compute a scaling factor for assessing a load sequence for a given failure probability.
 
-        The beta factors are also described in "A. Fischer. Bestimmung modifizierter Teilsicherheitsbeiwerte zur semiprobabilistischen
-        Bemessung von Stahlbetonkonstruktionen im Bestand. TU Kaiserslautern, 2010"
+        For details, refer to the FKM nonlinear document.
+
+        The beta factors are also described in "A. Fischer. Bestimmung
+        modifizierter Teilsicherheitsbeiwerte zur semiprobabilistischen
+        Bemessung von Stahlbetonkonstruktionen im Bestand. TU Kaiserslautern,
+        2010"
 
         Parameters
         ----------
@@ -243,25 +256,29 @@ def _get_beta(self, input_parameters):
 @pd.api.extensions.register_dataframe_accessor("fkm_safety_normal_from_stddev")
 @pd.api.extensions.register_series_accessor("fkm_safety_normal_from_stddev")
 class FKMLoadDistributionNormal(FKMLoadSequence):
-    r"""Series accessor to get a scaled up load series, i.e., a list of load values with included load safety,
-    as used in FKM nonlinear lifetime assessments.
+    r"""Series accessor to get a scaled up load series.
 
-    The loads are assumed to follow a **normal distribution** with standard deviation :math:`s_L`. 
- To incorporate safety, reduce the - values of the load series from :math:`P_L = 50\%` up to the given load probability :math:`P_L` - and the given failure probability :math:`P_A`. + A load series is a list of load values with included load safety, as used + in FKM nonlinear lifetime assessments. - For more information, see 2.3.2.1 of the FKM nonlinear guideline. + The loads are assumed to follow a **normal distribution** with standard + deviation :math:`s_L`. To incorporate safety, reduce the values of the + load series from :math:`P_L = 50\%` up to the given load probability + :math:`P_L` and the given failure probability :math:`P_A`. - See also + For more information, see 2.3.2.1 of the FKM nonlinear guideline. + + See Also -------- :class:`AbstractFKMLoadDistribution`: accesses meshes with connectivity information """ def gamma_L(self, input_parameters): - r"""Compute the scaling factor :math:`\gamma_L=(L_\text{max} + \alpha_L) / L_\text{max}`. - Note that for load sequences on multiple nodes (i.e. on a full mesh), :math:`L_\text{max}` - is the maximum load over all nodes and load steps, not different for different nodes. + r"""Compute the scaling factor :math:`\gamma_L = (L_\text{max} + \alpha_L) / L_\text{max}`. + + Note that for load sequences on multiple + nodes (i.e. on a full mesh), :math:`L_\text{max}` is the maximum load + over all nodes and load steps, not different for different nodes. Parameters ---------- @@ -341,19 +358,22 @@ def scaled_load_sequence(self, input_parameters): @pd.api.extensions.register_dataframe_accessor("fkm_safety_lognormal_from_stddev") @pd.api.extensions.register_series_accessor("fkm_safety_lognormal_from_stddev") class FKMLoadDistributionLognormal(FKMLoadSequence): - r"""Series accessor to get a scaled up load series, i.e., a list of load values with included load safety, - as used in FKM nonlinear lifetime assessments. + r"""Series accessor to get a scaled up load series. - The loads are assumed to follow a **lognormal distribution** with standard deviation :math:`LSD_s`. - To incorporate safety, reduce the - values of the load series from :math:`P_L = 50\%` up to the given load probability :math:`P_L` - and the given failure probability :math:`P_A`. + A load series is a list of load values with included load safety, as used + in FKM nonlinear lifetime assessments. - For more information, see 2.3.2.2 of the FKM nonlinear guideline. + The loads are assumed to follow a **lognormal distribution** with standard + deviation :math:`LSD_s`. To incorporate safety, reduce the values of the + load series from :math:`P_L = 50\%` up to the given load probability + :math:`P_L` and the given failure probability :math:`P_A`. - See also + For more information, see 2.3.2.2 of the FKM nonlinear guideline. + + See Also -------- :class:`AbstractFKMLoadDistribution`: accesses meshes with connectivity information + """ def gamma_L(self, input_parameters): @@ -476,8 +496,10 @@ def gamma_L(self, input_parameters): elif np.isclose(input_parameters.P_L, 50): gamma_L = 1.0 else: - raise ValueError("fkm_safety_blanket is only possible for P_L=2.5 % " \ - f"or P_L=50 %, not P_L={input_parameters.P_L} %") + raise ValueError( + "fkm_safety_blanket is only possible for P_L=2.5 % " + f"or P_L=50 %, not P_L={input_parameters.P_L} %" + ) return gamma_L @@ -491,7 +513,8 @@ def scaled_load_sequence(self, input_parameters): input_parameters : pandas Series The parameters to specify the upscaling method. 
-        * ``input_parameters.P_L``: probability in [%] of the load for which to do the assessment, one of {2.5, 50}
+        * ``input_parameters.P_L``: probability in [%] of the load
+          for which to do the assessment, one of {2.5, 50}
 
         Raises
         ------
From 129791cc374cc9c6ecd91defb3997446bbb35e5c Mon Sep 17 00:00:00 2001
From: Johannes Mueller
Date: Fri, 11 Oct 2024 10:46:45 +0200
Subject: [PATCH 4/8] Remove print statements in tests that caused warnings

Signed-off-by: Johannes Mueller
---
 tests/strength/fkm_nonlinear/test_fkm_nonlinear_hcm.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/tests/strength/fkm_nonlinear/test_fkm_nonlinear_hcm.py b/tests/strength/fkm_nonlinear/test_fkm_nonlinear_hcm.py
index 49e5eab7..1fa2ce41 100644
--- a/tests/strength/fkm_nonlinear/test_fkm_nonlinear_hcm.py
+++ b/tests/strength/fkm_nonlinear/test_fkm_nonlinear_hcm.py
@@ -563,9 +563,6 @@ def test_repeated_load_sequence_multiple_points_P_RAJ(original_load_sequence):
 
     result = pylife.strength.fkm_nonlinear.assessment_nonlinear_standard.perform_fkm_nonlinear_assessment(assessment_parameters, load_sequence, calculate_P_RAM=False, calculate_P_RAJ=True)
     N3 = result["P_RAJ_lifetime_n_cycles"]
-    print(f"N3: {N3}")
-
-    print(f"{N1}, {N2}, {N3}, {(N1-N2)/N1*100}%, {(N1-N3)/N1*100}%")
 
     assert np.isclose(N1, N2, rtol=0.3).all()
     assert np.isclose(N1, N3, rtol=0.3).all()
From 836c94d1dfdd638023a533f0f36170df496c1c8b Mon Sep 17 00:00:00 2001
From: Johannes Mueller
Date: Fri, 11 Oct 2024 11:58:03 +0200
Subject: [PATCH 5/8] Silence floating point warning in HCM recorder

Signed-off-by: Johannes Mueller
---
 src/pylife/stress/rainflow/recorders.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/pylife/stress/rainflow/recorders.py b/src/pylife/stress/rainflow/recorders.py
index a0c7dcea..b67a9f1d 100644
--- a/src/pylife/stress/rainflow/recorders.py
+++ b/src/pylife/stress/rainflow/recorders.py
@@ -253,8 +253,9 @@ def R(self):
         Only for hystereses resulting from Memory 3, the FKM nonlinear document defines ``R = -1`` (eq. 2.9-54).
         This is indicated by ``_is_zero_mean_stress_and_strain=True``.
         For these hystereses, this function returns -1 instead of ``S_min / S_max``, which may be different. 
""" - return np.where(self.is_zero_mean_stress_and_strain, \ - -1, np.array(self._S_min) / np.array(self._S_max)) + with np.errstate(all="ignore"): + R = np.array(self._S_min) / np.array(self._S_max) + return np.where(self.is_zero_mean_stress_and_strain, -1, R) @property def is_closed_hysteresis(self): From 68567624a85ebd8e3d16f19246c77930e5597299 Mon Sep 17 00:00:00 2001 From: Johannes Mueller Date: Fri, 11 Oct 2024 12:08:30 +0200 Subject: [PATCH 6/8] Exclude deprecated modules from pytest discovery Signed-off-by: Johannes Mueller --- setup.cfg | 2 ++ 1 file changed, 2 insertions(+) diff --git a/setup.cfg b/setup.cfg index 8f53eb83..488ee2f6 100644 --- a/setup.cfg +++ b/setup.cfg @@ -120,6 +120,8 @@ addopts = --cov src/pylife --cov-append -m "not slow_acceptance and not demos" --doctest-modules --ignore=src/pylife/materialdata/woehler/bayesian.py + --ignore=src/pylife/strength/helpers.py + --ignore=src/pylife/strength/sn_curve.py norecursedirs = dist From e21e8bab381fd47d4aedc1d0e969f30e946c0052 Mon Sep 17 00:00:00 2001 From: Johannes Mueller Date: Fri, 11 Oct 2024 12:13:58 +0200 Subject: [PATCH 7/8] Fix docstring in assessment_nonlinear_standard.py Signed-off-by: Johannes Mueller --- .../strength/fkm_nonlinear/assessment_nonlinear_standard.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pylife/strength/fkm_nonlinear/assessment_nonlinear_standard.py b/src/pylife/strength/fkm_nonlinear/assessment_nonlinear_standard.py index 5e29894e..060a8249 100644 --- a/src/pylife/strength/fkm_nonlinear/assessment_nonlinear_standard.py +++ b/src/pylife/strength/fkm_nonlinear/assessment_nonlinear_standard.py @@ -258,7 +258,7 @@ def _check_K_p_is_in_range(assessment_parameters): def _scale_load_sequence_according_to_probability(assessment_parameters, load_sequence): - """Scales the given load sequence according to one of three methods defined in the FKM nonlinear guideline. + r"""Scales the given load sequence according to one of three methods defined in the FKM nonlinear guideline. The FKM nonlinear guideline defines three possible methods to consider the statistical distribution of the load: From 64c99697220513199d456a04711fb164cb68f521 Mon Sep 17 00:00:00 2001 From: Johannes Mueller Date: Fri, 11 Oct 2024 13:34:01 +0200 Subject: [PATCH 8/8] Roll back to python-3.8 compliant code Signed-off-by: Johannes Mueller --- src/pylife/materialdata/woehler/maxlike.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/src/pylife/materialdata/woehler/maxlike.py b/src/pylife/materialdata/woehler/maxlike.py index 439398e9..dea6626d 100644 --- a/src/pylife/materialdata/woehler/maxlike.py +++ b/src/pylife/materialdata/woehler/maxlike.py @@ -139,7 +139,13 @@ def warn_and_fix_if_less_than_two_mixed_levels(): maxiter=1e4, maxfun=1e4, )[0] - result = fixed_prms | dict(zip(prms_to_optimize, optimized_prms)) + + # TODO: Change to following line when python 3.8 is dropped + #r esult = fixed_prms | dict(zip(prms_to_optimize, optimized_prms)) + + result = {} + result.update(fixed_prms) + result.update(zip(prms_to_optimize, optimized_prms)) return self.__make_parameters(result) @@ -147,9 +153,11 @@ def __make_parameters(self, params): return {k: np.abs(v) for k, v in params.items()} def __likelihood_wrapper(self, var_args, var_keys, fix_args): - ''' 1) Finds the start values to be optimized. The rest of the paramters are fixed by the user. - 2) Calls function mali_sum_lolli to calculate the maximum likelihood of the current - variable states. 
-        '''
-        args = self.__make_parameters(fix_args | dict(zip(var_keys, var_args)))
+        # TODO: Change to the following line when python 3.8 is dropped
+        # args = self.__make_parameters(fix_args | dict(zip(var_keys, var_args)))
+
+        args = {}
+        args.update(fix_args)
+        args.update(zip(var_keys, var_args))
+        args = self.__make_parameters(args)
         return -self._lh.likelihood_total(**args)
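
A note on the two recurring techniques in this series:

PATCH 2 and PATCH 5 both replace manual np.geterr()/np.seterr() bookkeeping
with the np.errstate context manager, which scopes the floating-point error
handling to the block and restores the previous settings on exit. A minimal
standalone sketch of the idiom (illustrative values, not taken from the
patches):

    import numpy as np

    S_min = np.array([-1.0, 0.0])
    S_max = np.array([1.0, 0.0])

    # 0.0 / 0.0 would normally emit an "invalid value" RuntimeWarning;
    # inside errstate it is suppressed, and the resulting nan is masked
    # out afterwards, as in the recorder's R property.
    with np.errstate(all="ignore"):
        R = S_min / S_max
    R = np.where(np.isnan(R), -1.0, R)

The dict handling rolled back in PATCH 8 targets the dict-union operator
from PEP 584, which requires Python >= 3.9; the update()-based construction
is the 3.8-compatible equivalent (an unpacking literal such as
{**fixed_prms, **dict(zip(prms_to_optimize, optimized_prms))} would be
another 3.8-compatible spelling).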