From b3c9b779f8af5340a1039072afb703dda75df73a Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Wed, 16 Jul 2025 16:28:00 -0400 Subject: [PATCH 001/121] New folder and init for DEM efforts --- xrt_dem_iterative/__init__.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 xrt_dem_iterative/__init__.py diff --git a/xrt_dem_iterative/__init__.py b/xrt_dem_iterative/__init__.py new file mode 100644 index 000000000..9bf5e0b5f --- /dev/null +++ b/xrt_dem_iterative/__init__.py @@ -0,0 +1,15 @@ +""" +Hinode XRT estimate differential emission measures (DEMs) Iterative Solver Module +""" + +from xrtpy.response.temperature_response import TemperatureResponseFundamental +from xrtpy.response.tools import generate_temperature_responses + +# Import main DEM solver class (to be created soon) +# from .solver import DEMSolver + +__all__ = [ + "TemperatureResponseFundamental", + "generate_temperature_responses", + # "DEMSolver", +] From d7b11abb752263b9b6eb97f1c527828add2c2121 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Wed, 16 Jul 2025 16:39:08 -0400 Subject: [PATCH 002/121] Add new script that will hold DEM code --- xrt_dem_iterative/dem_solver.py | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 xrt_dem_iterative/dem_solver.py diff --git a/xrt_dem_iterative/dem_solver.py b/xrt_dem_iterative/dem_solver.py new file mode 100644 index 000000000..16de71f04 --- /dev/null +++ b/xrt_dem_iterative/dem_solver.py @@ -0,0 +1,11 @@ +__all__ = [ +# "", +] + +import astropy.time +import astropy.units as u +import matplotlib.pyplot as plt +import numpy as np +from lmfit import Parameters , minimize +from scipy.interpolate import interp1d, CubicSpline +from numpy import trapezoid #np.trapz Deprecation \ No newline at end of file From cdb20bcf18d3186c1b4d075abe7f8f08f13892c3 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Wed, 16 Jul 2025 16:52:38 -0400 Subject: [PATCH 003/121] Fixing ruff rules --- xrt_dem_iterative/dem_solver.py | 10 +--------- 1 
file changed, 1 insertion(+), 9 deletions(-) diff --git a/xrt_dem_iterative/dem_solver.py b/xrt_dem_iterative/dem_solver.py index 16de71f04..651037cea 100644 --- a/xrt_dem_iterative/dem_solver.py +++ b/xrt_dem_iterative/dem_solver.py @@ -1,11 +1,3 @@ __all__ = [ -# "", + # "", ] - -import astropy.time -import astropy.units as u -import matplotlib.pyplot as plt -import numpy as np -from lmfit import Parameters , minimize -from scipy.interpolate import interp1d, CubicSpline -from numpy import trapezoid #np.trapz Deprecation \ No newline at end of file From 50a375498006270facf68fc6894971bbe2820663 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Wed, 16 Jul 2025 16:56:31 -0400 Subject: [PATCH 004/121] Adding to make new branch' --- xrt_dem_iterative/dem_solver.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/xrt_dem_iterative/dem_solver.py b/xrt_dem_iterative/dem_solver.py index 651037cea..7336b5c33 100644 --- a/xrt_dem_iterative/dem_solver.py +++ b/xrt_dem_iterative/dem_solver.py @@ -1,3 +1,13 @@ __all__ = [ - # "", +# "", ] + +import astropy.time +import astropy.units as u +import matplotlib.pyplot as plt +import numpy as np +from lmfit import Parameters , minimize +from scipy.interpolate import interp1d, CubicSpline + +from numpy import trapezoid #np.trapz Deprecation + From 646b267a058e7065da3e3dd635c973b941b44841 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Wed, 16 Jul 2025 18:04:33 -0400 Subject: [PATCH 005/121] Move xrt_dem_iterative module into xrtpy/ --- {xrt_dem_iterative => xrtpy/xrt_dem_iterative}/__init__.py | 0 {xrt_dem_iterative => xrtpy/xrt_dem_iterative}/dem_solver.py | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename {xrt_dem_iterative => xrtpy/xrt_dem_iterative}/__init__.py (100%) rename {xrt_dem_iterative => xrtpy/xrt_dem_iterative}/dem_solver.py (100%) diff --git a/xrt_dem_iterative/__init__.py b/xrtpy/xrt_dem_iterative/__init__.py similarity index 100% rename from xrt_dem_iterative/__init__.py 
rename to xrtpy/xrt_dem_iterative/__init__.py diff --git a/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py similarity index 100% rename from xrt_dem_iterative/dem_solver.py rename to xrtpy/xrt_dem_iterative/dem_solver.py From 3406ce67f0e936f72a8d7898701210477a425a94 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Wed, 16 Jul 2025 18:12:39 -0400 Subject: [PATCH 006/121] Updating new function-calling --- xrtpy/xrt_dem_iterative/__init__.py | 5 +++++ xrtpy/xrt_dem_iterative/dem_solver.py | 4 ++++ 2 files changed, 9 insertions(+) diff --git a/xrtpy/xrt_dem_iterative/__init__.py b/xrtpy/xrt_dem_iterative/__init__.py index 9bf5e0b5f..08eef92fc 100644 --- a/xrtpy/xrt_dem_iterative/__init__.py +++ b/xrtpy/xrt_dem_iterative/__init__.py @@ -4,6 +4,8 @@ from xrtpy.response.temperature_response import TemperatureResponseFundamental from xrtpy.response.tools import generate_temperature_responses +from xrtpy.util.filters import solve_filter_name, validate_and_format_filters +from xrtpy.util.time import epoch # Import main DEM solver class (to be created soon) # from .solver import DEMSolver @@ -11,5 +13,8 @@ __all__ = [ "TemperatureResponseFundamental", "generate_temperature_responses", + "solve_filter_name", + "validate_and_format_filters", + "epoch", # "DEMSolver", ] diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 7336b5c33..d2d7f2e75 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -11,3 +11,7 @@ from numpy import trapezoid #np.trapz Deprecation +from xrtpy.util.filters import solve_filter_name, validate_and_format_filters +from xrtpy.util.time import epoch + + From b6f7a509ddb5f7c6312b1024266789d6c8a66829 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Wed, 16 Jul 2025 18:38:24 -0400 Subject: [PATCH 007/121] Define main elements --- xrtpy/xrt_dem_iterative/dem_solver.py | 63 +++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git 
a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index d2d7f2e75..61639aeed 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -15,3 +15,66 @@ from xrtpy.util.time import epoch +class XRTDEMIterative: + """ + Estimate the differential emission measure (DEM) from Hinode/XRT data + using the iterative spline-based method. + + Parameters + ---------- + observed_channel : str or list of str + Filter names used in the observation (e.g., 'Al-mesh', 'Be-thin'). + observed_intensities : array-like + Observed intensities in DN/s/pix for each channel. + temperature_responses : list + List of `TemperatureResponseFundamental` objects matching the filters. + intensity_errors : array-like, optional + Intensity uncertainties. If None, will use a model-based estimate. + min_T : float + Minimum log10 temperature (default: 5.5). + max_T : float + Maximum log10 temperature (default: 8.0). + dT : float + Step size in log10 temperature space (default: 0.1). + min_error : float + Minimum absolute intensity error (default: 2.0 DN/s/pix). + relative_error : float + Relative error for model-based uncertainty estimate (default: 0.03). 
+ """ + + def __init__( + self, + observed_channel, + observed_intensities, + temperature_responses, + intensity_errors=None, + min_T=5.5, + max_T=8.0, + dT=0.1, + min_error=2.0, + relative_error=0.03, + ): + # Validate and store filter names + self.observed_channel = validate_and_format_filters(observed_channel) + + # Store intensity and error arrays + self.observed_intensities = np.asarray(observed_intensities, dtype=float) + if intensity_errors is not None: + self.intensity_errors = np.asarray(intensity_errors, dtype=float) + if self.intensity_errors.shape != self.observed_intensities.shape: + raise ValueError("Length of intensity_errors must match observed_intensities.") + else: + self.intensity_errors = None # Will be computed later + + # Store temperature response objects + self.responses = temperature_responses + + # Store temperature grid parameters + self.min_T = min_T + self.max_T = max_T + self.dT = dT + self.logT = np.arange(min_T, max_T + dT / 2, dT) + + # Store error model parameters + self.min_error = min_error + self.relative_error = relative_error \ No newline at end of file From ebd02beccd31e9ffcfd38678ce0319e24d4ef850 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Fri, 25 Jul 2025 19:34:17 -0400 Subject: [PATCH 008/121] Adding observed_intensities function --- xrtpy/xrt_dem_iterative/dem_solver.py | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 61639aeed..2ef72ebbe 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -1,5 +1,5 @@ __all__ = [ -# "", + "XRTDEMIterative", ] import astropy.time @@ -77,4 +77,23 @@ def __init__( # Store error model parameters self.min_error = min_error - self.relative_error = relative_error \ No newline at end of file + self.relative_error = relative_error + + + @property #Removed if not used + def name(self) -> str: + """ + The XRT filter channel 
name, standardized (e.g. "Al-mesh"). + """ + return self._name + + @property + def observed_intensities(self) -> u.Quantity: + """ + Observed intensities with physical units. + Returns + ------- + `~astropy.units.Quantity` + Intensities in DN/s for each filter channel. + """ + return self._observed_intensities * (u.DN / u.s) \ No newline at end of file From 28db848cf692fc66d5a6940417df76b1f71a367e Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Fri, 25 Jul 2025 21:11:31 -0400 Subject: [PATCH 009/121] Adding observed_intensities function --- xrtpy/xrt_dem_iterative/dem_solver.py | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 2ef72ebbe..b4ba7a56b 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -15,6 +15,7 @@ from xrtpy.util.time import epoch + class XRTDEMIterative: """ Estimate the differential emission measure (DEM) from Hinode/XRT data @@ -88,7 +89,7 @@ def name(self) -> str: return self._name @property - def observed_intensities(self) -> u.Quantity: + def observed_intensities(self) -> u.Quantity: #Add method to account for known values not worth observed_intensities """ Observed intensities with physical units. Returns @@ -96,4 +97,25 @@ def observed_intensities(self) -> u.Quantity: `~astropy.units.Quantity` Intensities in DN/s for each filter channel. """ - return self._observed_intensities * (u.DN / u.s) \ No newline at end of file + return self._observed_intensities * (u.DN / u.s) + + @property + def filter_names(self): + """ + Returns a list of filter names from the temperature responses. + """ + return [r.filter_name for r in self.responses] + + @property + def response_temperatures(self): + """ + Returns a list of temperature grids (K) for each filter response. 
+ """ + return [r.temperature for r in self.responses] + + @property + def response_values(self): + """ + Returns a list of response values (DN cm^5 / pix / s) for each filter. + """ + return [r.response for r in self.responses] \ No newline at end of file From 892c8d58ac0339419be1a4878a9eabc0a61dc754 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Fri, 25 Jul 2025 21:16:36 -0400 Subject: [PATCH 010/121] Adding a quick for consistency; --- xrtpy/xrt_dem_iterative/dem_solver.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index b4ba7a56b..1c35fbfda 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -70,6 +70,18 @@ def __init__( # Store temperature response objects self.responses = temperature_responses + # Check consistency between inputs + if not ( + len(self._observed_intensities) + == len(self.responses) + == len(self._name) + ): + raise ValueError( + f"\nLength mismatch in inputs:\n" + f" Observed intensities: {len(self._observed_intensities)}\n" + f" Responses: {len(self.responses)}\n" + f" Filter channels: {len(self._name)}\n" + ) # Store temperature grid parameters self.min_T = min_T self.max_T = max_T From dd8d69d59b7aae627f5662ba4fe15347c3b6219d Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Tue, 29 Jul 2025 12:53:29 -0400 Subject: [PATCH 011/121] Adding functions --- xrtpy/xrt_dem_iterative/dem_solver.py | 39 ++++++++++++++++++++++----- 1 file changed, 33 insertions(+), 6 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 1c35fbfda..adefeba4b 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -83,14 +83,16 @@ def __init__( f" Filter channels: {len(self._name)}\n" ) # Store temperature grid parameters - self.min_T = min_T - self.max_T = max_T - self.dT = dT + self.logT = np.arange(min_T, max_T + dT / 2, dT) 
+ self.min_T = float(min_T) + self.max_T = float(max_T) + self.dT = float(dT) + # Store error model parameters - self.min_error = min_error - self.relative_error = relative_error + self.min_error = float(min_error) + self.relative_error = float(relative_error) @property #Removed if not used @@ -130,4 +132,29 @@ def response_values(self): """ Returns a list of response values (DN cm^5 / pix / s) for each filter. """ - return [r.response for r in self.responses] \ No newline at end of file + return [r.response for r in self.responses] + + @property + def min_T(self): + """Lower bound of log10 temperature grid.""" + return self._min_T + + @property + def max_T(self): + """Upper bound of log10 temperature grid.""" + return self._max_T + + @property + def dT(self): + """Bin width of log10 temperature grid.""" + return self._dT + + @property + def min_error(self): + """Minimum error applied to DN/s when intensity error is not provided.""" + return self._min_error + + @property + def relative_error(self): + """Relative error (%) used to scale intensity if error is not provided.""" + return self._relative_error From 26bb0c2cce84785a9d29252d7f0263a856916e12 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Tue, 29 Jul 2025 13:00:19 -0400 Subject: [PATCH 012/121] Updated method of naming functions --- xrtpy/xrt_dem_iterative/dem_solver.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index adefeba4b..435f822b1 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -60,6 +60,7 @@ def __init__( # Store intensity and error arrays self.observed_intensities = np.asarray(observed_intensities, dtype=float) + #Errors if intensity_errors is not None: self.intensity_errors = np.asarray(intensity_errors, dtype=float) if self.intensity_errors.shape != self.observed_intensities.shape: @@ -82,17 +83,17 @@ def __init__( f" Responses: 
{len(self.responses)}\n" f" Filter channels: {len(self._name)}\n" ) + # Store temperature grid parameters + self._min_T = float(min_T) + self._max_T = float(max_T) + self._dT = float(dT) + self.logT = np.arange(self._min_T, self._max_T + self._dT / 2, self._dT) - self.logT = np.arange(min_T, max_T + dT / 2, dT) - - self.min_T = float(min_T) - self.max_T = float(max_T) - self.dT = float(dT) # Store error model parameters - self.min_error = float(min_error) - self.relative_error = float(relative_error) + self._min_error = float(min_error) + self._relative_error = float(relative_error) @property #Removed if not used From 5b8ebed6494e1a5f534f9c2871b8854b438fb76a Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Tue, 29 Jul 2025 14:26:32 -0400 Subject: [PATCH 013/121] Adding XRTDEMIterative class to the init --- xrtpy/xrt_dem_iterative/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/xrtpy/xrt_dem_iterative/__init__.py b/xrtpy/xrt_dem_iterative/__init__.py index 08eef92fc..c2864bb7f 100644 --- a/xrtpy/xrt_dem_iterative/__init__.py +++ b/xrtpy/xrt_dem_iterative/__init__.py @@ -6,6 +6,8 @@ from xrtpy.response.tools import generate_temperature_responses from xrtpy.util.filters import solve_filter_name, validate_and_format_filters from xrtpy.util.time import epoch +from .dem_solver import XRTDEMIterative + # Import main DEM solver class (to be created soon) # from .solver import DEMSolver @@ -16,5 +18,5 @@ "solve_filter_name", "validate_and_format_filters", "epoch", - # "DEMSolver", + "XRTDEMIterative", ] From f5db31c81574a4a1bad506b30fee9e4e6b2883c2 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Tue, 29 Jul 2025 14:28:13 -0400 Subject: [PATCH 014/121] Adding intensity_errors function --- xrtpy/xrt_dem_iterative/dem_solver.py | 69 ++++++++++++++++++++++----- 1 file changed, 56 insertions(+), 13 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 435f822b1..b3fd287a6 100644 --- 
a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -59,14 +59,15 @@ def __init__( self.observed_channel = validate_and_format_filters(observed_channel) # Store intensity and error arrays - self.observed_intensities = np.asarray(observed_intensities, dtype=float) + self._observed_intensities = np.asarray(observed_intensities, dtype=float) + #Errors if intensity_errors is not None: - self.intensity_errors = np.asarray(intensity_errors, dtype=float) - if self.intensity_errors.shape != self.observed_intensities.shape: + self._intensity_errors = np.asarray(intensity_errors, dtype=float) + if self._intensity_errors.shape != self.observed_intensities.shape: raise ValueError("Length of intensity_errors must match observed_intensities.") else: - self.intensity_errors = None # Will be computed later + self._intensity_errors = None # Will be computed later # Store temperature response objects self.responses = temperature_responses @@ -75,13 +76,13 @@ def __init__( if not ( len(self._observed_intensities) == len(self.responses) - == len(self._name) + == len(self.observed_channel) ): raise ValueError( f"\nLength mismatch in inputs:\n" f" Observed intensities: {len(self._observed_intensities)}\n" f" Responses: {len(self.responses)}\n" - f" Filter channels: {len(self._name)}\n" + f" Filter channels: {len(self.observed_channel)}\n" ) # Store temperature grid parameters @@ -95,13 +96,20 @@ def __init__( self._min_error = float(min_error) self._relative_error = float(relative_error) - - @property #Removed if not used - def name(self) -> str: - """ - The XRT filter channel name, standardized (e.g. "Al-mesh"). 
- """ - return self._name + # Validate and store intensity errors + if intensity_errors is not None: + self._intensity_errors = np.asarray(intensity_errors, dtype=float) + if self._intensity_errors.shape != self._observed_intensities.shape: + raise ValueError("Length of intensity_errors must match observed_intensities.") + else: + self._intensity_errors = None + + # @property #Removed if not used + # def name(self) -> str: + # """ + # The XRT filter channel name, standardized (e.g. "Al-mesh"). + # """ + # return self._name @property def observed_intensities(self) -> u.Quantity: #Add method to account for known values not worth observed_intensities @@ -159,3 +167,38 @@ def min_error(self): def relative_error(self): """Relative error (%) used to scale intensity if error is not provided.""" return self._relative_error + + + @property + def intensity_errors(self) -> u.Quantity: + """ + Returns the intensity uncertainties, either user-provided or model-based. + + If not provided, errors are estimated using: + max(relative_error * observed_intensity, min_error) + + For details, see: + https://hesperia.gsfc.nasa.gov/ssw/hinode/xrt/idl/util/xrt_dem_iterative2.pro + + Returns + ------- + `~astropy.units.Quantity` + Intensity errors in DN/s for each filter. + """ + if self._intensity_errors is not None: + return self._intensity_errors * (u.DN / u.s) + + print( + "\n[INFO] No intensity_errors provided. 
" + "Using default model: max(relative_error * observed_intensity, min_error)\n" + " => relative_error = {:.2f}, min_error = {:.1f} DN/s\n" + " => For details: https://hesperia.gsfc.nasa.gov/ssw/hinode/xrt/idl/util/xrt_dem_iterative2.pro\n".format( + self.relative_error, self.min_error + ) + ) + + estimated = np.maximum( + self.relative_error * self._observed_intensities, + self.min_error, + ) + return estimated * (u.DN / u.s) From 3c0458027aced9d928585ee381ead5f0abdfe799 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Tue, 29 Jul 2025 14:50:56 -0400 Subject: [PATCH 015/121] Adding conditions to values --- xrtpy/xrt_dem_iterative/dem_solver.py | 46 ++++++++++++++++++++++++--- 1 file changed, 41 insertions(+), 5 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index b3fd287a6..17fd20bca 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -68,9 +68,31 @@ def __init__( raise ValueError("Length of intensity_errors must match observed_intensities.") else: self._intensity_errors = None # Will be computed later - + + + # Store temperature grid parameters + self._min_T = float(min_T) + self._max_T = float(max_T) + self._dT = float(dT) + + # Check dT is positive + if self._dT <= 0: + raise ValueError("dT must be a positive scalar.") + + # Store temperature response objects self.responses = temperature_responses + + # Validate that the temperature grid falls within the responses + for r in self.responses: + logT_grid = np.log10(r.temperature.value) + if not (self._min_T >= logT_grid.min() and self._max_T <= logT_grid.max()): + raise ValueError( + f"The specified temperature range [{min_T}, {max_T}] is outside the bounds of one or more filter response grids.\n" + "Please ensure the temperature range fits within all responses.\n" + "Hint: Default response range is logT = 5.5 to 8.0. 
You can view each response's logT range via: [r.temperature for r in responses]" + ) + # Check consistency between inputs if not ( @@ -85,10 +107,6 @@ def __init__( f" Filter channels: {len(self.observed_channel)}\n" ) - # Store temperature grid parameters - self._min_T = float(min_T) - self._max_T = float(max_T) - self._dT = float(dT) self.logT = np.arange(self._min_T, self._max_T + self._dT / 2, self._dT) @@ -103,6 +121,10 @@ def __init__( raise ValueError("Length of intensity_errors must match observed_intensities.") else: self._intensity_errors = None + + + def __repr__(self): + return f"" # @property #Removed if not used # def name(self) -> str: @@ -202,3 +224,17 @@ def intensity_errors(self) -> u.Quantity: self.min_error, ) return estimated * (u.DN / u.s) + + + def summary(self): + print("XRTpy DEM Iterative Setup Summary") + print("-" * 40) + print(f" Filters: {self.filter_names}") + print(f" Obs Intensities: {self.observed_intensities}") + print(f" Intensity Errors: {self.intensity_errors}") + print(f" Temp Grid: logT {self.min_T} to {self.max_T} (step {self.dT})") + print(f" Temp bins: {len(self.logT)}") + print(f" Error model used: {'User-provided' if self._intensity_errors is not None else 'Auto (obs * 0.03, min=2 DN/s)'}") + if self._intensity_errors is None: + print("For more info: https://hesperia.gsfc.nasa.gov/ssw/hinode/xrt/idl/util/xrt_dem_iterative2.pro") + print("-" * 40) From 5dd94c9c3a2ff82bda138f351af2259c1b1bcfcd Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Tue, 29 Jul 2025 15:30:57 -0400 Subject: [PATCH 016/121] Adding more basic info for using in mandatory functions and check of input --- xrtpy/xrt_dem_iterative/dem_solver.py | 56 ++++++++++++++++++++++----- 1 file changed, 46 insertions(+), 10 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 17fd20bca..0fc57409e 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -8,7 +8,7 @@ 
import numpy as np from lmfit import Parameters , minimize from scipy.interpolate import interp1d, CubicSpline - +import warnings from numpy import trapezoid #np.trapz Deprecation from xrtpy.util.filters import solve_filter_name, validate_and_format_filters @@ -55,12 +55,46 @@ def __init__( min_error=2.0, relative_error=0.03, ): + """ + Args: + observed_channel (_type_): _description_ + observed_intensities (_type_): _description_ + temperature_responses (_type_): _description_ + intensity_errors (_type_, optional): _description_. Defaults to None. + min_T (float, optional): _description_. Defaults to 5.5. + max_T (float, optional): _description_. Defaults to 8.0. + dT (float, optional): _description_. Defaults to 0.1. + min_error (float, optional): _description_. Defaults to 2.0. + relative_error (float, optional): _description_. Defaults to 0.03. + + Notes + ----- + - All input lists (`observed_channel`, `observed_intensities`, and `temperature_responses`) + must be the same length. Each entry should correspond to one filter. + + - The temperature grid range (`min_T`, `max_T`) must lie entirely within the + response temperature ranges for **all** filters provided. + + - If `intensity_errors` is not provided, a model-based error estimate is used: + max(relative_error * observed_intensity, min_error), as in the IDL original. 
+ + - Default XRT filter names include: + {'Al-mesh', 'Al-poly', 'C-poly', 'Ti-poly', 'Be-thin', 'Be-med', 'Al-med', 'Al-thick', 'Be-thick', + 'Al-poly/Al-mesh', 'Al-poly/Ti-poly', 'Al-poly/Al-thick', 'Al-poly/Be-thick'} + """ # Validate and store filter names self.observed_channel = validate_and_format_filters(observed_channel) + if observed_channel is None or len(observed_channel) == 0: + raise ValueError("`observed_channel` is required and cannot be empty.") + # Store intensity and error arrays self._observed_intensities = np.asarray(observed_intensities, dtype=float) + if observed_intensities is None or len(observed_intensities) == 0: + raise ValueError("`observed_intensities` is required and cannot be empty.") + + #Errors if intensity_errors is not None: self._intensity_errors = np.asarray(intensity_errors, dtype=float) @@ -82,6 +116,9 @@ def __init__( # Store temperature response objects self.responses = temperature_responses + + if temperature_responses is None or len(temperature_responses) == 0: + raise ValueError("`temperature_responses` is required and cannot be empty.") # Validate that the temperature grid falls within the responses for r in self.responses: @@ -209,15 +246,13 @@ def intensity_errors(self) -> u.Quantity: """ if self._intensity_errors is not None: return self._intensity_errors * (u.DN / u.s) - - print( - "\n[INFO] No intensity_errors provided. " - "Using default model: max(relative_error * observed_intensity, min_error)\n" - " => relative_error = {:.2f}, min_error = {:.1f} DN/s\n" - " => For details: https://hesperia.gsfc.nasa.gov/ssw/hinode/xrt/idl/util/xrt_dem_iterative2.pro\n".format( - self.relative_error, self.min_error - ) - ) + + warnings.warn( + "No intensity_errors provided. 
Using default model: " + f"max(relative_error * observed_intensity, min_error)\n" + f"=> relative_error = {self.relative_error}, min_error = {self.min_error} DN/s\n" + "See: https://hesperia.gsfc.nasa.gov/ssw/hinode/xrt/idl/util/xrt_dem_iterative2.pro", + UserWarning) estimated = np.maximum( self.relative_error * self._observed_intensities, @@ -231,6 +266,7 @@ def summary(self): print("-" * 40) print(f" Filters: {self.filter_names}") print(f" Obs Intensities: {self.observed_intensities}") + print(f" Number of observations (Nobs): {len(self._observed_intensities)}") print(f" Intensity Errors: {self.intensity_errors}") print(f" Temp Grid: logT {self.min_T} to {self.max_T} (step {self.dT})") print(f" Temp bins: {len(self.logT)}") From a26dbde2c04b99912db1858b252907cd10dfc9ce Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Tue, 29 Jul 2025 17:09:49 -0400 Subject: [PATCH 017/121] Applied ruff --- xrtpy/xrt_dem_iterative/__init__.py | 2 +- xrtpy/xrt_dem_iterative/dem_solver.py | 97 +++++++++++++++------------ 2 files changed, 54 insertions(+), 45 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/__init__.py b/xrtpy/xrt_dem_iterative/__init__.py index c2864bb7f..0b0a9fad5 100644 --- a/xrtpy/xrt_dem_iterative/__init__.py +++ b/xrtpy/xrt_dem_iterative/__init__.py @@ -6,8 +6,8 @@ from xrtpy.response.tools import generate_temperature_responses from xrtpy.util.filters import solve_filter_name, validate_and_format_filters from xrtpy.util.time import epoch -from .dem_solver import XRTDEMIterative +from .dem_solver import XRTDEMIterative # Import main DEM solver class (to be created soon) # from .solver import DEMSolver diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 0fc57409e..016de79ad 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -2,18 +2,12 @@ "XRTDEMIterative", ] -import astropy.time -import astropy.units as u -import matplotlib.pyplot as plt -import numpy as np -from lmfit 
import Parameters , minimize -from scipy.interpolate import interp1d, CubicSpline import warnings -from numpy import trapezoid #np.trapz Deprecation -from xrtpy.util.filters import solve_filter_name, validate_and_format_filters -from xrtpy.util.time import epoch +import astropy.units as u +import numpy as np +from xrtpy.util.filters import validate_and_format_filters class XRTDEMIterative: @@ -66,7 +60,7 @@ def __init__( dT (float, optional): _description_. Defaults to 0.1. min_error (float, optional): _description_. Defaults to 2.0. relative_error (float, optional): _description_. Defaults to 0.03. - + Notes ----- - All input lists (`observed_channel`, `observed_intensities`, and `temperature_responses`) @@ -94,32 +88,31 @@ def __init__( if observed_intensities is None or len(observed_intensities) == 0: raise ValueError("`observed_intensities` is required and cannot be empty.") - - #Errors + # Errors if intensity_errors is not None: self._intensity_errors = np.asarray(intensity_errors, dtype=float) if self._intensity_errors.shape != self.observed_intensities.shape: - raise ValueError("Length of intensity_errors must match observed_intensities.") + raise ValueError( + "Length of intensity_errors must match observed_intensities." 
+ ) else: self._intensity_errors = None # Will be computed later - - + # Store temperature grid parameters self._min_T = float(min_T) self._max_T = float(max_T) self._dT = float(dT) - + # Check dT is positive if self._dT <= 0: raise ValueError("dT must be a positive scalar.") - - + # Store temperature response objects self.responses = temperature_responses if temperature_responses is None or len(temperature_responses) == 0: raise ValueError("`temperature_responses` is required and cannot be empty.") - + # Validate that the temperature grid falls within the responses for r in self.responses: logT_grid = np.log10(r.temperature.value) @@ -128,8 +121,7 @@ def __init__( f"The specified temperature range [{min_T}, {max_T}] is outside the bounds of one or more filter response grids.\n" "Please ensure the temperature range fits within all responses.\n" "Hint: Default response range is logT = 5.5 to 8.0. You can view each response's logT range via: [r.temperature for r in responses]" - ) - + ) # Check consistency between inputs if not ( @@ -143,26 +135,26 @@ def __init__( f" Responses: {len(self.responses)}\n" f" Filter channels: {len(self.observed_channel)}\n" ) - + self.logT = np.arange(self._min_T, self._max_T + self._dT / 2, self._dT) - # Store error model parameters self._min_error = float(min_error) self._relative_error = float(relative_error) - + # Validate and store intensity errors if intensity_errors is not None: self._intensity_errors = np.asarray(intensity_errors, dtype=float) if self._intensity_errors.shape != self._observed_intensities.shape: - raise ValueError("Length of intensity_errors must match observed_intensities.") + raise ValueError( + "Length of intensity_errors must match observed_intensities." 
+ ) else: self._intensity_errors = None - def __repr__(self): return f"" - + # @property #Removed if not used # def name(self) -> str: # """ @@ -171,7 +163,11 @@ def __repr__(self): # return self._name @property - def observed_intensities(self) -> u.Quantity: #Add method to account for known values not worth observed_intensities + def observed_intensities( + self, + ) -> ( + u.Quantity + ): # Add method to account for known values not worth observed_intensities """ Observed intensities with physical units. Returns @@ -180,7 +176,7 @@ def observed_intensities(self) -> u.Quantity: #Add method to account for known v Intensities in DN/s for each filter channel. """ return self._observed_intensities * (u.DN / u.s) - + @property def filter_names(self): """ @@ -201,33 +197,42 @@ def response_values(self): Returns a list of response values (DN cm^5 / pix / s) for each filter. """ return [r.response for r in self.responses] - + @property def min_T(self): - """Lower bound of log10 temperature grid.""" + """ + Lower bound of log10 temperature grid. + """ return self._min_T @property def max_T(self): - """Upper bound of log10 temperature grid.""" + """ + Upper bound of log10 temperature grid. + """ return self._max_T @property def dT(self): - """Bin width of log10 temperature grid.""" + """ + Bin width of log10 temperature grid. + """ return self._dT - + @property def min_error(self): - """Minimum error applied to DN/s when intensity error is not provided.""" + """ + Minimum error applied to DN/s when intensity error is not provided. + """ return self._min_error @property def relative_error(self): - """Relative error (%) used to scale intensity if error is not provided.""" + """ + Relative error (%) used to scale intensity if error is not provided. 
+ """ return self._relative_error - - + @property def intensity_errors(self) -> u.Quantity: """ @@ -236,7 +241,7 @@ def intensity_errors(self) -> u.Quantity: If not provided, errors are estimated using: max(relative_error * observed_intensity, min_error) - For details, see: + For details, see: https://hesperia.gsfc.nasa.gov/ssw/hinode/xrt/idl/util/xrt_dem_iterative2.pro Returns @@ -246,13 +251,14 @@ def intensity_errors(self) -> u.Quantity: """ if self._intensity_errors is not None: return self._intensity_errors * (u.DN / u.s) - + warnings.warn( "No intensity_errors provided. Using default model: " f"max(relative_error * observed_intensity, min_error)\n" f"=> relative_error = {self.relative_error}, min_error = {self.min_error} DN/s\n" "See: https://hesperia.gsfc.nasa.gov/ssw/hinode/xrt/idl/util/xrt_dem_iterative2.pro", - UserWarning) + UserWarning, + ) estimated = np.maximum( self.relative_error * self._observed_intensities, @@ -260,7 +266,6 @@ def intensity_errors(self) -> u.Quantity: ) return estimated * (u.DN / u.s) - def summary(self): print("XRTpy DEM Iterative Setup Summary") print("-" * 40) @@ -270,7 +275,11 @@ def summary(self): print(f" Intensity Errors: {self.intensity_errors}") print(f" Temp Grid: logT {self.min_T} to {self.max_T} (step {self.dT})") print(f" Temp bins: {len(self.logT)}") - print(f" Error model used: {'User-provided' if self._intensity_errors is not None else 'Auto (obs * 0.03, min=2 DN/s)'}") + print( + f" Error model used: {'User-provided' if self._intensity_errors is not None else 'Auto (obs * 0.03, min=2 DN/s)'}" + ) if self._intensity_errors is None: - print("For more info: https://hesperia.gsfc.nasa.gov/ssw/hinode/xrt/idl/util/xrt_dem_iterative2.pro") + print( + "For more info: https://hesperia.gsfc.nasa.gov/ssw/hinode/xrt/idl/util/xrt_dem_iterative2.pro" + ) print("-" * 40) From f97703eef621afb252425f90cd03a70f2a91610a Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Tue, 29 Jul 2025 18:02:12 -0400 Subject: [PATCH 018/121] Add 
basic function to recorde number of MC user wantst to run --- xrtpy/xrt_dem_iterative/dem_solver.py | 38 +++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 016de79ad..b7af43e62 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -35,6 +35,10 @@ class XRTDEMIterative: Minimum absolute intensity error (default: 2.0 DN/s/pix). relative_error : float Relative error for model-based uncertainty estimate (default: 0.03). + monte_carlo_runs : int, optional + Number of Monte Carlo runs to perform (default: 0, disabled). + Each run perturbs `observed_intensities` using `intensity_errors` + as Gaussian sigma and re-solves the DEM. """ def __init__( @@ -48,6 +52,7 @@ def __init__( dT=0.1, min_error=2.0, relative_error=0.03, + monte_carlo_runs =0 ): """ Args: @@ -102,6 +107,27 @@ def __init__( self._min_T = float(min_T) self._max_T = float(max_T) self._dT = float(dT) + + # Validate Monte Carlo setting + if isinstance(monte_carlo_runs, bool): + raise ValueError("monte_carlo_runs must be a non-negative whole number, not a boolean.") + elif isinstance(monte_carlo_runs, (int, np.integer)): + self._monte_carlo_runs = int(monte_carlo_runs) + elif isinstance(monte_carlo_runs, float) and monte_carlo_runs.is_integer(): + self._monte_carlo_runs = int(monte_carlo_runs) + else: + raise ValueError( + "monte_carlo_runs must be a non-negative whole number (e.g., 0, 1, 100). " + "Decimal values are not allowed." 
+ ) + + if self._monte_carlo_runs < 0: + raise ValueError("monte_carlo_runs must be ≥ 0.") + + + + + # Check dT is positive if self._dT <= 0: @@ -151,7 +177,9 @@ def __init__( ) else: self._intensity_errors = None + + def __repr__(self): return f"" @@ -266,12 +294,22 @@ def intensity_errors(self) -> u.Quantity: ) return estimated * (u.DN / u.s) + @property + def monte_carlo_runs(self) -> int: + """ + Number of Monte Carlo runs to perform (0 = disabled). + """ + return self._monte_carlo_runs + + + def summary(self): print("XRTpy DEM Iterative Setup Summary") print("-" * 40) print(f" Filters: {self.filter_names}") print(f" Obs Intensities: {self.observed_intensities}") print(f" Number of observations (Nobs): {len(self._observed_intensities)}") + print(f" Monte Carlo runs: {self.monte_carlo_runs if self.monte_carlo_runs > 0 else 'None'}") print(f" Intensity Errors: {self.intensity_errors}") print(f" Temp Grid: logT {self.min_T} to {self.max_T} (step {self.dT})") print(f" Temp bins: {len(self.logT)}") From 5a52acf68cdf60523e3af8a270d19bc893c60897 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Tue, 29 Jul 2025 18:10:12 -0400 Subject: [PATCH 019/121] Applied black --- xrtpy/xrt_dem_iterative/dem_solver.py | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index b7af43e62..75ae3faa0 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -37,7 +37,7 @@ class XRTDEMIterative: Relative error for model-based uncertainty estimate (default: 0.03). monte_carlo_runs : int, optional Number of Monte Carlo runs to perform (default: 0, disabled). - Each run perturbs `observed_intensities` using `intensity_errors` + Each run perturbs `observed_intensities` using `intensity_errors` as Gaussian sigma and re-solves the DEM. 
""" @@ -52,7 +52,7 @@ def __init__( dT=0.1, min_error=2.0, relative_error=0.03, - monte_carlo_runs =0 + monte_carlo_runs=0, ): """ Args: @@ -107,10 +107,12 @@ def __init__( self._min_T = float(min_T) self._max_T = float(max_T) self._dT = float(dT) - + # Validate Monte Carlo setting if isinstance(monte_carlo_runs, bool): - raise ValueError("monte_carlo_runs must be a non-negative whole number, not a boolean.") + raise ValueError( + "monte_carlo_runs must be a non-negative whole number, not a boolean." + ) elif isinstance(monte_carlo_runs, (int, np.integer)): self._monte_carlo_runs = int(monte_carlo_runs) elif isinstance(monte_carlo_runs, float) and monte_carlo_runs.is_integer(): @@ -124,11 +126,6 @@ def __init__( if self._monte_carlo_runs < 0: raise ValueError("monte_carlo_runs must be ≥ 0.") - - - - - # Check dT is positive if self._dT <= 0: raise ValueError("dT must be a positive scalar.") @@ -177,9 +174,7 @@ def __init__( ) else: self._intensity_errors = None - - def __repr__(self): return f"" @@ -301,15 +296,15 @@ def monte_carlo_runs(self) -> int: """ return self._monte_carlo_runs - - def summary(self): print("XRTpy DEM Iterative Setup Summary") print("-" * 40) print(f" Filters: {self.filter_names}") print(f" Obs Intensities: {self.observed_intensities}") print(f" Number of observations (Nobs): {len(self._observed_intensities)}") - print(f" Monte Carlo runs: {self.monte_carlo_runs if self.monte_carlo_runs > 0 else 'None'}") + print( + f" Monte Carlo runs: {self.monte_carlo_runs if self.monte_carlo_runs > 0 else 'None'}" + ) print(f" Intensity Errors: {self.intensity_errors}") print(f" Temp Grid: logT {self.min_T} to {self.max_T} (step {self.dT})") print(f" Temp bins: {len(self.logT)}") From 60d2cf2cd00c065eca410d89fa1e18ffb87d66a9 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Wed, 30 Jul 2025 19:20:33 -0400 Subject: [PATCH 020/121] Adding max iter --- xrtpy/xrt_dem_iterative/dem_solver.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git 
a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 75ae3faa0..90981dd07 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -53,6 +53,7 @@ def __init__( min_error=2.0, relative_error=0.03, monte_carlo_runs=0, + max_iterations=2000, ): """ Args: @@ -125,6 +126,14 @@ def __init__( if self._monte_carlo_runs < 0: raise ValueError("monte_carlo_runs must be ≥ 0.") + + # Validate max_iterations + if not isinstance(max_iterations, (int, np.integer)) or max_iterations <= 0: + raise ValueError("max_iterations must be a positive integer.") + + self._max_iterations = int(max_iterations) + + # Check dT is positive if self._dT <= 0: @@ -295,6 +304,14 @@ def monte_carlo_runs(self) -> int: Number of Monte Carlo runs to perform (0 = disabled). """ return self._monte_carlo_runs + + @property + def max_iterations(self): + """ + Maximum number of iterations used in the least-squares DEM solver. + """ + return self._max_iterations + def summary(self): print("XRTpy DEM Iterative Setup Summary") @@ -305,6 +322,7 @@ def summary(self): print( f" Monte Carlo runs: {self.monte_carlo_runs if self.monte_carlo_runs > 0 else 'None'}" ) + print(f" Max Iterations: {self.max_iterations}") print(f" Intensity Errors: {self.intensity_errors}") print(f" Temp Grid: logT {self.min_T} to {self.max_T} (step {self.dT})") print(f" Temp bins: {len(self.logT)}") From a03a6233c7e0ed2537d9487421ed43ebf2635b76 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Thu, 31 Jul 2025 14:32:17 -0400 Subject: [PATCH 021/121] Adding create_logT_grid --- xrtpy/xrt_dem_iterative/dem_solver.py | 42 ++++++++++++++++++++++----- 1 file changed, 34 insertions(+), 8 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 90981dd07..57611688b 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -54,6 +54,7 @@ def __init__( relative_error=0.03, 
monte_carlo_runs=0, max_iterations=2000, + solv_factor=1e21 ): """ Args: @@ -114,9 +115,7 @@ def __init__( raise ValueError( "monte_carlo_runs must be a non-negative whole number, not a boolean." ) - elif isinstance(monte_carlo_runs, (int, np.integer)): - self._monte_carlo_runs = int(monte_carlo_runs) - elif isinstance(monte_carlo_runs, float) and monte_carlo_runs.is_integer(): + elif isinstance(monte_carlo_runs, (int, np.integer)) or isinstance(monte_carlo_runs, float) and monte_carlo_runs.is_integer(): self._monte_carlo_runs = int(monte_carlo_runs) else: raise ValueError( @@ -126,15 +125,13 @@ def __init__( if self._monte_carlo_runs < 0: raise ValueError("monte_carlo_runs must be ≥ 0.") - + # Validate max_iterations if not isinstance(max_iterations, (int, np.integer)) or max_iterations <= 0: raise ValueError("max_iterations must be a positive integer.") self._max_iterations = int(max_iterations) - - # Check dT is positive if self._dT <= 0: raise ValueError("dT must be a positive scalar.") @@ -184,6 +181,14 @@ def __init__( else: self._intensity_errors = None + + try: + self._solv_factor = float(solv_factor) + if self._solv_factor <= 0: + raise ValueError("solv_factor must be a positive number.") + except Exception as e: + raise ValueError(f"Invalid solv_factor: {e}") + def __repr__(self): return f"" @@ -251,7 +256,7 @@ def dT(self): """ return self._dT - @property + @property def min_error(self): """ Minimum error applied to DN/s when intensity error is not provided. @@ -304,7 +309,16 @@ def monte_carlo_runs(self) -> int: Number of Monte Carlo runs to perform (0 = disabled). """ return self._monte_carlo_runs - + + @property + def solv_factor(self): + """ + Normalization factor used during DEM fitting to stabilize the solver. + Default is 1e21. 
+ """ + return self._solv_factor + + @property def max_iterations(self): """ @@ -313,12 +327,24 @@ def max_iterations(self): return self._max_iterations + def create_logT_grid(self): + """ + Build the DEM temperature grid *exactly* from min to max in steps of dT. + """ + n_bins = int(round((self._max_T - self._min_T) / self._dT)) + 1 + self.logT = np.linspace(self._min_T, self._max_T, n_bins) + self.T = (10**self.logT) * u.K + + + + def summary(self): print("XRTpy DEM Iterative Setup Summary") print("-" * 40) print(f" Filters: {self.filter_names}") print(f" Obs Intensities: {self.observed_intensities}") print(f" Number of observations (Nobs): {len(self._observed_intensities)}") + print(f" Solver Normalization Factor: {self.solv_factor:.1e}") print( f" Monte Carlo runs: {self.monte_carlo_runs if self.monte_carlo_runs > 0 else 'None'}" ) From 2798577ab2e794a3a676235640e9748a2640cdf9 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Thu, 31 Jul 2025 15:50:23 -0400 Subject: [PATCH 022/121] Adding _build_response_matrix --- xrtpy/xrt_dem_iterative/dem_solver.py | 51 +++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 57611688b..8fd8e56df 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -336,6 +336,57 @@ def create_logT_grid(self): self.T = (10**self.logT) * u.K + def _interpolate_responses_to_grid(self): #This mirrors what xrt_dem_iter_estim.pro does. + """ + Interpolates each filter's temperature response onto the DEM temperature grid (self.logT). 
+ """ + self.interpolated_responses = [] + + for i, (T_orig, R_orig, fname) in enumerate( + zip(self.response_temperatures, self.response_values, self.filter_names) + ): + logT_orig = np.log10(T_orig.to_value(u.K)) + response_vals = R_orig.to_value(u.DN * u.cm**5 / (u.pix * u.s)) + + interp_func = interp1d( + logT_orig, + response_vals, + kind="linear", + bounds_error=False, + fill_value=0.0, + ) + + R_interp = interp_func(self.logT) + self.interpolated_responses.append(R_interp) + + + + + def _build_response_matrix(self): + """ + Builds the response matrix from interpolated responses. + + Sets: + ------- + self.response_matrix : ndarray + 2D array of shape (n_filters, n_temperatures) + Stack your self.interpolated_responses into a 2D NumPy array + + Personal notes: The response matrix is a 2D array that relates temperature to observed intensity + For numerical DEM: + -You approximate the integral as a matrix multiplication + -Each filter contributes one equation (row) + -Each temperature bin contributes one unknown (column) + - Intergal DEM(T) * R(T) dT = sum[DEM_i * R_i * dT] + """ + if not hasattr(self, "interpolated_responses"): + raise RuntimeError("Call _interpolate_responses_to_grid() before building the response matrix.") + + #self._response_matrix = np.vstack(self.interpolated_responses) # matrix + self._response_matrix = np.vstack(self.interpolated_responses).astype(float) # matrix + + print(f"Built response matrix: shape = {self._response_matrix.shape} (filters * logT bins)") + def summary(self): From fa3d7fda6f3316ac45cade936a9881de728c57d4 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Thu, 31 Jul 2025 17:20:16 -0400 Subject: [PATCH 023/121] Trying to get a working initial estimate of how much plasma is emitting at each temperature --- xrtpy/xrt_dem_iterative/dem_solver.py | 83 +++++++++++++++++++++++++++ 1 file changed, 83 insertions(+) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 
8fd8e56df..87369e5d4 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -6,6 +6,7 @@ import astropy.units as u import numpy as np +from scipy.interpolate import interp1d, CubicSpline from xrtpy.util.filters import validate_and_format_filters @@ -360,6 +361,16 @@ def _interpolate_responses_to_grid(self): #This mirrors what xrt_dem_iter_estim. self.interpolated_responses.append(R_interp) + @property + def response_matrix(self): + """ + Returns the response matrix after interpolation. + + Shape: (n_filters, n_temperatures) + """ + if not hasattr(self, "_response_matrix"): + raise AttributeError("Response matrix has not been built yet. Call _build_response_matrix().") + return self._response_matrix def _build_response_matrix(self): @@ -389,6 +400,78 @@ def _build_response_matrix(self): + def _estimate_initial_dem(self, smooth=False, logscale=False, plot=True): #mirrors xrt_dem_iter_estim.pro + """ + Estimates an initial DEM by inverting I ≈ R @ DEM. + An initial estimate of how much plasma is emitting at each temperature + + Sets + ----- + self.initial_dem : np.ndarray + First-guess DEM estimate across logT grid (length = n_temperatures) + + Parameters + ---------- + smooth : bool + If True, applies mild Gaussian smoothing (future option). + logscale : bool + If True, computes log10(DEM) and exponentiates to suppress spikes. + plot : bool + If True, shows a diagnostic plot of the initial DEM. 
+ """ + + #Define inputs - xrt_dem_iter_estim.pro + if not hasattr(self, "response_matrix"): + raise RuntimeError("Run _build_response_matrix() before estimating DEM.") + + I_obs = np.asarray(self._observed_intensities, dtype=float) + R = self.response_matrix + n_filters, n_temps = R.shape + + print(f"Estimating DEM from {n_filters} intensities across {n_temps} temperature bins...") + + with np.errstate(divide="ignore", invalid="ignore"): + estimates = np.zeros(n_temps) + for i in range(n_filters): + row = R[i, :] + ratio = np.where(row > 1e-30, I_obs[i] / row, 0.0) + estimates += ratio + + estimates /= n_filters + estimates /= self._dT # Convert to per-logT-bin definition + + if logscale: + # Suppress large dynamic range and spikes + estimates = 10 ** np.log10(estimates + 1e-30) + + if smooth: + from scipy.ndimage import gaussian_filter1d + estimates = gaussian_filter1d(estimates, sigma=1.0) + + # Apply units + self.initial_dem = estimates * (u.cm**-5 / u.K) + + # Diagnostics + print(f"Initial DEM estimate complete") + print(f"Peak DEM: {self.initial_dem.max():.2e}") + print(f" Mean DEM: {self.initial_dem.mean():.2e}") + + # Plotting + if plot: + import matplotlib.pyplot as plt + plt.figure(figsize=(8, 4)) + plt.plot(self.logT, self.initial_dem.value, drawstyle="steps-mid", label="Initial DEM") + plt.xlabel("log₁₀ T [K]") + plt.ylabel("DEM [cm⁻⁵ K⁻¹]") + plt.title("Initial DEM Estimate") + plt.grid(True) + plt.legend() + plt.tight_layout() + plt.show() + + print("Initial DEM estimate complete") + + def summary(self): print("XRTpy DEM Iterative Setup Summary") print("-" * 40) From bc5ec29e9e91a1d41f93b203b04c405793e4cdfe Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Mon, 25 Aug 2025 16:24:23 -0400 Subject: [PATCH 024/121] Creating a script for monte_carlo_iteration' --- .../monte_carlo_iteration.py | 78 +++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 xrtpy/xrt_dem_iterative/monte_carlo_iteration.py diff --git 
a/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py b/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py new file mode 100644 index 000000000..6c2f67b88 --- /dev/null +++ b/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py @@ -0,0 +1,78 @@ +__all__ = [ + "Monte_Carlo_Iteration", +] + +import numpy as np + + +class Monte_Carlo_Iteration: + + + #def __init__( ): + + + def generate_mc_realizations(self, n_realizations=100, seed=None): + """ + Generate randomized intensity realizations for Monte Carlo uncertainty estimation. + + Parameters + ---------- + n_realizations : int + Number of Monte Carlo runs to generate. + seed : int or None + Random seed for reproducibility. + + Sets + ---- + self.mc_intensity_sets : np.ndarray + Shape (n_realizations, n_filters), randomized intensities. + """ + if seed is not None: + np.random.seed(seed) + + # Compute error bars + abs_error = np.maximum(self.min_error, self.relative_error * self._observed_intensities) + + # Draw random perturbations for each intensity + self.mc_intensity_sets = np.random.normal( + loc=self._observed_intensities, + scale=abs_error, + size=(n_realizations, len(self._observed_intensities)) + ) + + + def run_mc_simulation(self, n_realizations=100, seed=None): + """ + Run Monte Carlo simulations to estimate DEM uncertainties. + + Parameters + ---------- + n_realizations : int + Number of Monte Carlo realizations to run. + seed : int or None + Optional seed for reproducibility. + + Sets + ---- + self.mc_dems : np.ndarray + Shape (n_temps, n_realizations). Each column is a DEM realization. 
+ """ + if seed is not None: + np.random.seed(seed) + + + #Use user-provided or fallback error model + if self._intensity_errors is not None: + errors = np.array(self._intensity_errors,dtype=float)#Covering given user error in pyton array + #errors = self._intensity_errors + else: + errors = np.maximum( + self.min_error, + self.relative_error * self._observed_intensities + ) + + self.mc_intensity_sets = np.random.normal( + loc=self._observed_intensities[:, None], # shape (5, 1) + scale=errors[:, None], # shape (5, 1) + size=(len(self._observed_intensities), n_realizations) # shape (5, 20) + ) From 46dece7c2aae95c3228e1fc614edc44b104d82ce Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Mon, 25 Aug 2025 16:28:04 -0400 Subject: [PATCH 025/121] Adding init --- xrtpy/xrt_dem_iterative/__init__.py | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/__init__.py b/xrtpy/xrt_dem_iterative/__init__.py index 0b0a9fad5..e69de29bb 100644 --- a/xrtpy/xrt_dem_iterative/__init__.py +++ b/xrtpy/xrt_dem_iterative/__init__.py @@ -1,22 +0,0 @@ -""" -Hinode XRT estimate differential emission measures (DEMs) Iterative Solver Module -""" - -from xrtpy.response.temperature_response import TemperatureResponseFundamental -from xrtpy.response.tools import generate_temperature_responses -from xrtpy.util.filters import solve_filter_name, validate_and_format_filters -from xrtpy.util.time import epoch - -from .dem_solver import XRTDEMIterative - -# Import main DEM solver class (to be created soon) -# from .solver import DEMSolver - -__all__ = [ - "TemperatureResponseFundamental", - "generate_temperature_responses", - "solve_filter_name", - "validate_and_format_filters", - "epoch", - "XRTDEMIterative", -] From e2fe9a019694102bed6e9b09ea67e3831829bd3b Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Mon, 25 Aug 2025 16:30:29 -0400 Subject: [PATCH 026/121] WIP: saving changes before rebase --- xrtpy/xrt_dem_iterative/dem_solver.py | 208 
+++++++++++++++++- xrtpy/xrt_dem_iterative/test/__init__.py | 8 + .../test/test_dem_input_validation.py | 113 ++++++++++ 3 files changed, 324 insertions(+), 5 deletions(-) create mode 100644 xrtpy/xrt_dem_iterative/test/__init__.py create mode 100644 xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 87369e5d4..fb1b5a662 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -6,6 +6,7 @@ import astropy.units as u import numpy as np +from lmfit import Parameters, minimize from scipy.interpolate import interp1d, CubicSpline from xrtpy.util.filters import validate_and_format_filters @@ -355,7 +356,7 @@ def _interpolate_responses_to_grid(self): #This mirrors what xrt_dem_iter_estim. kind="linear", bounds_error=False, fill_value=0.0, - ) + ) #kind = 'cubic' ) kind="linear", R_interp = interp_func(self.logT) self.interpolated_responses.append(R_interp) @@ -419,6 +420,7 @@ def _estimate_initial_dem(self, smooth=False, logscale=False, plot=True): #mirro plot : bool If True, shows a diagnostic plot of the initial DEM. 
""" + print(self.response_temperatures, self.response_values, self.filter_names) #Define inputs - xrt_dem_iter_estim.pro if not hasattr(self, "response_matrix"): @@ -439,6 +441,8 @@ def _estimate_initial_dem(self, smooth=False, logscale=False, plot=True): #mirro estimates /= n_filters estimates /= self._dT # Convert to per-logT-bin definition + assert estimates.shape[0] == len(self.logT) + if logscale: # Suppress large dynamic range and spikes @@ -450,28 +454,222 @@ def _estimate_initial_dem(self, smooth=False, logscale=False, plot=True): #mirro # Apply units self.initial_dem = estimates * (u.cm**-5 / u.K) + print(" Max:", np.max(estimates)) + print(" Min:", np.min(estimates)) + print(" dT (logT bin size):", self._dT) # Diagnostics print(f"Initial DEM estimate complete") print(f"Peak DEM: {self.initial_dem.max():.2e}") print(f" Mean DEM: {self.initial_dem.mean():.2e}") + print(f"I_obs: {I_obs}") # Observed intensities + print(f"R (response matrix): {R.shape}") + print(f"Sum of response rows: {[np.sum(R[i]) for i in range(R.shape[0])]}") + print(f"dT: {self._dT}") + print("[DEBUG] DEM before dT division:") + + print("[DEBUG] Response row sums:") + for i, row in enumerate(R): + print(f" {self.filter_names[i]}: sum={np.sum(row):.2e}, max={np.max(row):.2e}") + + print(f"[DEBUG] dT: {self._dT:.3f}") + + + + + + # Plotting if plot: import matplotlib.pyplot as plt plt.figure(figsize=(8, 4)) - plt.plot(self.logT, self.initial_dem.value, drawstyle="steps-mid", label="Initial DEM") + ylabel = "DEM [cm⁻⁵ K⁻¹]" + + # Custom label with filters and date + filters_str = ", ".join(self.observed_channel) + label_str = f"Initial DEM\n{filters_str}\n"#{self.date_obs}" + + if logscale: + log_dem_vals = np.log10(self.initial_dem.value + 1e-30) + plt.plot(self.logT, log_dem_vals, drawstyle="steps-mid", label=label_str, color="purple") + ylabel = "log₁₀ DEM [cm⁻⁵ K⁻¹]" + else: + plt.plot(self.logT, self.initial_dem.value, drawstyle="steps-mid", label=label_str, color="purple") + 
plt.yscale("log") + plt.xlabel("log₁₀ T [K]") - plt.ylabel("DEM [cm⁻⁵ K⁻¹]") + plt.ylabel(ylabel) plt.title("Initial DEM Estimate") plt.grid(True) - plt.legend() + plt.legend(loc="upper right", fontsize=8) plt.tight_layout() plt.show() - + print("Initial DEM estimate complete") + + + + #STEP 1 - Each temperature bin gets its own parameter, initialized with your initial DEM estimate + def _build_lmfit_parameters(self): + """ + Initializes lmfit Parameters from the initial DEM guess. + + Sets: + ------- + self.lmfit_params : lmfit.Parameters + Each temperature bin gets a parameter (free by default). + """ + if not hasattr(self, "initial_dem"): + raise RuntimeError("Call _estimate_initial_dem() before building parameters.") + + params = Parameters() + + # for i, val in enumerate(self.initial_dem): + # # You could add bounds here if needed (e.g., min=0) + # params.add(f"dem_{i}", value=val, min=0) + for i, val in enumerate(self.initial_dem): + # Convert to float if it's a Quantity + if hasattr(val, 'unit'): + val = val.to_value() # default: returns value in current unit + params.add(f"dem_{i}", value=val, min=0) + + + self.lmfit_params = params + print(f"Built {len(params)} lmfit parameters for DEM fit") + + #STEP 2: Build the residual function + #This function computes how far off your DEM model’s predicted intensities are from your observed ones, normalized by the uncertainty. + def _residuals(self, params): + """ + Computes the residuals between modeled and observed intensities. + + Parameters + ---------- + params : lmfit.Parameters + DEM values at each temperature bin. + + Returns + ------- + np.ndarray + Residuals = (I_model - I_obs) / sigma + """ + # 1. Get DEM vector from lmfit Parameters + dem_vector = np.array([params[f"dem_{i}"].value for i in range(len(self.logT))]) + + # 2. Compute modeled intensities: I_model = R · DEM + I_model = self.response_matrix @ dem_vector + + # 3. 
Determine observational errors (user-provided or fallback) + if self._intensity_errors is not None: + errors = np.array(self._intensity_errors) + else: + errors = np.maximum( + self.min_error, + self.relative_error * self._observed_intensities + ) + + # 4. Return normalized residuals + residuals = (I_model - self._observed_intensities) / errors + print("[•] Residuals stats → mean: {:.2e}, std: {:.2e}".format(np.mean(residuals), np.std(residuals))) + return residuals + + def fit_dem(self): + """ + Runs the DEM fitting using lmfit's least-squares minimization. + + Sets: + ------- + self.fitted_dem : np.ndarray + Best-fit DEM solution (length = n_temps) + self.result : lmfit.MinimizerResult + Full fit result object from lmfit + """ + # if not hasattr(self, "lmfit_params"): + # self._build_lmfit_parameters() + + if not hasattr(self, "lmfit_params"): + raise RuntimeError("Call _build_lmfit_parameters() before fitting.") + + print( "Starting DEM optimization..") + result = minimize(self._residuals, self.lmfit_params, method='least_squares', max_nfev=self.max_iterations) + + self.result = result + + if not result.success: + print("[⚠️] DEM fit did not fully converge:") + print(" →", result.message) + + #self.fitted_dem = np.array([result.params[f"dem_{i}"].value for i in range(len(self.logT))]) + self.fitted_dem = np.array([result.params[f"dem_{i}"].value for i in range(len(self.logT))]) * (u.cm**-5 / u.K) + + print(f"[✓] DEM fit complete — reduced chi-squared: {result.chisqr / len(self._observed_intensities):.2f}") + + print("[✓] DEM fit complete") + print(f" → Reduced chi-squared: {result.chisqr / len(self._observed_intensities):.2f}") + print(f" → Total iterations: {result.nfev}") + + return result + + + def print_residual_diagnostics(self, params): + dem_vector = np.array([params[f"dem_{i}"].value for i in range(len(self.logT))]) + I_model = self.response_matrix @ dem_vector + residuals = (I_model - self._observed_intensities) / self._intensity_errors + + 
print("Observed Intensities:", self._observed_intensities) + print("Modeled Intensities:", I_model) + print("Errors:", self._intensity_errors) + print("Residuals:", residuals) + print(f"[•] Residuals stats → mean: {residuals.mean():.2e}, std: {residuals.std():.2e}") + + def plot_dem_fit(self, logscale=True): + """ + Plots the initial and fitted DEM on the same logT grid. + + Parameters + ---------- + logscale : bool + If True, uses a logarithmic y-axis. + """ + import matplotlib.pyplot as plt + + if not hasattr(self, "initial_dem"): + raise RuntimeError("Initial DEM not computed. Run _estimate_initial_dem() first.") + if not hasattr(self, "result"): + raise RuntimeError("DEM fit result not available. Run fit_dem() first.") + + # Extract best-fit DEM from lmfit result + best_fit_vals = np.array([self.result.params[f"dem_{i}"].value for i in range(len(self.logT))]) + initial_dem_vals = self.initial_dem.value if hasattr(self.initial_dem, "value") else self.initial_dem + #log_initial_dem_vals = np.log10(self.initial_dem.value) if hasattr( np.log10(self.initial_dem), "value") else np.log10(self.initial_dem) + + + + + plt.figure(figsize=(10, 5)) + plt.plot(self.logT, initial_dem_vals, drawstyle="steps-mid", label="Initial DEM", linestyle="--", color="gray") + plt.plot(self.logT, best_fit_vals, drawstyle="steps-mid", label="Fitted DEM", color="blue") + + plt.xlabel("log₁₀ T [K]") + #plt.ylabel("DEM [cm⁻⁵ K⁻¹]") + plt.title("Initial vs Fitted DEM") + plt.legend() + plt.grid(True) + if logscale: + plt.yscale("log") + plt.ylabel(r"DEM [cm$^{-5}$ K$^{-1}$] (log-scaled)") + else: + plt.ylabel(r"DEM [cm$^{-5}$ K$^{-1}$]") + plt.tight_layout() + plt.show() + + print(f"[Plot] Peak Initial DEM: {np.max(initial_dem_vals):.2e}") + print(f"[Plot] Peak Fitted DEM: {np.max(best_fit_vals):.2e}") + + def summary(self): print("XRTpy DEM Iterative Setup Summary") print("-" * 40) diff --git a/xrtpy/xrt_dem_iterative/test/__init__.py b/xrtpy/xrt_dem_iterative/test/__init__.py new file mode 
100644 index 000000000..2bd939b8a --- /dev/null +++ b/xrtpy/xrt_dem_iterative/test/__init__.py @@ -0,0 +1,8 @@ +""" +Test suite for xrtpy.xrt_dem_iterative and related DEM tools. + +This file marks the 'tests' directory as a Python package and allows +test modules to import from xrtpy and related packages cleanly. + +Use this space to define shared fixtures or helper utilities for tests if needed. +""" diff --git a/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py b/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py new file mode 100644 index 000000000..d994f1ac8 --- /dev/null +++ b/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py @@ -0,0 +1,113 @@ +from pathlib import Path + +import numpy as np +import pytest +import sunpy +import sunpy.io.special +import sunpy.map + +from xrtpy.response.channel import Channel + +channel_names = [ + "Al-mesh", + "Al-poly", + "C-poly", + "Ti-poly", + "Be-thin", + "Be-med", + "Al-med", + "Al-thick", + "Be-thick", + "Al-poly/Al-mesh", + "Al-poly/Ti-poly", + "Al-poly/Al-thick", + "Al-poly/Be-thick", + "C-poly/Ti-poly", +] + + +@pytest.mark.parametrize("channel_name", channel_names) +def test_channel_name(channel_name): + channel = Channel(channel_name) + assert channel.name == channel_name + + +filename = Path(__file__).parent.parent.absolute() / "data" / "xrt_channels_v0017.genx" + +v6_genx = sunpy.io.special.genx.read_genx(filename) +v6_genx_s = v6_genx["SAVEGEN0"] + +_channel_name_to_index_mapping = { + "Al-mesh": 0, + "Al-poly": 1, + "C-poly": 2, + "Ti-poly": 3, + "Be-thin": 4, + "Be-med": 5, + "Al-med": 6, + "Al-thick": 7, + "Be-thick": 8, + "Al-poly/Al-mesh": 9, + "Al-poly/Ti-poly": 10, + "Al-poly/Al-thick": 11, + "Al-poly/Be-thick": 12, + "C-poly/Ti-poly": 13, +} + +def validate_inputs(self): + """ + Run all internal validation checks again. Raises if any inputs are invalid. + Useful for debugging or after programmatic changes. 
+ """ + # Check shape of intensity_errors + if self._intensity_errors is not None: + if self._intensity_errors.shape != self._observed_intensities.shape: + raise ValueError("Length of intensity_errors must match observed_intensities.") + + # Check consistency between filters, intensities, and responses + if not ( + len(self._observed_intensities) + == len(self.responses) + == len(self.observed_channel) + ): + raise ValueError( + f"\nLength mismatch in inputs:\n" + f" Observed intensities: {len(self._observed_intensities)}\n" + f" Responses: {len(self.responses)}\n" + f" Filter channels: {len(self.observed_channel)}\n" + ) + + # Check temperature grid + if self._dT <= 0: + raise ValueError("dT must be a positive scalar.") + + for r in self.responses: + logT_grid = np.log10(r.temperature.value) + if not (self._min_T >= logT_grid.min() and self._max_T <= logT_grid.max()): + raise ValueError( + f"The specified temperature range [{self._min_T}, {self._max_T}] is outside the bounds of one or more filter response grids.\n" + "Please ensure the temperature range fits within all responses." 
+ ) + + +import pytest + +from xrtpy.response.tools import generate_temperature_responses +from xrtpy.xrt_dem_iterative import XRTDEMIterative + + +def test_validate_inputs_good_case(): + filters = ["Be-thin", "Be-med"] + i_obs = [10000.0, 20000.0] + resp = generate_temperature_responses(filters, obs_date="2007-07-10") + dem = XRTDEMIterative(filters, i_obs, resp) + dem.validate_inputs() # Should NOT raise + +def test_validate_inputs_mismatched_errors(): + filters = ["Be-thin", "Be-med"] + i_obs = [10000.0, 20000.0] + i_err = [100.0] # Wrong length + resp = generate_temperature_responses(filters, obs_date="2007-07-10") + dem = XRTDEMIterative(filters, i_obs, resp, intensity_errors=i_err) + with pytest.raises(ValueError, match="intensity_errors must match"): + dem.validate_inputs() From 5e704b03325b63f36adc6e723dbc3677b3da8d2c Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Mon, 25 Aug 2025 16:41:34 -0400 Subject: [PATCH 027/121] Updating path the data file --- xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py b/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py index d994f1ac8..cb66dc1f9 100644 --- a/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py +++ b/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py @@ -5,6 +5,7 @@ import sunpy import sunpy.io.special import sunpy.map +from importlib.resources import files from xrtpy.response.channel import Channel @@ -26,13 +27,18 @@ ] + + + + @pytest.mark.parametrize("channel_name", channel_names) def test_channel_name(channel_name): channel = Channel(channel_name) assert channel.name == channel_name -filename = Path(__file__).parent.parent.absolute() / "data" / "xrt_channels_v0017.genx" +#filename = Path(__file__).parent.parent.absolute() / "data" / "xrt_channels_v0017.genx" +filename = files("xrtpy.response.data") / "xrt_channels_v0017.genx" v6_genx = 
sunpy.io.special.genx.read_genx(filename) v6_genx_s = v6_genx["SAVEGEN0"] From 25a43944c5a448963e727d7c3eff4a3fdc347408 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Mon, 25 Aug 2025 16:45:58 -0400 Subject: [PATCH 028/121] Updating init info --- xrtpy/xrt_dem_iterative/__init__.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/xrtpy/xrt_dem_iterative/__init__.py b/xrtpy/xrt_dem_iterative/__init__.py index e69de29bb..6f19ff5a8 100644 --- a/xrtpy/xrt_dem_iterative/__init__.py +++ b/xrtpy/xrt_dem_iterative/__init__.py @@ -0,0 +1,7 @@ +""" +XRT DEM Iterative Solver +""" + +from .dem_solver import XRTDEMIterative + +__all__ = ["XRTDEMIterative"] From 3f7aa5b3b00c0df7309438a40963c6b13602f116 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Tue, 26 Aug 2025 09:19:06 -0400 Subject: [PATCH 029/121] Applied Black --- xrtpy/xrt_dem_iterative/dem_solver.py | 198 +++++++++++------- .../monte_carlo_iteration.py | 29 ++- 2 files changed, 137 insertions(+), 90 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index fb1b5a662..c2edb2e24 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -56,7 +56,7 @@ def __init__( relative_error=0.03, monte_carlo_runs=0, max_iterations=2000, - solv_factor=1e21 + solv_factor=1e21, ): """ Args: @@ -117,7 +117,11 @@ def __init__( raise ValueError( "monte_carlo_runs must be a non-negative whole number, not a boolean." 
) - elif isinstance(monte_carlo_runs, (int, np.integer)) or isinstance(monte_carlo_runs, float) and monte_carlo_runs.is_integer(): + elif ( + isinstance(monte_carlo_runs, (int, np.integer)) + or isinstance(monte_carlo_runs, float) + and monte_carlo_runs.is_integer() + ): self._monte_carlo_runs = int(monte_carlo_runs) else: raise ValueError( @@ -183,14 +187,13 @@ def __init__( else: self._intensity_errors = None - try: self._solv_factor = float(solv_factor) if self._solv_factor <= 0: raise ValueError("solv_factor must be a positive number.") except Exception as e: raise ValueError(f"Invalid solv_factor: {e}") - + def __repr__(self): return f"" @@ -258,7 +261,7 @@ def dT(self): """ return self._dT - @property + @property def min_error(self): """ Minimum error applied to DN/s when intensity error is not provided. @@ -320,7 +323,6 @@ def solv_factor(self): """ return self._solv_factor - @property def max_iterations(self): """ @@ -328,7 +330,6 @@ def max_iterations(self): """ return self._max_iterations - def create_logT_grid(self): """ Build the DEM temperature grid *exactly* from min to max in steps of dT. @@ -337,8 +338,9 @@ def create_logT_grid(self): self.logT = np.linspace(self._min_T, self._max_T, n_bins) self.T = (10**self.logT) * u.K - - def _interpolate_responses_to_grid(self): #This mirrors what xrt_dem_iter_estim.pro does. + def _interpolate_responses_to_grid( + self, + ): # This mirrors what xrt_dem_iter_estim.pro does. """ Interpolates each filter's temperature response onto the DEM temperature grid (self.logT). """ @@ -356,12 +358,11 @@ def _interpolate_responses_to_grid(self): #This mirrors what xrt_dem_iter_estim. 
kind="linear", bounds_error=False, fill_value=0.0, - ) #kind = 'cubic' ) kind="linear", + ) # kind = 'cubic' ) kind="linear", R_interp = interp_func(self.logT) self.interpolated_responses.append(R_interp) - @property def response_matrix(self): """ @@ -370,20 +371,21 @@ def response_matrix(self): Shape: (n_filters, n_temperatures) """ if not hasattr(self, "_response_matrix"): - raise AttributeError("Response matrix has not been built yet. Call _build_response_matrix().") + raise AttributeError( + "Response matrix has not been built yet. Call _build_response_matrix()." + ) return self._response_matrix - def _build_response_matrix(self): """ Builds the response matrix from interpolated responses. - + Sets: ------- self.response_matrix : ndarray 2D array of shape (n_filters, n_temperatures) Stack your self.interpolated_responses into a 2D NumPy array - + Personal notes: The response matrix is a 2D array that relates temperature to observed intensity For numerical DEM: -You approximate the integral as a matrix multiplication @@ -392,20 +394,26 @@ def _build_response_matrix(self): - Intergal DEM(T) * R(T) dT = sum[DEM_i * R_i * dT] """ if not hasattr(self, "interpolated_responses"): - raise RuntimeError("Call _interpolate_responses_to_grid() before building the response matrix.") - - #self._response_matrix = np.vstack(self.interpolated_responses) # matrix - self._response_matrix = np.vstack(self.interpolated_responses).astype(float) # matrix - - print(f"Built response matrix: shape = {self._response_matrix.shape} (filters * logT bins)") + raise RuntimeError( + "Call _interpolate_responses_to_grid() before building the response matrix." 
+ ) + # self._response_matrix = np.vstack(self.interpolated_responses) # matrix + self._response_matrix = np.vstack(self.interpolated_responses).astype( + float + ) # matrix + print( + f"Built response matrix: shape = {self._response_matrix.shape} (filters * logT bins)" + ) - def _estimate_initial_dem(self, smooth=False, logscale=False, plot=True): #mirrors xrt_dem_iter_estim.pro + def _estimate_initial_dem( + self, smooth=False, logscale=False, plot=True + ): # mirrors xrt_dem_iter_estim.pro """ - Estimates an initial DEM by inverting I ≈ R @ DEM. + Estimates an initial DEM by inverting I ≈ R @ DEM. An initial estimate of how much plasma is emitting at each temperature - + Sets ----- self.initial_dem : np.ndarray @@ -422,7 +430,7 @@ def _estimate_initial_dem(self, smooth=False, logscale=False, plot=True): #mirro """ print(self.response_temperatures, self.response_values, self.filter_names) - #Define inputs - xrt_dem_iter_estim.pro + # Define inputs - xrt_dem_iter_estim.pro if not hasattr(self, "response_matrix"): raise RuntimeError("Run _build_response_matrix() before estimating DEM.") @@ -430,7 +438,9 @@ def _estimate_initial_dem(self, smooth=False, logscale=False, plot=True): #mirro R = self.response_matrix n_filters, n_temps = R.shape - print(f"Estimating DEM from {n_filters} intensities across {n_temps} temperature bins...") + print( + f"Estimating DEM from {n_filters} intensities across {n_temps} temperature bins..." 
+ ) with np.errstate(divide="ignore", invalid="ignore"): estimates = np.zeros(n_temps) @@ -443,13 +453,13 @@ def _estimate_initial_dem(self, smooth=False, logscale=False, plot=True): #mirro estimates /= self._dT # Convert to per-logT-bin definition assert estimates.shape[0] == len(self.logT) - if logscale: # Suppress large dynamic range and spikes estimates = 10 ** np.log10(estimates + 1e-30) if smooth: from scipy.ndimage import gaussian_filter1d + estimates = gaussian_filter1d(estimates, sigma=1.0) # Apply units @@ -470,32 +480,41 @@ def _estimate_initial_dem(self, smooth=False, logscale=False, plot=True): #mirro print("[DEBUG] Response row sums:") for i, row in enumerate(R): - print(f" {self.filter_names[i]}: sum={np.sum(row):.2e}, max={np.max(row):.2e}") - - print(f"[DEBUG] dT: {self._dT:.3f}") - - - - - + print( + f" {self.filter_names[i]}: sum={np.sum(row):.2e}, max={np.max(row):.2e}" + ) + print(f"[DEBUG] dT: {self._dT:.3f}") # Plotting if plot: import matplotlib.pyplot as plt + plt.figure(figsize=(8, 4)) ylabel = "DEM [cm⁻⁵ K⁻¹]" # Custom label with filters and date filters_str = ", ".join(self.observed_channel) - label_str = f"Initial DEM\n{filters_str}\n"#{self.date_obs}" + label_str = f"Initial DEM\n{filters_str}\n" # {self.date_obs}" if logscale: log_dem_vals = np.log10(self.initial_dem.value + 1e-30) - plt.plot(self.logT, log_dem_vals, drawstyle="steps-mid", label=label_str, color="purple") + plt.plot( + self.logT, + log_dem_vals, + drawstyle="steps-mid", + label=label_str, + color="purple", + ) ylabel = "log₁₀ DEM [cm⁻⁵ K⁻¹]" else: - plt.plot(self.logT, self.initial_dem.value, drawstyle="steps-mid", label=label_str, color="purple") + plt.plot( + self.logT, + self.initial_dem.value, + drawstyle="steps-mid", + label=label_str, + color="purple", + ) plt.yscale("log") plt.xlabel("log₁₀ T [K]") @@ -508,11 +527,7 @@ def _estimate_initial_dem(self, smooth=False, logscale=False, plot=True): #mirro print("Initial DEM estimate complete") - - - - - #STEP 1 - Each 
temperature bin gets its own parameter, initialized with your initial DEM estimate + # STEP 1 - Each temperature bin gets its own parameter, initialized with your initial DEM estimate def _build_lmfit_parameters(self): """ Initializes lmfit Parameters from the initial DEM guess. @@ -523,25 +538,26 @@ def _build_lmfit_parameters(self): Each temperature bin gets a parameter (free by default). """ if not hasattr(self, "initial_dem"): - raise RuntimeError("Call _estimate_initial_dem() before building parameters.") + raise RuntimeError( + "Call _estimate_initial_dem() before building parameters." + ) params = Parameters() - + # for i, val in enumerate(self.initial_dem): # # You could add bounds here if needed (e.g., min=0) # params.add(f"dem_{i}", value=val, min=0) for i, val in enumerate(self.initial_dem): # Convert to float if it's a Quantity - if hasattr(val, 'unit'): + if hasattr(val, "unit"): val = val.to_value() # default: returns value in current unit params.add(f"dem_{i}", value=val, min=0) - - + self.lmfit_params = params print(f"Built {len(params)} lmfit parameters for DEM fit") - #STEP 2: Build the residual function - #This function computes how far off your DEM model’s predicted intensities are from your observed ones, normalized by the uncertainty. + # STEP 2: Build the residual function + # This function computes how far off your DEM model’s predicted intensities are from your observed ones, normalized by the uncertainty. def _residuals(self, params): """ Computes the residuals between modeled and observed intensities. @@ -567,13 +583,16 @@ def _residuals(self, params): errors = np.array(self._intensity_errors) else: errors = np.maximum( - self.min_error, - self.relative_error * self._observed_intensities + self.min_error, self.relative_error * self._observed_intensities ) # 4. 
Return normalized residuals residuals = (I_model - self._observed_intensities) / errors - print("[•] Residuals stats → mean: {:.2e}, std: {:.2e}".format(np.mean(residuals), np.std(residuals))) + print( + "[•] Residuals stats → mean: {:.2e}, std: {:.2e}".format( + np.mean(residuals), np.std(residuals) + ) + ) return residuals def fit_dem(self): @@ -593,27 +612,37 @@ def fit_dem(self): if not hasattr(self, "lmfit_params"): raise RuntimeError("Call _build_lmfit_parameters() before fitting.") - print( "Starting DEM optimization..") - result = minimize(self._residuals, self.lmfit_params, method='least_squares', max_nfev=self.max_iterations) + print("Starting DEM optimization..") + result = minimize( + self._residuals, + self.lmfit_params, + method="least_squares", + max_nfev=self.max_iterations, + ) self.result = result if not result.success: print("[⚠️] DEM fit did not fully converge:") print(" →", result.message) - - #self.fitted_dem = np.array([result.params[f"dem_{i}"].value for i in range(len(self.logT))]) - self.fitted_dem = np.array([result.params[f"dem_{i}"].value for i in range(len(self.logT))]) * (u.cm**-5 / u.K) - print(f"[✓] DEM fit complete — reduced chi-squared: {result.chisqr / len(self._observed_intensities):.2f}") - + # self.fitted_dem = np.array([result.params[f"dem_{i}"].value for i in range(len(self.logT))]) + self.fitted_dem = np.array( + [result.params[f"dem_{i}"].value for i in range(len(self.logT))] + ) * (u.cm**-5 / u.K) + + print( + f"[✓] DEM fit complete — reduced chi-squared: {result.chisqr / len(self._observed_intensities):.2f}" + ) + print("[✓] DEM fit complete") - print(f" → Reduced chi-squared: {result.chisqr / len(self._observed_intensities):.2f}") + print( + f" → Reduced chi-squared: {result.chisqr / len(self._observed_intensities):.2f}" + ) print(f" → Total iterations: {result.nfev}") return result - def print_residual_diagnostics(self, params): dem_vector = np.array([params[f"dem_{i}"].value for i in range(len(self.logT))]) I_model = 
self.response_matrix @ dem_vector @@ -623,7 +652,9 @@ def print_residual_diagnostics(self, params): print("Modeled Intensities:", I_model) print("Errors:", self._intensity_errors) print("Residuals:", residuals) - print(f"[•] Residuals stats → mean: {residuals.mean():.2e}, std: {residuals.std():.2e}") + print( + f"[•] Residuals stats → mean: {residuals.mean():.2e}, std: {residuals.std():.2e}" + ) def plot_dem_fit(self, logscale=True): """ @@ -637,24 +668,42 @@ def plot_dem_fit(self, logscale=True): import matplotlib.pyplot as plt if not hasattr(self, "initial_dem"): - raise RuntimeError("Initial DEM not computed. Run _estimate_initial_dem() first.") + raise RuntimeError( + "Initial DEM not computed. Run _estimate_initial_dem() first." + ) if not hasattr(self, "result"): raise RuntimeError("DEM fit result not available. Run fit_dem() first.") # Extract best-fit DEM from lmfit result - best_fit_vals = np.array([self.result.params[f"dem_{i}"].value for i in range(len(self.logT))]) - initial_dem_vals = self.initial_dem.value if hasattr(self.initial_dem, "value") else self.initial_dem - #log_initial_dem_vals = np.log10(self.initial_dem.value) if hasattr( np.log10(self.initial_dem), "value") else np.log10(self.initial_dem) - - - + best_fit_vals = np.array( + [self.result.params[f"dem_{i}"].value for i in range(len(self.logT))] + ) + initial_dem_vals = ( + self.initial_dem.value + if hasattr(self.initial_dem, "value") + else self.initial_dem + ) + # log_initial_dem_vals = np.log10(self.initial_dem.value) if hasattr( np.log10(self.initial_dem), "value") else np.log10(self.initial_dem) plt.figure(figsize=(10, 5)) - plt.plot(self.logT, initial_dem_vals, drawstyle="steps-mid", label="Initial DEM", linestyle="--", color="gray") - plt.plot(self.logT, best_fit_vals, drawstyle="steps-mid", label="Fitted DEM", color="blue") + plt.plot( + self.logT, + initial_dem_vals, + drawstyle="steps-mid", + label="Initial DEM", + linestyle="--", + color="gray", + ) + plt.plot( + self.logT, + 
best_fit_vals, + drawstyle="steps-mid", + label="Fitted DEM", + color="blue", + ) plt.xlabel("log₁₀ T [K]") - #plt.ylabel("DEM [cm⁻⁵ K⁻¹]") + # plt.ylabel("DEM [cm⁻⁵ K⁻¹]") plt.title("Initial vs Fitted DEM") plt.legend() plt.grid(True) @@ -665,11 +714,10 @@ def plot_dem_fit(self, logscale=True): plt.ylabel(r"DEM [cm$^{-5}$ K$^{-1}$]") plt.tight_layout() plt.show() - + print(f"[Plot] Peak Initial DEM: {np.max(initial_dem_vals):.2e}") print(f"[Plot] Peak Fitted DEM: {np.max(best_fit_vals):.2e}") - def summary(self): print("XRTpy DEM Iterative Setup Summary") print("-" * 40) diff --git a/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py b/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py index 6c2f67b88..685aa6b8c 100644 --- a/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py +++ b/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py @@ -7,9 +7,7 @@ class Monte_Carlo_Iteration: - - #def __init__( ): - + # def __init__( ): def generate_mc_realizations(self, n_realizations=100, seed=None): """ @@ -31,16 +29,17 @@ def generate_mc_realizations(self, n_realizations=100, seed=None): np.random.seed(seed) # Compute error bars - abs_error = np.maximum(self.min_error, self.relative_error * self._observed_intensities) + abs_error = np.maximum( + self.min_error, self.relative_error * self._observed_intensities + ) # Draw random perturbations for each intensity self.mc_intensity_sets = np.random.normal( loc=self._observed_intensities, scale=abs_error, - size=(n_realizations, len(self._observed_intensities)) + size=(n_realizations, len(self._observed_intensities)), ) - def run_mc_simulation(self, n_realizations=100, seed=None): """ Run Monte Carlo simulations to estimate DEM uncertainties. 
@@ -60,19 +59,19 @@ def run_mc_simulation(self, n_realizations=100, seed=None): if seed is not None: np.random.seed(seed) - - #Use user-provided or fallback error model + # Use user-provided or fallback error model if self._intensity_errors is not None: - errors = np.array(self._intensity_errors,dtype=float)#Covering given user error in pyton array - #errors = self._intensity_errors + errors = np.array( + self._intensity_errors, dtype=float + ) # Covering given user error in pyton array + # errors = self._intensity_errors else: errors = np.maximum( - self.min_error, - self.relative_error * self._observed_intensities + self.min_error, self.relative_error * self._observed_intensities ) self.mc_intensity_sets = np.random.normal( loc=self._observed_intensities[:, None], # shape (5, 1) - scale=errors[:, None], # shape (5, 1) - size=(len(self._observed_intensities), n_realizations) # shape (5, 20) - ) + scale=errors[:, None], # shape (5, 1) + size=(len(self._observed_intensities), n_realizations), # shape (5, 20) + ) From cc06e615c04d4c1a1fc19782229bc6f06ac44466 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Wed, 27 Aug 2025 17:30:55 -0400 Subject: [PATCH 030/121] Working on solving DEM scale issue --- xrtpy/xrt_dem_iterative/dem_solver.py | 165 ++++++++++++++++++-------- 1 file changed, 116 insertions(+), 49 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index c2edb2e24..358f7fa82 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -337,6 +337,9 @@ def create_logT_grid(self): n_bins = int(round((self._max_T - self._min_T) / self._dT)) + 1 self.logT = np.linspace(self._min_T, self._max_T, n_bins) self.T = (10**self.logT) * u.K + self.dlogT = self._dT # dimensionless - convenience-27 + + def _interpolate_responses_to_grid( self, @@ -443,25 +446,48 @@ def _estimate_initial_dem( ) with np.errstate(divide="ignore", invalid="ignore"): - estimates = np.zeros(n_temps) + # 
First estimate DEM per-logT (cm^-5) + dem_logT = np.zeros(n_temps) #27 for i in range(n_filters): row = R[i, :] - ratio = np.where(row > 1e-30, I_obs[i] / row, 0.0) - estimates += ratio - - estimates /= n_filters - estimates /= self._dT # Convert to per-logT-bin definition - assert estimates.shape[0] == len(self.logT) - - if logscale: - # Suppress large dynamic range and spikes - estimates = 10 ** np.log10(estimates + 1e-30) - - if smooth: - from scipy.ndimage import gaussian_filter1d - - estimates = gaussian_filter1d(estimates, sigma=1.0) - + ratio = np.where(row > 1e-30, I_obs[i] / row, 0.0) # cm^-5 + dem_logT += ratio + + dem_logT /= n_filters + + # DO NOT divide by self._dT here. + # Optional smoothing in per-logT space: + if smooth: + from scipy.ndimage import gaussian_filter1d + dem_logT = gaussian_filter1d(dem_logT, sigma=1.0) + + # Store canonical DEM (per-logT) for fitting/forward model + self.initial_dem_logT = dem_logT * (u.cm**-5) + + # For plotting PER-K if you want that axis: + dem_perK = (dem_logT / (np.log(10.0) * self.T.to_value(u.K))) # cm^-5 K^-1 + self.initial_dem = dem_perK * (u.cm**-5 / u.K) + + #estimates = np.zeros(n_temps) + # for i in range(n_filters): + # row = R[i, :] + # ratio = np.where(row > 1e-30, I_obs[i] / row, 0.0) + # estimates += ratio + + # estimates /= n_filters + # estimates /= self._dT # Convert to per-logT-bin definition + # assert estimates.shape[0] == len(self.logT) + + # if logscale: + # # Suppress large dynamic range and spikes + # estimates = 10 ** np.log10(estimates + 1e-30) + + # if smooth: + # from scipy.ndimage import gaussian_filter1d + + # estimates = gaussian_filter1d(estimates, sigma=1.0) + + estimates =dem_logT # Apply units self.initial_dem = estimates * (u.cm**-5 / u.K) print(" Max:", np.max(estimates)) @@ -498,25 +524,34 @@ def _estimate_initial_dem( label_str = f"Initial DEM\n{filters_str}\n" # {self.date_obs}" if logscale: - log_dem_vals = np.log10(self.initial_dem.value + 1e-30) - plt.plot( - self.logT, 
- log_dem_vals, - drawstyle="steps-mid", - label=label_str, - color="purple", - ) - ylabel = "log₁₀ DEM [cm⁻⁵ K⁻¹]" + plt.plot(self.logT, np.log10(self.initial_dem.value), drawstyle="steps-mid", + label=label_str, color="purple") + ylabel = r"log$_{10}$ DEM [cm$^{-5}$ K$^{-1}$]" else: - plt.plot( - self.logT, - self.initial_dem.value, - drawstyle="steps-mid", - label=label_str, - color="purple", - ) + plt.plot(self.logT, self.initial_dem.value, drawstyle="steps-mid", + label=label_str, color="purple") plt.yscale("log") + # if logscale: + # log_dem_vals = np.log10(self.initial_dem.value + 1e-30) + # plt.plot( + # self.logT, + # log_dem_vals, + # drawstyle="steps-mid", + # label=label_str, + # color="purple", + # ) + # ylabel = "log₁₀ DEM [cm⁻⁵ K⁻¹]" + # else: + # plt.plot( + # self.logT, + # self.initial_dem.value, + # drawstyle="steps-mid", + # label=label_str, + # color="purple", + # ) + # plt.yscale("log") + plt.xlabel("log₁₀ T [K]") plt.ylabel(ylabel) plt.title("Initial DEM Estimate") @@ -547,12 +582,20 @@ def _build_lmfit_parameters(self): # for i, val in enumerate(self.initial_dem): # # You could add bounds here if needed (e.g., min=0) # params.add(f"dem_{i}", value=val, min=0) - for i, val in enumerate(self.initial_dem): - # Convert to float if it's a Quantity - if hasattr(val, "unit"): - val = val.to_value() # default: returns value in current unit - params.add(f"dem_{i}", value=val, min=0) + + #27 + # for i, val in enumerate(self.initial_dem): + # # Convert to float if it's a Quantity + # if hasattr(val, "unit"): + # val = val.to_value() # default: returns value in current unit + # params.add(f"dem_{i}", value=val, min=0) + + # self.lmfit_params = params + # print(f"Built {len(params)} lmfit parameters for DEM fit") + + for i, val in enumerate(self.initial_dem_logT.to_value(u.cm**-5)): + params.add(f"dem_{i}", value=float(val), min=0.0) self.lmfit_params = params print(f"Built {len(params)} lmfit parameters for DEM fit") @@ -575,8 +618,13 @@ def 
_residuals(self, params): # 1. Get DEM vector from lmfit Parameters dem_vector = np.array([params[f"dem_{i}"].value for i in range(len(self.logT))]) + # DEM per-logT (cm^-5) + dem_logT = np.array([params[f"dem_{i}"].value for i in range(len(self.logT))]) + + # 2. Compute modeled intensities: I_model = R · DEM - I_model = self.response_matrix @ dem_vector + #I_model = self.response_matrix @ dem_vector + I_model = self.response_matrix @ (dem_logT * self.dlogT) #27 # 3. Determine observational errors (user-provided or fallback) if self._intensity_errors is not None: @@ -609,8 +657,16 @@ def fit_dem(self): # if not hasattr(self, "lmfit_params"): # self._build_lmfit_parameters() - if not hasattr(self, "lmfit_params"): - raise RuntimeError("Call _build_lmfit_parameters() before fitting.") + # if not hasattr(self, "lmfit_params"): + # raise RuntimeError("Call _build_lmfit_parameters() before fitting.") + + #27 + if not hasattr(self, "initial_dem_logT"): + raise RuntimeError("Call _estimate_initial_dem() first.") + params = Parameters() + for i, val in enumerate(self.initial_dem_logT.to_value(u.cm**-5)): + params.add(f"dem_{i}", value=float(val), min=0.0) + self.lmfit_params = params print("Starting DEM optimization..") result = minimize( @@ -621,6 +677,11 @@ def fit_dem(self): ) self.result = result + + dem_best_logT = np.array([self.result.params[f"dem_{i}"].value for i in range(len(self.logT))]) + + self.fitted_dem_logT = dem_best_logT * (u.cm**-5) + self.fitted_dem = (dem_best_logT / (np.log(10.0) * self.T.to_value(u.K))) * (u.cm**-5 / u.K) if not result.success: print("[⚠️] DEM fit did not fully converge:") @@ -675,16 +736,21 @@ def plot_dem_fit(self, logscale=True): raise RuntimeError("DEM fit result not available. 
Run fit_dem() first.") # Extract best-fit DEM from lmfit result - best_fit_vals = np.array( - [self.result.params[f"dem_{i}"].value for i in range(len(self.logT))] - ) - initial_dem_vals = ( - self.initial_dem.value - if hasattr(self.initial_dem, "value") - else self.initial_dem - ) + # best_fit_vals = np.array( + # [self.result.params[f"dem_{i}"].value for i in range(len(self.logT))] + # ) + # initial_dem_vals = ( + # self.initial_dem.value + # if hasattr(self.initial_dem, "value") + # else self.initial_dem + # ) # log_initial_dem_vals = np.log10(self.initial_dem.value) if hasattr( np.log10(self.initial_dem), "value") else np.log10(self.initial_dem) + initial_vals = self.initial_dem_logT.to_value(u.cm**-5) + best_vals = self.fitted_dem_logT.to_value(u.cm**-5) + + + plt.figure(figsize=(10, 5)) plt.plot( self.logT, @@ -704,6 +770,7 @@ def plot_dem_fit(self, logscale=True): plt.xlabel("log₁₀ T [K]") # plt.ylabel("DEM [cm⁻⁵ K⁻¹]") + ylabel = r"DEM per $\log_{10}T$ [cm$^{-5}$]" plt.title("Initial vs Fitted DEM") plt.legend() plt.grid(True) From 3ed8726555cd6a87153316a87268c77de28e1038 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Thu, 28 Aug 2025 15:15:30 -0400 Subject: [PATCH 031/121] Working on create_logT_grid --- xrtpy/xrt_dem_iterative/dem_solver.py | 185 ++++++++++++++++---------- 1 file changed, 115 insertions(+), 70 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 358f7fa82..9231e2a96 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -338,8 +338,14 @@ def create_logT_grid(self): self.logT = np.linspace(self._min_T, self._max_T, n_bins) self.T = (10**self.logT) * u.K self.dlogT = self._dT # dimensionless - convenience-27 + self.dlnT = np.log(10.0) * self.dlogT # needed for IDL-style integrals + def _dem_per_log10T(self, dem_per_K): + """Convert DEM per K → DEM per log10 T (cm^-5).""" + + return (np.log(10.0) * self.T) * dem_per_K + def 
_interpolate_responses_to_grid( self, @@ -487,17 +493,23 @@ def _estimate_initial_dem( # estimates = gaussian_filter1d(estimates, sigma=1.0) - estimates =dem_logT - # Apply units - self.initial_dem = estimates * (u.cm**-5 / u.K) - print(" Max:", np.max(estimates)) - print(" Min:", np.min(estimates)) - print(" dT (logT bin size):", self._dT) + # estimates =dem_logT + # # Apply units + # self.initial_dem = estimates * (u.cm**-5 / u.K) + # print(" Max:", np.max(estimates)) + # print(" Min:", np.min(estimates)) + # print(" dT (logT bin size):", self._dT) + + print(" Max DEM_per_logT:", np.max(dem_logT)) + print(" Min DEM_per_logT:", np.min(dem_logT)) + print(" dlog10T:", self.dlogT) + + print("Initial DEM estimate complete") + print(f"Peak DEM_per_logT: {self.initial_dem_logT.max():.2e}") + print(f" Mean DEM_per_logT: {self.initial_dem_logT.mean():.2e}") # Diagnostics - print(f"Initial DEM estimate complete") - print(f"Peak DEM: {self.initial_dem.max():.2e}") - print(f" Mean DEM: {self.initial_dem.mean():.2e}") + print(f"I_obs: {I_obs}") # Observed intensities print(f"R (response matrix): {R.shape}") print(f"Sum of response rows: {[np.sum(R[i]) for i in range(R.shape[0])]}") @@ -572,12 +584,12 @@ def _build_lmfit_parameters(self): self.lmfit_params : lmfit.Parameters Each temperature bin gets a parameter (free by default). """ - if not hasattr(self, "initial_dem"): - raise RuntimeError( - "Call _estimate_initial_dem() before building parameters." - ) + # if not hasattr(self, "initial_dem"): + # raise RuntimeError( + # "Call _estimate_initial_dem() before building parameters." 
+ # ) - params = Parameters() + # params = Parameters() # for i, val in enumerate(self.initial_dem): # # You could add bounds here if needed (e.g., min=0) @@ -594,11 +606,16 @@ def _build_lmfit_parameters(self): # print(f"Built {len(params)} lmfit parameters for DEM fit") + + if not hasattr(self, "initial_dem_logT"): + raise RuntimeError("Call _estimate_initial_dem() before building parameters.") + params = Parameters() for i, val in enumerate(self.initial_dem_logT.to_value(u.cm**-5)): params.add(f"dem_{i}", value=float(val), min=0.0) self.lmfit_params = params print(f"Built {len(params)} lmfit parameters for DEM fit") + # STEP 2: Build the residual function # This function computes how far off your DEM model’s predicted intensities are from your observed ones, normalized by the uncertainty. def _residuals(self, params): @@ -615,8 +632,8 @@ def _residuals(self, params): np.ndarray Residuals = (I_model - I_obs) / sigma """ - # 1. Get DEM vector from lmfit Parameters - dem_vector = np.array([params[f"dem_{i}"].value for i in range(len(self.logT))]) + # # 1. 
Get DEM vector from lmfit Parameters + # dem_vector = np.array([params[f"dem_{i}"].value for i in range(len(self.logT))]) # DEM per-logT (cm^-5) dem_logT = np.array([params[f"dem_{i}"].value for i in range(len(self.logT))]) @@ -688,9 +705,6 @@ def fit_dem(self): print(" →", result.message) # self.fitted_dem = np.array([result.params[f"dem_{i}"].value for i in range(len(self.logT))]) - self.fitted_dem = np.array( - [result.params[f"dem_{i}"].value for i in range(len(self.logT))] - ) * (u.cm**-5 / u.K) print( f"[✓] DEM fit complete — reduced chi-squared: {result.chisqr / len(self._observed_intensities):.2f}" @@ -705,10 +719,11 @@ def fit_dem(self): return result def print_residual_diagnostics(self, params): - dem_vector = np.array([params[f"dem_{i}"].value for i in range(len(self.logT))]) - I_model = self.response_matrix @ dem_vector - residuals = (I_model - self._observed_intensities) / self._intensity_errors + dem_logT = np.array([params[f"dem_{i}"].value for i in range(len(self.logT))]) + I_model = self.response_matrix @ (dem_logT * self.dlogT) + residuals = (I_model - self._observed_intensities) / self._intensity_errors + print("Observed Intensities:", self._observed_intensities) print("Modeled Intensities:", I_model) print("Errors:", self._intensity_errors) @@ -718,72 +733,102 @@ def print_residual_diagnostics(self, params): ) def plot_dem_fit(self, logscale=True): - """ - Plots the initial and fitted DEM on the same logT grid. - - Parameters - ---------- - logscale : bool - If True, uses a logarithmic y-axis. - """ import matplotlib.pyplot as plt - - if not hasattr(self, "initial_dem"): - raise RuntimeError( - "Initial DEM not computed. Run _estimate_initial_dem() first." - ) + if not hasattr(self, "initial_dem_logT"): + raise RuntimeError("Initial DEM not computed. Run _estimate_initial_dem() first.") if not hasattr(self, "result"): raise RuntimeError("DEM fit result not available. 
Run fit_dem() first.") - # Extract best-fit DEM from lmfit result - # best_fit_vals = np.array( - # [self.result.params[f"dem_{i}"].value for i in range(len(self.logT))] - # ) - # initial_dem_vals = ( - # self.initial_dem.value - # if hasattr(self.initial_dem, "value") - # else self.initial_dem - # ) - # log_initial_dem_vals = np.log10(self.initial_dem.value) if hasattr( np.log10(self.initial_dem), "value") else np.log10(self.initial_dem) - + # Choose per-logT plotting (recommended for XRT) initial_vals = self.initial_dem_logT.to_value(u.cm**-5) best_vals = self.fitted_dem_logT.to_value(u.cm**-5) - - + ylabel = r"DEM per $\log_{10}T$ [cm$^{-5}$]" plt.figure(figsize=(10, 5)) - plt.plot( - self.logT, - initial_dem_vals, - drawstyle="steps-mid", - label="Initial DEM", - linestyle="--", - color="gray", - ) - plt.plot( - self.logT, - best_fit_vals, - drawstyle="steps-mid", - label="Fitted DEM", - color="blue", - ) + plt.plot(self.logT, initial_vals, drawstyle="steps-mid", label="Initial DEM", linestyle="--", color="gray") + plt.plot(self.logT, best_vals, drawstyle="steps-mid", label="Fitted DEM", color="blue") plt.xlabel("log₁₀ T [K]") - # plt.ylabel("DEM [cm⁻⁵ K⁻¹]") - ylabel = r"DEM per $\log_{10}T$ [cm$^{-5}$]" plt.title("Initial vs Fitted DEM") plt.legend() plt.grid(True) if logscale: plt.yscale("log") - plt.ylabel(r"DEM [cm$^{-5}$ K$^{-1}$] (log-scaled)") - else: - plt.ylabel(r"DEM [cm$^{-5}$ K$^{-1}$]") + plt.ylabel(ylabel) plt.tight_layout() plt.show() - print(f"[Plot] Peak Initial DEM: {np.max(initial_dem_vals):.2e}") - print(f"[Plot] Peak Fitted DEM: {np.max(best_fit_vals):.2e}") + print(f"[Plot] Peak Initial DEM (per-logT): {np.max(initial_vals):.2e}") + print(f"[Plot] Peak Fitted DEM (per-logT): {np.max(best_vals):.2e}") + + + # def plot_dem_fit(self, logscale=True): + # """ + # Plots the initial and fitted DEM on the same logT grid. + + # Parameters + # ---------- + # logscale : bool + # If True, uses a logarithmic y-axis. 
+ # """ + # import matplotlib.pyplot as plt + + # if not hasattr(self, "initial_dem"): + # raise RuntimeError( + # "Initial DEM not computed. Run _estimate_initial_dem() first." + # ) + # if not hasattr(self, "result"): + # raise RuntimeError("DEM fit result not available. Run fit_dem() first.") + + # # Extract best-fit DEM from lmfit result + # # best_fit_vals = np.array( + # # [self.result.params[f"dem_{i}"].value for i in range(len(self.logT))] + # # ) + # # initial_dem_vals = ( + # # self.initial_dem.value + # # if hasattr(self.initial_dem, "value") + # # else self.initial_dem + # # ) + # # log_initial_dem_vals = np.log10(self.initial_dem.value) if hasattr( np.log10(self.initial_dem), "value") else np.log10(self.initial_dem) + + # initial_vals = self.initial_dem_logT.to_value(u.cm**-5) + # best_vals = self.fitted_dem_logT.to_value(u.cm**-5) + + + + # plt.figure(figsize=(10, 5)) + # plt.plot( + # self.logT, + # initial_dem_vals, + # drawstyle="steps-mid", + # label="Initial DEM", + # linestyle="--", + # color="gray", + # ) + # plt.plot( + # self.logT, + # best_fit_vals, + # drawstyle="steps-mid", + # label="Fitted DEM", + # color="blue", + # ) + + # plt.xlabel("log₁₀ T [K]") + # # plt.ylabel("DEM [cm⁻⁵ K⁻¹]") + # ylabel = r"DEM per $\log_{10}T$ [cm$^{-5}$]" + # plt.title("Initial vs Fitted DEM") + # plt.legend() + # plt.grid(True) + # if logscale: + # plt.yscale("log") + # plt.ylabel(r"DEM [cm$^{-5}$ K$^{-1}$] (log-scaled)") + # else: + # plt.ylabel(r"DEM [cm$^{-5}$ K$^{-1}$]") + # plt.tight_layout() + # plt.show() + + # print(f"[Plot] Peak Initial DEM: {np.max(initial_dem_vals):.2e}") + # print(f"[Plot] Peak Fitted DEM: {np.max(best_fit_vals):.2e}") def summary(self): print("XRTpy DEM Iterative Setup Summary") From 526e3c23be6f3ed3fa3c0f0480390e26cf4de55b Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Thu, 28 Aug 2025 16:26:00 -0400 Subject: [PATCH 032/121] move the emptiness check above the formatter. 
--- xrtpy/xrt_dem_iterative/dem_solver.py | 174 +++++++++++++++++++++----- 1 file changed, 142 insertions(+), 32 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 9231e2a96..13b39fc92 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -85,11 +85,12 @@ def __init__( {'Al-mesh', 'Al-poly', 'C-poly', 'Ti-poly', 'Be-thin', 'Be-med', 'Al-med', 'Al-thick', 'Be-thick', 'Al-poly/Al-mesh', 'Al-poly/Ti-poly', 'Al-poly/Al-thick', 'Al-poly/Be-thick'} """ + # Validate and store filter names + if observed_channel is None or (hasattr(observed_channel, "__len__") and len(observed_channel) == 0): + raise ValueError("`observed_channel` is required and cannot be empty.") self.observed_channel = validate_and_format_filters(observed_channel) - if observed_channel is None or len(observed_channel) == 0: - raise ValueError("`observed_channel` is required and cannot be empty.") # Store intensity and error arrays self._observed_intensities = np.asarray(observed_intensities, dtype=float) @@ -340,7 +341,6 @@ def create_logT_grid(self): self.dlogT = self._dT # dimensionless - convenience-27 self.dlnT = np.log(10.0) * self.dlogT # needed for IDL-style integrals - def _dem_per_log10T(self, dem_per_K): """Convert DEM per K → DEM per log10 T (cm^-5).""" @@ -468,11 +468,15 @@ def _estimate_initial_dem( dem_logT = gaussian_filter1d(dem_logT, sigma=1.0) # Store canonical DEM (per-logT) for fitting/forward model - self.initial_dem_logT = dem_logT * (u.cm**-5) + self.initial_dem_logT = dem_logT * (u.cm**-5) ################################## + #self.initial_dem_logT = (np.log(10.0) * self.T) * self.dem_initial * u.cm**-5 # (N,), per-log10T #28 # For plotting PER-K if you want that axis: dem_perK = (dem_logT / (np.log(10.0) * self.T.to_value(u.K))) # cm^-5 K^-1 self.initial_dem = dem_perK * (u.cm**-5 / u.K) + + #self.dem_initial = dem_per_K_on_grid # (N,), per-K (cm^-5 K^-1), np.ndarray or Quantity 
+ #estimates = np.zeros(n_temps) # for i in range(n_filters): @@ -697,8 +701,11 @@ def fit_dem(self): dem_best_logT = np.array([self.result.params[f"dem_{i}"].value for i in range(len(self.logT))]) - self.fitted_dem_logT = dem_best_logT * (u.cm**-5) + self.fitted_dem_logT = dem_best_logT * (u.cm**-5) #28 self.fitted_dem = (dem_best_logT / (np.log(10.0) * self.T.to_value(u.K))) * (u.cm**-5 / u.K) + + #self.dem_fit = dem_per_K_on_grid # (N,), per-K (cm^-5 K^-1) + #self.fitted_dem_logT = (np.log(10.0) * self.T) * self.dem_fit * u.cm**-5 # (N,), per-log10T if not result.success: print("[⚠️] DEM fit did not fully converge:") @@ -732,34 +739,139 @@ def print_residual_diagnostics(self, params): f"[•] Residuals stats → mean: {residuals.mean():.2e}, std: {residuals.std():.2e}" ) - def plot_dem_fit(self, logscale=True): + + def plot_dem_fit( + self, + logscale: bool = True, + scale: str = "per_K", # "per_K" or "per_log10T" + show_initial: bool = True, + ax=None, + title: str | None = None, + ): + """ + Plot the fitted DEM (and optional initial DEM) using a consistent scale. + + logscale=True -> semilogy of linear values + logscale=False -> linear plot of log10(values) + + scale="per_K" -> DEM [cm^-5 K^-1] + scale="per_log10T" -> phi = DEM * T * ln(10) [cm^-5] + """ + import numpy as np import matplotlib.pyplot as plt - if not hasattr(self, "initial_dem_logT"): - raise RuntimeError("Initial DEM not computed. Run _estimate_initial_dem() first.") - if not hasattr(self, "result"): - raise RuntimeError("DEM fit result not available. 
Run fit_dem() first.") - - # Choose per-logT plotting (recommended for XRT) - initial_vals = self.initial_dem_logT.to_value(u.cm**-5) - best_vals = self.fitted_dem_logT.to_value(u.cm**-5) - ylabel = r"DEM per $\log_{10}T$ [cm$^{-5}$]" - - plt.figure(figsize=(10, 5)) - plt.plot(self.logT, initial_vals, drawstyle="steps-mid", label="Initial DEM", linestyle="--", color="gray") - plt.plot(self.logT, best_vals, drawstyle="steps-mid", label="Fitted DEM", color="blue") - - plt.xlabel("log₁₀ T [K]") - plt.title("Initial vs Fitted DEM") - plt.legend() - plt.grid(True) + + if ax is None: + fig, ax = plt.subplots() + + # Grid + logT = self.logT + T = 10.0 ** logT + + # --- find fitted DEM (prefer canonical names; fall back to legacy) --- + linear_candidates = ["dem_fit", "dem", "fitted_dem", "dem_solved", "dem_solution"] + log_candidates = ["logdem_fit", "logdem", "fitted_logdem", "logdem_solved"] + + dem_fit_perK = None + for name in linear_candidates: + if hasattr(self, name) and getattr(self, name) is not None: + dem_fit_perK = np.asarray(getattr(self, name), dtype=float) + break + if dem_fit_perK is None: + for name in log_candidates: + if hasattr(self, name) and getattr(self, name) is not None: + dem_fit_perK = 10.0 ** np.asarray(getattr(self, name), dtype=float) + break + if dem_fit_perK is None: + raise RuntimeError( + "No fitted DEM found. Expected one of " + f"{linear_candidates + log_candidates} to exist on the object." 
+ ) + + # --- initial DEM (optional) --- + dem_init_perK = None + if show_initial: + init_linear_candidates = ["initial_dem", "dem_initial"] + init_log_candidates = ["initial_logdem", "logdem_initial"] + for name in init_linear_candidates: + if hasattr(self, name) and getattr(self, name) is not None: + dem_init_perK = np.asarray(getattr(self, name), dtype=float) + break + if dem_init_perK is None: + for name in init_log_candidates: + if hasattr(self, name) and getattr(self, name) is not None: + dem_init_perK = 10.0 ** np.asarray(getattr(self, name), dtype=float) + break + + # --- choose scientific scale --- + if scale == "per_K": + y_fit_linear = np.clip(dem_fit_perK, np.finfo(float).tiny, None) + y_init_linear = None if dem_init_perK is None else np.clip(dem_init_perK, np.finfo(float).tiny, None) + y_label_lin = r"DEM per K [cm$^{-5}$ K$^{-1}$]" + y_label_log10 = r"$\log_{10}$ DEM per K [cm$^{-5}$ K$^{-1}$]" + elif scale == "per_log10T": + y_fit_linear = np.clip(dem_fit_perK * T * np.log(10.0), np.finfo(float).tiny, None) + y_init_linear = None if dem_init_perK is None else np.clip(dem_init_perK * T * np.log(10.0), np.finfo(float).tiny, None) + y_label_lin = r"DEM per $\log_{10}T$ [cm$^{-5}$]" + y_label_log10 = r"$\log_{10}$ DEM per $\log_{10}T$ [cm$^{-5}$]" + else: + raise ValueError("scale must be 'per_K' or 'per_log10T'") + + # --- plot without double-logging --- if logscale: - plt.yscale("log") - plt.ylabel(ylabel) - plt.tight_layout() - plt.show() + ax.semilogy(logT, y_fit_linear, label="Fitted DEM") + if y_init_linear is not None: + ax.semilogy(logT, y_init_linear, "--", alpha=0.7, label="Initial DEM") + ax.set_ylabel(y_label_lin) + else: + ax.plot(logT, np.log10(y_fit_linear), label="Fitted DEM") + if y_init_linear is not None: + ax.plot(logT, np.log10(y_init_linear), "--", alpha=0.7, label="Initial DEM") + ax.set_ylabel(y_label_log10) - print(f"[Plot] Peak Initial DEM (per-logT): {np.max(initial_vals):.2e}") - print(f"[Plot] Peak Fitted DEM (per-logT): 
{np.max(best_vals):.2e}") + ax.set_xlabel(r"$\log_{10} T$ [K]") + ax.set_title(title or ("Initial vs Fitted DEM" if show_initial else "Fitted DEM")) + ax.legend() + ax.grid(True, alpha=0.3) + + + # def plot_dem_fit(self, logscale=True): + # import matplotlib.pyplot as plt + # # if not hasattr(self, "initial_dem_logT"): + # # raise RuntimeError("Initial DEM not computed. Run _estimate_initial_dem() first.") + # # if not hasattr(self, "result"): + # # raise RuntimeError("DEM fit result not available. Run fit_dem() first.") + + + # # --- grid sanity --- + # if not hasattr(self, "logT"): + # raise RuntimeError("Temperature grid missing. Run create_logT_grid() first.") + # # ensure we have linear T for conversion if needed + # if not hasattr(self, "T"): + # self.T = np.power(10.0, self.logT) + + # ln10T = np.log(10.0) * self.T + + # # Choose per-logT plotting (recommended for XRT) + # initial_vals = self.initial_dem_logT.to_value(u.cm**-5) + # best_vals = self.fitted_dem_logT.to_value(u.cm**-5) + # ylabel = r"DEM per $\log_{10}T$ [cm$^{-5}$]" + + # plt.figure(figsize=(10, 5)) + # plt.plot(self.logT, initial_vals, drawstyle="steps-mid", label="Initial DEM", linestyle="--", color="gray") + # plt.plot(self.logT, best_vals, drawstyle="steps-mid", label="Fitted DEM", color="blue") + + # plt.xlabel("log₁₀ T [K]") + # plt.title("Initial vs Fitted DEM") + # plt.legend() + # plt.grid(True) + # if logscale: + # plt.yscale("log") + # plt.ylabel(ylabel) + # plt.tight_layout() + # plt.show() + + # print(f"[Plot] Peak Initial DEM (per-logT): {np.max(initial_vals):.2e}") + # print(f"[Plot] Peak Fitted DEM (per-logT): {np.max(best_vals):.2e}") # def plot_dem_fit(self, logscale=True): @@ -793,8 +905,6 @@ def plot_dem_fit(self, logscale=True): # initial_vals = self.initial_dem_logT.to_value(u.cm**-5) # best_vals = self.fitted_dem_logT.to_value(u.cm**-5) - - # plt.figure(figsize=(10, 5)) # plt.plot( From 3cb632a4a8ff1e08c10cd62268fe57e7a796be32 Mon Sep 17 00:00:00 2001 From: joyvelasquez 
Date: Thu, 28 Aug 2025 16:34:21 -0400 Subject: [PATCH 033/121] Tighten the first error block --- xrtpy/xrt_dem_iterative/dem_solver.py | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 13b39fc92..463a66fc3 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -97,20 +97,29 @@ def __init__( if observed_intensities is None or len(observed_intensities) == 0: raise ValueError("`observed_intensities` is required and cannot be empty.") + + if not np.all(np.isfinite(self._observed_intensities)): + raise ValueError("`observed_intensities` must be finite numbers.") # Errors if intensity_errors is not None: self._intensity_errors = np.asarray(intensity_errors, dtype=float) - if self._intensity_errors.shape != self.observed_intensities.shape: - raise ValueError( - "Length of intensity_errors must match observed_intensities." - ) + if self._intensity_errors.shape != self._observed_intensities.shape: + raise ValueError("Length of intensity_errors must match observed_intensities.") + if not np.all(np.isfinite(self._intensity_errors)) or np.any(self._intensity_errors < 0): + raise ValueError("`intensity_errors` must be finite and >= 0.") else: - self._intensity_errors = None # Will be computed later + self._intensity_errors = None # Will be computed later + + + # Store temperature grid parameters self._min_T = float(min_T) self._max_T = float(max_T) + if not (self._min_T < self._max_T): + raise ValueError("min_T must be < max_T.") + self._dT = float(dT) # Validate Monte Carlo setting From abc8dc8eec2c118212034c54537a6a56f58549e4 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Thu, 28 Aug 2025 16:37:58 -0400 Subject: [PATCH 034/121] little snippet is just a guardrail to make sure your log-T grid is real and big enough --- xrtpy/xrt_dem_iterative/dem_solver.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) 
diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 463a66fc3..52614c805 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -119,7 +119,9 @@ def __init__( self._max_T = float(max_T) if not (self._min_T < self._max_T): raise ValueError("min_T must be < max_T.") - + if n_pts < 4: + raise ValueError("Temperature grid must have at least 4 points.") + self._dT = float(dT) # Validate Monte Carlo setting From 2c854436fd95b58862ed6019772a870eac6b7c1c Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Thu, 28 Aug 2025 16:39:37 -0400 Subject: [PATCH 035/121] Forgot to define - n_pts --- xrtpy/xrt_dem_iterative/dem_solver.py | 193 +++++++++++++++----------- 1 file changed, 115 insertions(+), 78 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 52614c805..8fc33ecbc 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -85,19 +85,20 @@ def __init__( {'Al-mesh', 'Al-poly', 'C-poly', 'Ti-poly', 'Be-thin', 'Be-med', 'Al-med', 'Al-thick', 'Be-thick', 'Al-poly/Al-mesh', 'Al-poly/Ti-poly', 'Al-poly/Al-thick', 'Al-poly/Be-thick'} """ - + # Validate and store filter names - if observed_channel is None or (hasattr(observed_channel, "__len__") and len(observed_channel) == 0): + if observed_channel is None or ( + hasattr(observed_channel, "__len__") and len(observed_channel) == 0 + ): raise ValueError("`observed_channel` is required and cannot be empty.") self.observed_channel = validate_and_format_filters(observed_channel) - # Store intensity and error arrays self._observed_intensities = np.asarray(observed_intensities, dtype=float) if observed_intensities is None or len(observed_intensities) == 0: raise ValueError("`observed_intensities` is required and cannot be empty.") - + if not np.all(np.isfinite(self._observed_intensities)): raise ValueError("`observed_intensities` must be finite 
numbers.") @@ -105,20 +106,23 @@ def __init__( if intensity_errors is not None: self._intensity_errors = np.asarray(intensity_errors, dtype=float) if self._intensity_errors.shape != self._observed_intensities.shape: - raise ValueError("Length of intensity_errors must match observed_intensities.") - if not np.all(np.isfinite(self._intensity_errors)) or np.any(self._intensity_errors < 0): + raise ValueError( + "Length of intensity_errors must match observed_intensities." + ) + if not np.all(np.isfinite(self._intensity_errors)) or np.any( + self._intensity_errors < 0 + ): raise ValueError("`intensity_errors` must be finite and >= 0.") else: - self._intensity_errors = None # Will be computed later - - - + self._intensity_errors = None # Will be computed later # Store temperature grid parameters self._min_T = float(min_T) self._max_T = float(max_T) if not (self._min_T < self._max_T): raise ValueError("min_T must be < max_T.") + + n_pts = int(np.floor((self._max_T - self._min_T) / dT + 1e-9)) + 1 if n_pts < 4: raise ValueError("Temperature grid must have at least 4 points.") @@ -351,13 +355,12 @@ def create_logT_grid(self): self.T = (10**self.logT) * u.K self.dlogT = self._dT # dimensionless - convenience-27 self.dlnT = np.log(10.0) * self.dlogT # needed for IDL-style integrals - + def _dem_per_log10T(self, dem_per_K): """Convert DEM per K → DEM per log10 T (cm^-5).""" return (np.log(10.0) * self.T) * dem_per_K - def _interpolate_responses_to_grid( self, ): # This mirrors what xrt_dem_iter_estim.pro does. @@ -464,49 +467,51 @@ def _estimate_initial_dem( with np.errstate(divide="ignore", invalid="ignore"): # First estimate DEM per-logT (cm^-5) - dem_logT = np.zeros(n_temps) #27 + dem_logT = np.zeros(n_temps) # 27 for i in range(n_filters): row = R[i, :] ratio = np.where(row > 1e-30, I_obs[i] / row, 0.0) # cm^-5 dem_logT += ratio - + dem_logT /= n_filters - + # DO NOT divide by self._dT here. 
# Optional smoothing in per-logT space: if smooth: from scipy.ndimage import gaussian_filter1d + dem_logT = gaussian_filter1d(dem_logT, sigma=1.0) # Store canonical DEM (per-logT) for fitting/forward model - self.initial_dem_logT = dem_logT * (u.cm**-5) ################################## - #self.initial_dem_logT = (np.log(10.0) * self.T) * self.dem_initial * u.cm**-5 # (N,), per-log10T #28 + self.initial_dem_logT = dem_logT * ( + u.cm**-5 + ) ################################## + # self.initial_dem_logT = (np.log(10.0) * self.T) * self.dem_initial * u.cm**-5 # (N,), per-log10T #28 # For plotting PER-K if you want that axis: - dem_perK = (dem_logT / (np.log(10.0) * self.T.to_value(u.K))) # cm^-5 K^-1 + dem_perK = dem_logT / (np.log(10.0) * self.T.to_value(u.K)) # cm^-5 K^-1 self.initial_dem = dem_perK * (u.cm**-5 / u.K) - - #self.dem_initial = dem_per_K_on_grid # (N,), per-K (cm^-5 K^-1), np.ndarray or Quantity - - - #estimates = np.zeros(n_temps) - # for i in range(n_filters): - # row = R[i, :] - # ratio = np.where(row > 1e-30, I_obs[i] / row, 0.0) - # estimates += ratio - # estimates /= n_filters - # estimates /= self._dT # Convert to per-logT-bin definition - # assert estimates.shape[0] == len(self.logT) + # self.dem_initial = dem_per_K_on_grid # (N,), per-K (cm^-5 K^-1), np.ndarray or Quantity - # if logscale: - # # Suppress large dynamic range and spikes - # estimates = 10 ** np.log10(estimates + 1e-30) + # estimates = np.zeros(n_temps) + # for i in range(n_filters): + # row = R[i, :] + # ratio = np.where(row > 1e-30, I_obs[i] / row, 0.0) + # estimates += ratio + + # estimates /= n_filters + # estimates /= self._dT # Convert to per-logT-bin definition + # assert estimates.shape[0] == len(self.logT) - # if smooth: - # from scipy.ndimage import gaussian_filter1d + # if logscale: + # # Suppress large dynamic range and spikes + # estimates = 10 ** np.log10(estimates + 1e-30) - # estimates = gaussian_filter1d(estimates, sigma=1.0) + # if smooth: + # from 
scipy.ndimage import gaussian_filter1d + + # estimates = gaussian_filter1d(estimates, sigma=1.0) # estimates =dem_logT # # Apply units @@ -551,12 +556,22 @@ def _estimate_initial_dem( label_str = f"Initial DEM\n{filters_str}\n" # {self.date_obs}" if logscale: - plt.plot(self.logT, np.log10(self.initial_dem.value), drawstyle="steps-mid", - label=label_str, color="purple") + plt.plot( + self.logT, + np.log10(self.initial_dem.value), + drawstyle="steps-mid", + label=label_str, + color="purple", + ) ylabel = r"log$_{10}$ DEM [cm$^{-5}$ K$^{-1}$]" else: - plt.plot(self.logT, self.initial_dem.value, drawstyle="steps-mid", - label=label_str, color="purple") + plt.plot( + self.logT, + self.initial_dem.value, + drawstyle="steps-mid", + label=label_str, + color="purple", + ) plt.yscale("log") # if logscale: @@ -609,8 +624,8 @@ def _build_lmfit_parameters(self): # for i, val in enumerate(self.initial_dem): # # You could add bounds here if needed (e.g., min=0) # params.add(f"dem_{i}", value=val, min=0) - - #27 + + # 27 # for i, val in enumerate(self.initial_dem): # # Convert to float if it's a Quantity # if hasattr(val, "unit"): @@ -620,17 +635,16 @@ def _build_lmfit_parameters(self): # self.lmfit_params = params # print(f"Built {len(params)} lmfit parameters for DEM fit") - - if not hasattr(self, "initial_dem_logT"): - raise RuntimeError("Call _estimate_initial_dem() before building parameters.") + raise RuntimeError( + "Call _estimate_initial_dem() before building parameters." + ) params = Parameters() for i, val in enumerate(self.initial_dem_logT.to_value(u.cm**-5)): params.add(f"dem_{i}", value=float(val), min=0.0) self.lmfit_params = params print(f"Built {len(params)} lmfit parameters for DEM fit") - # STEP 2: Build the residual function # This function computes how far off your DEM model’s predicted intensities are from your observed ones, normalized by the uncertainty. 
def _residuals(self, params): @@ -653,10 +667,9 @@ def _residuals(self, params): # DEM per-logT (cm^-5) dem_logT = np.array([params[f"dem_{i}"].value for i in range(len(self.logT))]) - # 2. Compute modeled intensities: I_model = R · DEM - #I_model = self.response_matrix @ dem_vector - I_model = self.response_matrix @ (dem_logT * self.dlogT) #27 + # I_model = self.response_matrix @ dem_vector + I_model = self.response_matrix @ (dem_logT * self.dlogT) # 27 # 3. Determine observational errors (user-provided or fallback) if self._intensity_errors is not None: @@ -692,7 +705,7 @@ def fit_dem(self): # if not hasattr(self, "lmfit_params"): # raise RuntimeError("Call _build_lmfit_parameters() before fitting.") - #27 + # 27 if not hasattr(self, "initial_dem_logT"): raise RuntimeError("Call _estimate_initial_dem() first.") params = Parameters() @@ -709,14 +722,18 @@ def fit_dem(self): ) self.result = result - - dem_best_logT = np.array([self.result.params[f"dem_{i}"].value for i in range(len(self.logT))]) - self.fitted_dem_logT = dem_best_logT * (u.cm**-5) #28 - self.fitted_dem = (dem_best_logT / (np.log(10.0) * self.T.to_value(u.K))) * (u.cm**-5 / u.K) - - #self.dem_fit = dem_per_K_on_grid # (N,), per-K (cm^-5 K^-1) - #self.fitted_dem_logT = (np.log(10.0) * self.T) * self.dem_fit * u.cm**-5 # (N,), per-log10T + dem_best_logT = np.array( + [self.result.params[f"dem_{i}"].value for i in range(len(self.logT))] + ) + + self.fitted_dem_logT = dem_best_logT * (u.cm**-5) # 28 + self.fitted_dem = (dem_best_logT / (np.log(10.0) * self.T.to_value(u.K))) * ( + u.cm**-5 / u.K + ) + + # self.dem_fit = dem_per_K_on_grid # (N,), per-K (cm^-5 K^-1) + # self.fitted_dem_logT = (np.log(10.0) * self.T) * self.dem_fit * u.cm**-5 # (N,), per-log10T if not result.success: print("[⚠️] DEM fit did not fully converge:") @@ -741,7 +758,7 @@ def print_residual_diagnostics(self, params): dem_logT = np.array([params[f"dem_{i}"].value for i in range(len(self.logT))]) I_model = self.response_matrix @ 
(dem_logT * self.dlogT) residuals = (I_model - self._observed_intensities) / self._intensity_errors - + print("Observed Intensities:", self._observed_intensities) print("Modeled Intensities:", I_model) print("Errors:", self._intensity_errors) @@ -750,11 +767,10 @@ def print_residual_diagnostics(self, params): f"[•] Residuals stats → mean: {residuals.mean():.2e}, std: {residuals.std():.2e}" ) - def plot_dem_fit( self, logscale: bool = True, - scale: str = "per_K", # "per_K" or "per_log10T" + scale: str = "per_K", # "per_K" or "per_log10T" show_initial: bool = True, ax=None, title: str | None = None, @@ -776,11 +792,17 @@ def plot_dem_fit( # Grid logT = self.logT - T = 10.0 ** logT + T = 10.0**logT # --- find fitted DEM (prefer canonical names; fall back to legacy) --- - linear_candidates = ["dem_fit", "dem", "fitted_dem", "dem_solved", "dem_solution"] - log_candidates = ["logdem_fit", "logdem", "fitted_logdem", "logdem_solved"] + linear_candidates = [ + "dem_fit", + "dem", + "fitted_dem", + "dem_solved", + "dem_solution", + ] + log_candidates = ["logdem_fit", "logdem", "fitted_logdem", "logdem_solved"] dem_fit_perK = None for name in linear_candidates: @@ -802,7 +824,7 @@ def plot_dem_fit( dem_init_perK = None if show_initial: init_linear_candidates = ["initial_dem", "dem_initial"] - init_log_candidates = ["initial_logdem", "logdem_initial"] + init_log_candidates = ["initial_logdem", "logdem_initial"] for name in init_linear_candidates: if hasattr(self, name) and getattr(self, name) is not None: dem_init_perK = np.asarray(getattr(self, name), dtype=float) @@ -810,19 +832,33 @@ def plot_dem_fit( if dem_init_perK is None: for name in init_log_candidates: if hasattr(self, name) and getattr(self, name) is not None: - dem_init_perK = 10.0 ** np.asarray(getattr(self, name), dtype=float) + dem_init_perK = 10.0 ** np.asarray( + getattr(self, name), dtype=float + ) break # --- choose scientific scale --- if scale == "per_K": - y_fit_linear = np.clip(dem_fit_perK, 
np.finfo(float).tiny, None) - y_init_linear = None if dem_init_perK is None else np.clip(dem_init_perK, np.finfo(float).tiny, None) - y_label_lin = r"DEM per K [cm$^{-5}$ K$^{-1}$]" + y_fit_linear = np.clip(dem_fit_perK, np.finfo(float).tiny, None) + y_init_linear = ( + None + if dem_init_perK is None + else np.clip(dem_init_perK, np.finfo(float).tiny, None) + ) + y_label_lin = r"DEM per K [cm$^{-5}$ K$^{-1}$]" y_label_log10 = r"$\log_{10}$ DEM per K [cm$^{-5}$ K$^{-1}$]" elif scale == "per_log10T": - y_fit_linear = np.clip(dem_fit_perK * T * np.log(10.0), np.finfo(float).tiny, None) - y_init_linear = None if dem_init_perK is None else np.clip(dem_init_perK * T * np.log(10.0), np.finfo(float).tiny, None) - y_label_lin = r"DEM per $\log_{10}T$ [cm$^{-5}$]" + y_fit_linear = np.clip( + dem_fit_perK * T * np.log(10.0), np.finfo(float).tiny, None + ) + y_init_linear = ( + None + if dem_init_perK is None + else np.clip( + dem_init_perK * T * np.log(10.0), np.finfo(float).tiny, None + ) + ) + y_label_lin = r"DEM per $\log_{10}T$ [cm$^{-5}$]" y_label_log10 = r"$\log_{10}$ DEM per $\log_{10}T$ [cm$^{-5}$]" else: raise ValueError("scale must be 'per_K' or 'per_log10T'") @@ -836,15 +872,18 @@ def plot_dem_fit( else: ax.plot(logT, np.log10(y_fit_linear), label="Fitted DEM") if y_init_linear is not None: - ax.plot(logT, np.log10(y_init_linear), "--", alpha=0.7, label="Initial DEM") + ax.plot( + logT, np.log10(y_init_linear), "--", alpha=0.7, label="Initial DEM" + ) ax.set_ylabel(y_label_log10) ax.set_xlabel(r"$\log_{10} T$ [K]") - ax.set_title(title or ("Initial vs Fitted DEM" if show_initial else "Fitted DEM")) + ax.set_title( + title or ("Initial vs Fitted DEM" if show_initial else "Fitted DEM") + ) ax.legend() ax.grid(True, alpha=0.3) - # def plot_dem_fit(self, logscale=True): # import matplotlib.pyplot as plt # # if not hasattr(self, "initial_dem_logT"): @@ -852,16 +891,15 @@ def plot_dem_fit( # # if not hasattr(self, "result"): # # raise RuntimeError("DEM fit result not 
available. Run fit_dem() first.") - # # --- grid sanity --- # if not hasattr(self, "logT"): # raise RuntimeError("Temperature grid missing. Run create_logT_grid() first.") # # ensure we have linear T for conversion if needed # if not hasattr(self, "T"): # self.T = np.power(10.0, self.logT) - + # ln10T = np.log(10.0) * self.T - + # # Choose per-logT plotting (recommended for XRT) # initial_vals = self.initial_dem_logT.to_value(u.cm**-5) # best_vals = self.fitted_dem_logT.to_value(u.cm**-5) @@ -884,7 +922,6 @@ def plot_dem_fit( # print(f"[Plot] Peak Initial DEM (per-logT): {np.max(initial_vals):.2e}") # print(f"[Plot] Peak Fitted DEM (per-logT): {np.max(best_vals):.2e}") - # def plot_dem_fit(self, logscale=True): # """ # Plots the initial and fitted DEM on the same logT grid. From f1df9881c63f0b4f432545f7539526f2d00802a3 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Thu, 28 Aug 2025 16:42:10 -0400 Subject: [PATCH 036/121] Redundant error check block at the end --- xrtpy/xrt_dem_iterative/dem_solver.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 8fc33ecbc..a6011aaaf 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -193,16 +193,6 @@ def __init__( self._min_error = float(min_error) self._relative_error = float(relative_error) - # Validate and store intensity errors - if intensity_errors is not None: - self._intensity_errors = np.asarray(intensity_errors, dtype=float) - if self._intensity_errors.shape != self._observed_intensities.shape: - raise ValueError( - "Length of intensity_errors must match observed_intensities." 
- ) - else: - self._intensity_errors = None - try: self._solv_factor = float(solv_factor) if self._solv_factor <= 0: From 810a5c135f5c2de2083e540501e9137e129fad74 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Thu, 28 Aug 2025 16:47:04 -0400 Subject: [PATCH 037/121] Minnor clean up --- xrtpy/xrt_dem_iterative/dem_solver.py | 22 ++++++---------------- 1 file changed, 6 insertions(+), 16 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index a6011aaaf..25a152898 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -59,17 +59,6 @@ def __init__( solv_factor=1e21, ): """ - Args: - observed_channel (_type_): _description_ - observed_intensities (_type_): _description_ - temperature_responses (_type_): _description_ - intensity_errors (_type_, optional): _description_. Defaults to None. - min_T (float, optional): _description_. Defaults to 5.5. - max_T (float, optional): _description_. Defaults to 8.0. - dT (float, optional): _description_. Defaults to 0.1. - min_error (float, optional): _description_. Defaults to 2.0. - relative_error (float, optional): _description_. Defaults to 0.03. 
- Notes ----- - All input lists (`observed_channel`, `observed_intensities`, and `temperature_responses`) @@ -93,11 +82,11 @@ def __init__( raise ValueError("`observed_channel` is required and cannot be empty.") self.observed_channel = validate_and_format_filters(observed_channel) - # Store intensity and error arrays - self._observed_intensities = np.asarray(observed_intensities, dtype=float) - + # Store intensity and error arrays if observed_intensities is None or len(observed_intensities) == 0: raise ValueError("`observed_intensities` is required and cannot be empty.") + self._observed_intensities = np.asarray(observed_intensities, dtype=float) + if not np.all(np.isfinite(self._observed_intensities)): raise ValueError("`observed_intensities` must be finite numbers.") @@ -117,6 +106,7 @@ def __init__( self._intensity_errors = None # Will be computed later # Store temperature grid parameters + self._dT = float(dT) self._min_T = float(min_T) self._max_T = float(max_T) if not (self._min_T < self._max_T): @@ -126,7 +116,7 @@ def __init__( if n_pts < 4: raise ValueError("Temperature grid must have at least 4 points.") - self._dT = float(dT) + # Validate Monte Carlo setting if isinstance(monte_carlo_runs, bool): @@ -166,7 +156,7 @@ def __init__( # Validate that the temperature grid falls within the responses for r in self.responses: - logT_grid = np.log10(r.temperature.value) + logT_grid = np.log10(r.temperature.to_value(u.K)) if not (self._min_T >= logT_grid.min() and self._max_T <= logT_grid.max()): raise ValueError( f"The specified temperature range [{min_T}, {max_T}] is outside the bounds of one or more filter response grids.\n" From d7fe72977fb2d3fc6274459d89a46e4dfe4a0d8b Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Thu, 28 Aug 2025 16:51:55 -0400 Subject: [PATCH 038/121] Clean up on docstrings --- xrtpy/xrt_dem_iterative/dem_solver.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py 
b/xrtpy/xrt_dem_iterative/dem_solver.py index 25a152898..07d9e7996 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -191,7 +191,10 @@ def __init__( raise ValueError(f"Invalid solv_factor: {e}") def __repr__(self): - return f"" + return ( + f"" + ) # @property #Removed if not used # def name(self) -> str: @@ -211,28 +214,28 @@ def observed_intensities( Returns ------- `~astropy.units.Quantity` - Intensities in DN/s for each filter channel. + Intensities in DN/s/pix for each filter channel. """ return self._observed_intensities * (u.DN / u.s) @property def filter_names(self): """ - Returns a list of filter names from the temperature responses. + List of filter names from the temperature responses. """ return [r.filter_name for r in self.responses] @property def response_temperatures(self): """ - Returns a list of temperature grids (K) for each filter response. + List of temperature grids (K) for each filter response. """ return [r.temperature for r in self.responses] @property def response_values(self): """ - Returns a list of response values (DN cm^5 / pix / s) for each filter. + List of response values (DN cm^5 / pix / s) for each filter. """ return [r.response for r in self.responses] @@ -267,7 +270,7 @@ def min_error(self): @property def relative_error(self): """ - Relative error (%) used to scale intensity if error is not provided. + Relative error used to scale intensity if error is not provided. 
""" return self._relative_error From 2f0ec9a747e88d6422ced89e4706b945b7e9cc9c Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Thu, 28 Aug 2025 16:55:10 -0400 Subject: [PATCH 039/121] Trigger the warning only once, store the result --- xrtpy/xrt_dem_iterative/dem_solver.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 07d9e7996..97a09ab01 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -293,13 +293,15 @@ def intensity_errors(self) -> u.Quantity: if self._intensity_errors is not None: return self._intensity_errors * (u.DN / u.s) - warnings.warn( - "No intensity_errors provided. Using default model: " - f"max(relative_error * observed_intensity, min_error)\n" - f"=> relative_error = {self.relative_error}, min_error = {self.min_error} DN/s\n" - "See: https://hesperia.gsfc.nasa.gov/ssw/hinode/xrt/idl/util/xrt_dem_iterative2.pro", - UserWarning, - ) + if self._using_estimated_errors: + warnings.warn( + "No intensity_errors provided. 
Using default model: " + f"max(relative_error * observed_intensity, min_error)\n" + f"=> relative_error = {self.relative_error}, min_error = {self.min_error} DN/s\n" + "See: https://hesperia.gsfc.nasa.gov/ssw/hinode/xrt/idl/util/xrt_dem_iterative2.pro", + UserWarning, + ) + self._using_estimated_errors = False # suppress future warnings estimated = np.maximum( self.relative_error * self._observed_intensities, From aa56f80cfdc5a10423e9069453979557405ae9df Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Thu, 28 Aug 2025 16:58:34 -0400 Subject: [PATCH 040/121] Clean up on docstrings --- xrtpy/xrt_dem_iterative/dem_solver.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 97a09ab01..2614bf80a 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -312,22 +312,26 @@ def intensity_errors(self) -> u.Quantity: @property def monte_carlo_runs(self) -> int: """ - Number of Monte Carlo runs to perform (0 = disabled). + Return + ------ + int + Number of Monte Carlo runs to perform (0 = disabled). """ return self._monte_carlo_runs @property def solv_factor(self): """ - Normalization factor used during DEM fitting to stabilize the solver. - Default is 1e21. + Scaling factor used during DEM optimization to stabilize the spline fit. + Corresponds to `solv_factor` in IDL (typically 1e21). """ return self._solv_factor @property def max_iterations(self): """ - Maximum number of iterations used in the least-squares DEM solver. + Maximum number of iterations allowed in the least-squares DEM solver + (e.g., when using `lmfit.minimize`). Default is 2000. 
""" return self._max_iterations From cf2ee0aaf9627fa8e7d6ef5ccc69d5c6feb0c8ee Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Thu, 28 Aug 2025 17:04:12 -0400 Subject: [PATCH 041/121] Clean up text --- xrtpy/xrt_dem_iterative/dem_solver.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 2614bf80a..9953a0f57 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -347,8 +347,8 @@ def create_logT_grid(self): def _dem_per_log10T(self, dem_per_K): """Convert DEM per K → DEM per log10 T (cm^-5).""" - - return (np.log(10.0) * self.T) * dem_per_K + #return (np.log(10.0) * self.T) * dem_per_K + return np.log(10.0) * self.T.to_value(u.K) * dem_per_K def _interpolate_responses_to_grid( self, From e43df5a47774e124eb69203051aeba412ff42066 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Thu, 28 Aug 2025 18:29:43 -0400 Subject: [PATCH 042/121] Updating create_logT_grid --- xrtpy/xrt_dem_iterative/dem_solver.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 9953a0f57..0f85f4e6c 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -340,11 +340,20 @@ def create_logT_grid(self): Build the DEM temperature grid *exactly* from min to max in steps of dT. 
""" n_bins = int(round((self._max_T - self._min_T) / self._dT)) + 1 - self.logT = np.linspace(self._min_T, self._max_T, n_bins) - self.T = (10**self.logT) * u.K - self.dlogT = self._dT # dimensionless - convenience-27 - self.dlnT = np.log(10.0) * self.dlogT # needed for IDL-style integrals + #self.logT = np.linspace(self._min_T, self._max_T, n_bins) #28 + #self.T = (10**self.logT) * u.K #28 + #self.dlogT = self._dT # dimensionless - convenience-27 + #self.dlnT = np.log(10.0) * self.dlogT # needed for IDL-style integrals + + # inclusive grid with float-safe endpoint + self.logT = np.arange(self._min_T, self._max_T + self._dT/2.0, self._dT) + self.T = (10.0 ** self.logT) * u.K + + # scalar spacing (dimensionless) + self.dlogT = float(self._dT) + self.dlnT = np.log(10.0) * self.dlogT # for IDL-style ∫ DEM(T) * R(T) * T dlnT + def _dem_per_log10T(self, dem_per_K): """Convert DEM per K → DEM per log10 T (cm^-5).""" #return (np.log(10.0) * self.T) * dem_per_K From 234b9a8713b273572ef96aa107bbde0c3151c520 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Thu, 28 Aug 2025 18:30:52 -0400 Subject: [PATCH 043/121] Applying black --- xrtpy/xrt_dem_iterative/dem_solver.py | 28 +++++++++++++-------------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 0f85f4e6c..0091f0432 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -82,12 +82,11 @@ def __init__( raise ValueError("`observed_channel` is required and cannot be empty.") self.observed_channel = validate_and_format_filters(observed_channel) - # Store intensity and error arrays + # Store intensity and error arrays if observed_intensities is None or len(observed_intensities) == 0: raise ValueError("`observed_intensities` is required and cannot be empty.") self._observed_intensities = np.asarray(observed_intensities, dtype=float) - if not np.all(np.isfinite(self._observed_intensities)): 
raise ValueError("`observed_intensities` must be finite numbers.") @@ -111,13 +110,11 @@ def __init__( self._max_T = float(max_T) if not (self._min_T < self._max_T): raise ValueError("min_T must be < max_T.") - + n_pts = int(np.floor((self._max_T - self._min_T) / dT + 1e-9)) + 1 if n_pts < 4: raise ValueError("Temperature grid must have at least 4 points.") - - # Validate Monte Carlo setting if isinstance(monte_carlo_runs, bool): raise ValueError( @@ -340,23 +337,24 @@ def create_logT_grid(self): Build the DEM temperature grid *exactly* from min to max in steps of dT. """ n_bins = int(round((self._max_T - self._min_T) / self._dT)) + 1 - #self.logT = np.linspace(self._min_T, self._max_T, n_bins) #28 - #self.T = (10**self.logT) * u.K #28 - #self.dlogT = self._dT # dimensionless - convenience-27 - #self.dlnT = np.log(10.0) * self.dlogT # needed for IDL-style integrals - + # self.logT = np.linspace(self._min_T, self._max_T, n_bins) #28 + # self.T = (10**self.logT) * u.K #28 + # self.dlogT = self._dT # dimensionless - convenience-27 + # self.dlnT = np.log(10.0) * self.dlogT # needed for IDL-style integrals + # inclusive grid with float-safe endpoint - self.logT = np.arange(self._min_T, self._max_T + self._dT/2.0, self._dT) - self.T = (10.0 ** self.logT) * u.K + self.logT = np.arange(self._min_T, self._max_T + self._dT / 2.0, self._dT) + self.T = (10.0**self.logT) * u.K # scalar spacing (dimensionless) self.dlogT = float(self._dT) - self.dlnT = np.log(10.0) * self.dlogT # for IDL-style ∫ DEM(T) * R(T) * T dlnT + self.dlnT = ( + np.log(10.0) * self.dlogT + ) # for IDL-style intergral DEM(T) * R(T) * T dlnT - IDL “regular logT grid” - def _dem_per_log10T(self, dem_per_K): """Convert DEM per K → DEM per log10 T (cm^-5).""" - #return (np.log(10.0) * self.T) * dem_per_K + # return (np.log(10.0) * self.T) * dem_per_K return np.log(10.0) * self.T.to_value(u.K) * dem_per_K def _interpolate_responses_to_grid( From d549f0bc09a71a495ad286e68eceb461eb219254 Mon Sep 17 00:00:00 2001 
From: joyvelasquez Date: Thu, 28 Aug 2025 18:46:24 -0400 Subject: [PATCH 044/121] Updated _interpolate_responses_to_grid --- xrtpy/xrt_dem_iterative/dem_solver.py | 57 ++++++++++++++++----------- 1 file changed, 35 insertions(+), 22 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 0091f0432..1831c8818 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -352,36 +352,49 @@ def create_logT_grid(self): np.log(10.0) * self.dlogT ) # for IDL-style intergral DEM(T) * R(T) * T dlnT - IDL “regular logT grid” - def _dem_per_log10T(self, dem_per_K): - """Convert DEM per K → DEM per log10 T (cm^-5).""" - # return (np.log(10.0) * self.T) * dem_per_K - return np.log(10.0) * self.T.to_value(u.K) * dem_per_K - def _interpolate_responses_to_grid( - self, - ): # This mirrors what xrt_dem_iter_estim.pro does. + def _interpolate_responses_to_grid(self): """ - Interpolates each filter's temperature response onto the DEM temperature grid (self.logT). + Interpolate each filter's temperature response onto self.logT (log10 K). + Stores a dense matrix with shape (n_filters, n_temperatures), unitless numeric. """ - self.interpolated_responses = [] + if not hasattr(self, "logT"): + raise AttributeError("Temperature grid missing. 
Call create_logT_grid() first.") - for i, (T_orig, R_orig, fname) in enumerate( - zip(self.response_temperatures, self.response_values, self.filter_names) - ): + rows = [] + for i, (T_orig, R_orig, fname) in enumerate(zip(self.response_temperatures, self.response_values, self.filter_names)): logT_orig = np.log10(T_orig.to_value(u.K)) - response_vals = R_orig.to_value(u.DN * u.cm**5 / (u.pix * u.s)) + #response_vals = R_orig.to_value(u.DN / u.s / u.pix / (u.cm**5)) + response_vals = R_orig.to_value((u.cm**5 * u.DN) / (u.pix * u.s)) + + #Remove later + print(f"→ Channel {i}: {fname}") + print(f" logT_orig.shape = {logT_orig.shape}, response_vals.shape = {response_vals.shape}") + print(f" logT range: {logT_orig.min():.2f}–{logT_orig.max():.2f}, grid: {self.logT.min():.2f}–{self.logT.max():.2f}") + + try: + interp_func = interp1d( + logT_orig, + response_vals, + kind="linear", + bounds_error=False, + fill_value=0.0, + assume_sorted=True, + ) + interp_row = interp_func(self.logT) + print(f" Interpolated length: {len(interp_row)}") + rows.append(interp_row) + except Exception as e: + print(f" Interpolation failed: {e}") + raise - interp_func = interp1d( - logT_orig, - response_vals, - kind="linear", - bounds_error=False, - fill_value=0.0, - ) # kind = 'cubic' ) kind="linear", + self.interpolated_responses = rows + self._response_matrix = np.vstack(rows) - R_interp = interp_func(self.logT) - self.interpolated_responses.append(R_interp) + if self._response_matrix.shape != (len(self.responses), self.logT.size): + raise RuntimeError("Interpolated response matrix has unexpected shape.") + @property def response_matrix(self): """ From 4f3b4a2284440093fba19badd0cde8d675950c0b Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Thu, 28 Aug 2025 19:26:02 -0400 Subject: [PATCH 045/121] Removing code where it is used twice for the same reason --- xrtpy/xrt_dem_iterative/dem_solver.py | 68 ++++++++++----------------- 1 file changed, 25 insertions(+), 43 deletions(-) diff --git 
a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 1831c8818..5a578265e 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -404,7 +404,7 @@ def response_matrix(self): """ if not hasattr(self, "_response_matrix"): raise AttributeError( - "Response matrix has not been built yet. Call _build_response_matrix()." + "Response matrix has not been built . Call _build_response_matrix() first." ) return self._response_matrix @@ -433,7 +433,7 @@ def _build_response_matrix(self): # self._response_matrix = np.vstack(self.interpolated_responses) # matrix self._response_matrix = np.vstack(self.interpolated_responses).astype( float - ) # matrix + ) # matrix - this is what IDL does when stacking per-filter response vectors. print( f"Built response matrix: shape = {self._response_matrix.shape} (filters * logT bins)" @@ -479,11 +479,13 @@ def _estimate_initial_dem( dem_logT = np.zeros(n_temps) # 27 for i in range(n_filters): row = R[i, :] - ratio = np.where(row > 1e-30, I_obs[i] / row, 0.0) # cm^-5 + ratio = np.where(row > 1e-30, I_obs[i] / row, 0.0) # cm^-5 first estimate for the DEM dem_logT += ratio - dem_logT /= n_filters - + #IDL - for i=0,n_channels-1 do dem += obs_val[i] / response[i,*] + #IDL - dem = dem / n_channels + + # DO NOT divide by self._dT here. 
# Optional smoothing in per-logT space: if smooth: @@ -501,34 +503,6 @@ def _estimate_initial_dem( dem_perK = dem_logT / (np.log(10.0) * self.T.to_value(u.K)) # cm^-5 K^-1 self.initial_dem = dem_perK * (u.cm**-5 / u.K) - # self.dem_initial = dem_per_K_on_grid # (N,), per-K (cm^-5 K^-1), np.ndarray or Quantity - - # estimates = np.zeros(n_temps) - # for i in range(n_filters): - # row = R[i, :] - # ratio = np.where(row > 1e-30, I_obs[i] / row, 0.0) - # estimates += ratio - - # estimates /= n_filters - # estimates /= self._dT # Convert to per-logT-bin definition - # assert estimates.shape[0] == len(self.logT) - - # if logscale: - # # Suppress large dynamic range and spikes - # estimates = 10 ** np.log10(estimates + 1e-30) - - # if smooth: - # from scipy.ndimage import gaussian_filter1d - - # estimates = gaussian_filter1d(estimates, sigma=1.0) - - # estimates =dem_logT - # # Apply units - # self.initial_dem = estimates * (u.cm**-5 / u.K) - # print(" Max:", np.max(estimates)) - # print(" Min:", np.min(estimates)) - # print(" dT (logT bin size):", self._dT) - print(" Max DEM_per_logT:", np.max(dem_logT)) print(" Min DEM_per_logT:", np.min(dem_logT)) print(" dlog10T:", self.dlogT) @@ -613,6 +587,7 @@ def _estimate_initial_dem( print("Initial DEM estimate complete") + # STEP 1 - Each temperature bin gets its own parameter, initialized with your initial DEM estimate def _build_lmfit_parameters(self): """ @@ -652,6 +627,7 @@ def _build_lmfit_parameters(self): for i, val in enumerate(self.initial_dem_logT.to_value(u.cm**-5)): params.add(f"dem_{i}", value=float(val), min=0.0) self.lmfit_params = params + print(f"Built {len(params)} lmfit parameters for DEM fit") # STEP 2: Build the residual function @@ -681,12 +657,13 @@ def _residuals(self, params): I_model = self.response_matrix @ (dem_logT * self.dlogT) # 27 # 3. 
Determine observational errors (user-provided or fallback) - if self._intensity_errors is not None: - errors = np.array(self._intensity_errors) - else: - errors = np.maximum( - self.min_error, self.relative_error * self._observed_intensities - ) + errors = self.intensity_errors.to_value(u.DN / u.s) + # if self._intensity_errors is not None: + # errors = np.array(self._intensity_errors) + # else: + # errors = np.maximum( + # self.min_error, self.relative_error * self._observed_intensities + # ) # 4. Return normalized residuals residuals = (I_model - self._observed_intensities) / errors @@ -717,11 +694,16 @@ def fit_dem(self): # 27 if not hasattr(self, "initial_dem_logT"): raise RuntimeError("Call _estimate_initial_dem() first.") - params = Parameters() - for i, val in enumerate(self.initial_dem_logT.to_value(u.cm**-5)): - params.add(f"dem_{i}", value=float(val), min=0.0) - self.lmfit_params = params + if not hasattr(self, "lmfit_params"): + self._build_lmfit_parameters() + + #Mimght not need- already using in _build_lmfit_parameters(). 
+ # params = Parameters() + # for i, val in enumerate(self.initial_dem_logT.to_value(u.cm**-5)): + # params.add(f"dem_{i}", value=float(val), min=0.0) + # self.lmfit_params = params + print("Starting DEM optimization..") result = minimize( self._residuals, From 3f8f58932e99c4855313dc120802ded550351895 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Thu, 28 Aug 2025 19:33:48 -0400 Subject: [PATCH 046/121] Cleaning up some function logic; --- xrtpy/xrt_dem_iterative/dem_solver.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 5a578265e..69e5f941d 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -742,17 +742,21 @@ def fit_dem(self): ) print(f" → Total iterations: {result.nfev}") + if return_dem: + return self.fitted_dem return result + def print_residual_diagnostics(self, params): dem_logT = np.array([params[f"dem_{i}"].value for i in range(len(self.logT))]) I_model = self.response_matrix @ (dem_logT * self.dlogT) - residuals = (I_model - self._observed_intensities) / self._intensity_errors + errors = self.intensity_errors.to_value(u.DN / u.s) + residuals = (I_model - self._observed_intensities) / errors print("Observed Intensities:", self._observed_intensities) print("Modeled Intensities:", I_model) - print("Errors:", self._intensity_errors) + print("Errors:", errors) print("Residuals:", residuals) print( f"[•] Residuals stats → mean: {residuals.mean():.2e}, std: {residuals.std():.2e}" From f1a310ac3f3cc981ce1fafad4f82ef1a6420bf58 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Fri, 29 Aug 2025 14:20:29 -0400 Subject: [PATCH 047/121] Adding ComputeDEMStatistics --- xrtpy/xrt_dem_iterative/__init__.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/xrtpy/xrt_dem_iterative/__init__.py b/xrtpy/xrt_dem_iterative/__init__.py index 6f19ff5a8..0f6b9de2d 100644 --- 
a/xrtpy/xrt_dem_iterative/__init__.py +++ b/xrtpy/xrt_dem_iterative/__init__.py @@ -4,4 +4,8 @@ from .dem_solver import XRTDEMIterative -__all__ = ["XRTDEMIterative"] +__all__ = [ + "XRTDEMIterative", + "MonteCarloIteration", + "ComputeDEMStatistics" + ] From e2dd4ef1e75b9273e7e059ccbafaa1956d782c06 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Fri, 29 Aug 2025 18:33:54 -0400 Subject: [PATCH 048/121] Script for DEM Stats --- xrtpy/xrt_dem_iterative/xrt_dem_statistics.py | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 xrtpy/xrt_dem_iterative/xrt_dem_statistics.py diff --git a/xrtpy/xrt_dem_iterative/xrt_dem_statistics.py b/xrtpy/xrt_dem_iterative/xrt_dem_statistics.py new file mode 100644 index 000000000..283df03c3 --- /dev/null +++ b/xrtpy/xrt_dem_iterative/xrt_dem_statistics.py @@ -0,0 +1,27 @@ +__all__ = [ + "ComputeDEMStatistics", +] + +import numpy as np + + +class ComputeDEMStatistics: + """ + Diagnostic class for computing residual statistics from a fitted DEM solution. + + This class provides utilities to: + Compute chi-squared and reduced chi-squared between observed and modeled intensities. + Print residual diagnostics per filter (similar to IDL's xrt_iter_demstat.pro). + + Methods + ------- + compute_chi_squared() + Compute total and reduced chi-squared for DEM fit. + + print_residuals() + Print modeled vs. observed intensities and residuals (normalized by error). 
+ """ + + def __init__(self, dem_solver): + self.dem_solver = dem_solver + From 3ca6ec36235699c32e4062a3b461bec09a5ec8f8 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Fri, 29 Aug 2025 18:34:26 -0400 Subject: [PATCH 049/121] Adding ComputeDEMStatistics --- xrtpy/xrt_dem_iterative/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/xrtpy/xrt_dem_iterative/__init__.py b/xrtpy/xrt_dem_iterative/__init__.py index 0f6b9de2d..f6650a508 100644 --- a/xrtpy/xrt_dem_iterative/__init__.py +++ b/xrtpy/xrt_dem_iterative/__init__.py @@ -9,3 +9,4 @@ "MonteCarloIteration", "ComputeDEMStatistics" ] + From 2a398440740aab53f5fefacfda991d12e132f0bd Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Fri, 29 Aug 2025 18:39:25 -0400 Subject: [PATCH 050/121] Removing unsed code --- xrtpy/xrt_dem_iterative/dem_solver.py | 184 +++++++----------- xrtpy/xrt_dem_iterative/xrt_dem_statistics.py | 50 +++++ 2 files changed, 115 insertions(+), 119 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 69e5f941d..bb8d42aa5 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -11,6 +11,11 @@ from xrtpy.util.filters import validate_and_format_filters +#DEM specific +from xrtpy.xrt_dem_iterative.monte_carlo_iteration import MonteCarloIteration +from xrtpy.xrt_dem_iterative.xrt_dem_statistics import ComputeDEMStatistics + + class XRTDEMIterative: """ @@ -668,7 +673,7 @@ def _residuals(self, params): # 4. 
Return normalized residuals residuals = (I_model - self._observed_intensities) / errors print( - "[•] Residuals stats → mean: {:.2e}, std: {:.2e}".format( + "[-Residuals stats > mean: {:.2e}, std: {:.2e}".format( np.mean(residuals), np.std(residuals) ) ) @@ -727,23 +732,21 @@ def fit_dem(self): # self.fitted_dem_logT = (np.log(10.0) * self.T) * self.dem_fit * u.cm**-5 # (N,), per-log10T if not result.success: - print("[⚠️] DEM fit did not fully converge:") - print(" →", result.message) + print(" DEM fit did not fully converge:") + print(" >", result.message) # self.fitted_dem = np.array([result.params[f"dem_{i}"].value for i in range(len(self.logT))]) print( - f"[✓] DEM fit complete — reduced chi-squared: {result.chisqr / len(self._observed_intensities):.2f}" + f" DEM fit complete — reduced chi-squared: {result.chisqr / len(self._observed_intensities):.2f}" ) - print("[✓] DEM fit complete") + print(" DEM fit complete") print( f" → Reduced chi-squared: {result.chisqr / len(self._observed_intensities):.2f}" ) print(f" → Total iterations: {result.nfev}") - if return_dem: - return self.fitted_dem return result @@ -759,7 +762,7 @@ def print_residual_diagnostics(self, params): print("Errors:", errors) print("Residuals:", residuals) print( - f"[•] Residuals stats → mean: {residuals.mean():.2e}, std: {residuals.std():.2e}" + f"Residuals stats → mean: {residuals.mean():.2e}, std: {residuals.std():.2e}" ) def plot_dem_fit( @@ -769,15 +772,11 @@ def plot_dem_fit( show_initial: bool = True, ax=None, title: str | None = None, + show: bool = True ): """ Plot the fitted DEM (and optional initial DEM) using a consistent scale. 
- logscale=True -> semilogy of linear values - logscale=False -> linear plot of log10(values) - - scale="per_K" -> DEM [cm^-5 K^-1] - scale="per_log10T" -> phi = DEM * T * ln(10) [cm^-5] """ import numpy as np import matplotlib.pyplot as plt @@ -860,15 +859,15 @@ def plot_dem_fit( # --- plot without double-logging --- if logscale: - ax.semilogy(logT, y_fit_linear, label="Fitted DEM") + ax.semilogy(logT, y_fit_linear, linestyle='-', label="Fitted DEM") if y_init_linear is not None: - ax.semilogy(logT, y_init_linear, "--", alpha=0.7, label="Initial DEM") + ax.semilogy(logT, y_init_linear,linestyle='-', alpha=0.7, label="Initial DEM") ax.set_ylabel(y_label_lin) else: - ax.plot(logT, np.log10(y_fit_linear), label="Fitted DEM") + ax.plot(logT, np.log10(y_fit_linear),linestyle='-', label="Fitted DEM") if y_init_linear is not None: ax.plot( - logT, np.log10(y_init_linear), "--", alpha=0.7, label="Initial DEM" + logT, np.log10(y_init_linear), linestyle='-', alpha=0.7, label="Initial DEM" ) ax.set_ylabel(y_label_log10) @@ -878,110 +877,57 @@ def plot_dem_fit( ) ax.legend() ax.grid(True, alpha=0.3) + return ax + + + def solve(self): + print("[•] Running DEM fit...") + + # Build spline parameters + self._build_lmfit_parameters() + + # Perform the fit + result = minimize( + self._residuals, + self.lmfit_params, + method="least_squares", + max_nfev=self.max_iterations, + ) + + self.result = result + + # Extract fitted DEM from lmfit parameters + dem_best_logT = np.array([result.params[f"dem_{i}"].value for i in range(len(self.logT))]) + self.fitted_dem_logT = dem_best_logT * u.cm**-5 + self.fitted_dem = (dem_best_logT / (np.log(10.0) * self.T.to_value(u.K))) * u.cm**-5 / u.K + + if not result.success: + print("[⚠️] DEM fit did not fully converge:") + print(" →", result.message) + + print("[✓] DEM fit complete") + print(f" → Reduced chi-squared: {result.chisqr / len(self._observed_intensities):.2f}") + print(f" → Total iterations: {result.nfev}") + + + # Automatically run MC if 
enabled + if self.monte_carlo_runs > 0: + print(f"[•] Running Monte Carlo with {self.monte_carlo_runs} trials...") + from .monte_carlo_iteration import MonteCarloIteration + mc = MonteCarloIteration(self) + mc.run_mc_simulation(n_draws=self.monte_carlo_runs) + self.mc_results = mc + self.mc_stats = mc.mc_stats + + # #Stats + # self.dem_stats = ComputeDEMStatistics(self) + # chi2, chi2_red = self.dem_stats.compute_chi_squared() + # print(f"[χ²] Total χ²: {chi2:.2f}, Reduced χ²: {chi2_red:.2f}") + # self.dem_stats.print_residuals() + + return result - # def plot_dem_fit(self, logscale=True): - # import matplotlib.pyplot as plt - # # if not hasattr(self, "initial_dem_logT"): - # # raise RuntimeError("Initial DEM not computed. Run _estimate_initial_dem() first.") - # # if not hasattr(self, "result"): - # # raise RuntimeError("DEM fit result not available. Run fit_dem() first.") - - # # --- grid sanity --- - # if not hasattr(self, "logT"): - # raise RuntimeError("Temperature grid missing. Run create_logT_grid() first.") - # # ensure we have linear T for conversion if needed - # if not hasattr(self, "T"): - # self.T = np.power(10.0, self.logT) - - # ln10T = np.log(10.0) * self.T - - # # Choose per-logT plotting (recommended for XRT) - # initial_vals = self.initial_dem_logT.to_value(u.cm**-5) - # best_vals = self.fitted_dem_logT.to_value(u.cm**-5) - # ylabel = r"DEM per $\log_{10}T$ [cm$^{-5}$]" - - # plt.figure(figsize=(10, 5)) - # plt.plot(self.logT, initial_vals, drawstyle="steps-mid", label="Initial DEM", linestyle="--", color="gray") - # plt.plot(self.logT, best_vals, drawstyle="steps-mid", label="Fitted DEM", color="blue") - - # plt.xlabel("log₁₀ T [K]") - # plt.title("Initial vs Fitted DEM") - # plt.legend() - # plt.grid(True) - # if logscale: - # plt.yscale("log") - # plt.ylabel(ylabel) - # plt.tight_layout() - # plt.show() - - # print(f"[Plot] Peak Initial DEM (per-logT): {np.max(initial_vals):.2e}") - # print(f"[Plot] Peak Fitted DEM (per-logT): 
{np.max(best_vals):.2e}") - - # def plot_dem_fit(self, logscale=True): - # """ - # Plots the initial and fitted DEM on the same logT grid. - # Parameters - # ---------- - # logscale : bool - # If True, uses a logarithmic y-axis. - # """ - # import matplotlib.pyplot as plt - - # if not hasattr(self, "initial_dem"): - # raise RuntimeError( - # "Initial DEM not computed. Run _estimate_initial_dem() first." - # ) - # if not hasattr(self, "result"): - # raise RuntimeError("DEM fit result not available. Run fit_dem() first.") - - # # Extract best-fit DEM from lmfit result - # # best_fit_vals = np.array( - # # [self.result.params[f"dem_{i}"].value for i in range(len(self.logT))] - # # ) - # # initial_dem_vals = ( - # # self.initial_dem.value - # # if hasattr(self.initial_dem, "value") - # # else self.initial_dem - # # ) - # # log_initial_dem_vals = np.log10(self.initial_dem.value) if hasattr( np.log10(self.initial_dem), "value") else np.log10(self.initial_dem) - - # initial_vals = self.initial_dem_logT.to_value(u.cm**-5) - # best_vals = self.fitted_dem_logT.to_value(u.cm**-5) - - # plt.figure(figsize=(10, 5)) - # plt.plot( - # self.logT, - # initial_dem_vals, - # drawstyle="steps-mid", - # label="Initial DEM", - # linestyle="--", - # color="gray", - # ) - # plt.plot( - # self.logT, - # best_fit_vals, - # drawstyle="steps-mid", - # label="Fitted DEM", - # color="blue", - # ) - - # plt.xlabel("log₁₀ T [K]") - # # plt.ylabel("DEM [cm⁻⁵ K⁻¹]") - # ylabel = r"DEM per $\log_{10}T$ [cm$^{-5}$]" - # plt.title("Initial vs Fitted DEM") - # plt.legend() - # plt.grid(True) - # if logscale: - # plt.yscale("log") - # plt.ylabel(r"DEM [cm$^{-5}$ K$^{-1}$] (log-scaled)") - # else: - # plt.ylabel(r"DEM [cm$^{-5}$ K$^{-1}$]") - # plt.tight_layout() - # plt.show() - - # print(f"[Plot] Peak Initial DEM: {np.max(initial_dem_vals):.2e}") - # print(f"[Plot] Peak Fitted DEM: {np.max(best_fit_vals):.2e}") def summary(self): print("XRTpy DEM Iterative Setup Summary") diff --git 
a/xrtpy/xrt_dem_iterative/xrt_dem_statistics.py b/xrtpy/xrt_dem_iterative/xrt_dem_statistics.py index 283df03c3..aef921b03 100644 --- a/xrtpy/xrt_dem_iterative/xrt_dem_statistics.py +++ b/xrtpy/xrt_dem_iterative/xrt_dem_statistics.py @@ -25,3 +25,53 @@ class ComputeDEMStatistics: def __init__(self, dem_solver): self.dem_solver = dem_solver + def compute_chi_squared(self): + """ + Compute chi-squared and reduced chi-squared between observed and modeled intensities. + + Returns + ------- + chi2 : float + Total chi-squared value. + chi2_red : float or None + Reduced chi-squared (chi2 / dof), or None if dof <= 0. + """ + if not hasattr(self.dem_solver, "fitted_dem"): + raise RuntimeError("Must run fit_dem() before computing chi-squared.") + + I_model = self.dem_solver.response_matrix @ self.dem_solver.fitted_dem + I_obs = self.dem_solver._observed_intensities + + abs_error = np.maximum( + self.dem_solver.min_error, + self.dem_solver.relative_error * I_obs + ) + + chi2 = np.sum(((I_model - I_obs) / abs_error) ** 2) + dof = len(I_obs) - len(self.dem_solver.fitted_dem) + chi2_red = chi2 / dof if dof > 0 else None + + return chi2, chi2_red + + def print_residuals(self): + """ + Print residuals and modeled vs. observed intensities (IDL-style diagnostics). 
+ """ + I_model = self.dem_solver.response_matrix @ self.dem_solver.fitted_dem + I_obs = self.dem_solver._observed_intensities + abs_error = np.maximum( + self.dem_solver.min_error, + self.dem_solver.relative_error * I_obs + ) + + residuals = (I_model - I_obs) / abs_error + + print("\n[DEM RESIDUALS PER FILTER]") + print("---------------------------") + for i, name in enumerate(self.dem_solver.filter_names): + print( + f"{name:<20} Obs: {I_obs[i]:.2f} Model: {I_model[i]:.2f} " + f"Error: {abs_error[i]:.2f} Residual: {residuals[i]:+.2f}" + ) + print(f"\nMean residual: {np.mean(residuals):+.2f}") + print(f"Std residual: {np.std(residuals):.2f}") \ No newline at end of file From 05f6b4ec1245d8dbf7124990ff4cbecb610513d5 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Fri, 29 Aug 2025 18:41:22 -0400 Subject: [PATCH 051/121] Applying black --- xrtpy/xrt_dem_iterative/dem_solver.py | 109 +++++++++++++------------- 1 file changed, 56 insertions(+), 53 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index bb8d42aa5..de69fe416 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -11,12 +11,11 @@ from xrtpy.util.filters import validate_and_format_filters -#DEM specific +# DEM specific from xrtpy.xrt_dem_iterative.monte_carlo_iteration import MonteCarloIteration from xrtpy.xrt_dem_iterative.xrt_dem_statistics import ComputeDEMStatistics - class XRTDEMIterative: """ Estimate the differential emission measure (DEM) from Hinode/XRT data @@ -357,25 +356,32 @@ def create_logT_grid(self): np.log(10.0) * self.dlogT ) # for IDL-style intergral DEM(T) * R(T) * T dlnT - IDL “regular logT grid” - def _interpolate_responses_to_grid(self): """ Interpolate each filter's temperature response onto self.logT (log10 K). Stores a dense matrix with shape (n_filters, n_temperatures), unitless numeric. """ if not hasattr(self, "logT"): - raise AttributeError("Temperature grid missing. 
Call create_logT_grid() first.") + raise AttributeError( + "Temperature grid missing. Call create_logT_grid() first." + ) rows = [] - for i, (T_orig, R_orig, fname) in enumerate(zip(self.response_temperatures, self.response_values, self.filter_names)): + for i, (T_orig, R_orig, fname) in enumerate( + zip(self.response_temperatures, self.response_values, self.filter_names) + ): logT_orig = np.log10(T_orig.to_value(u.K)) - #response_vals = R_orig.to_value(u.DN / u.s / u.pix / (u.cm**5)) + # response_vals = R_orig.to_value(u.DN / u.s / u.pix / (u.cm**5)) response_vals = R_orig.to_value((u.cm**5 * u.DN) / (u.pix * u.s)) - #Remove later + # Remove later print(f"→ Channel {i}: {fname}") - print(f" logT_orig.shape = {logT_orig.shape}, response_vals.shape = {response_vals.shape}") - print(f" logT range: {logT_orig.min():.2f}–{logT_orig.max():.2f}, grid: {self.logT.min():.2f}–{self.logT.max():.2f}") + print( + f" logT_orig.shape = {logT_orig.shape}, response_vals.shape = {response_vals.shape}" + ) + print( + f" logT range: {logT_orig.min():.2f}–{logT_orig.max():.2f}, grid: {self.logT.min():.2f}–{self.logT.max():.2f}" + ) try: interp_func = interp1d( @@ -399,7 +405,6 @@ def _interpolate_responses_to_grid(self): if self._response_matrix.shape != (len(self.responses), self.logT.size): raise RuntimeError("Interpolated response matrix has unexpected shape.") - @property def response_matrix(self): """ @@ -484,13 +489,14 @@ def _estimate_initial_dem( dem_logT = np.zeros(n_temps) # 27 for i in range(n_filters): row = R[i, :] - ratio = np.where(row > 1e-30, I_obs[i] / row, 0.0) # cm^-5 first estimate for the DEM + ratio = np.where( + row > 1e-30, I_obs[i] / row, 0.0 + ) # cm^-5 first estimate for the DEM dem_logT += ratio dem_logT /= n_filters - #IDL - for i=0,n_channels-1 do dem += obs_val[i] / response[i,*] - #IDL - dem = dem / n_channels - - + # IDL - for i=0,n_channels-1 do dem += obs_val[i] / response[i,*] + # IDL - dem = dem / n_channels + # DO NOT divide by self._dT here. 
# Optional smoothing in per-logT space: if smooth: @@ -592,7 +598,6 @@ def _estimate_initial_dem( print("Initial DEM estimate complete") - # STEP 1 - Each temperature bin gets its own parameter, initialized with your initial DEM estimate def _build_lmfit_parameters(self): """ @@ -632,7 +637,7 @@ def _build_lmfit_parameters(self): for i, val in enumerate(self.initial_dem_logT.to_value(u.cm**-5)): params.add(f"dem_{i}", value=float(val), min=0.0) self.lmfit_params = params - + print(f"Built {len(params)} lmfit parameters for DEM fit") # STEP 2: Build the residual function @@ -702,13 +707,12 @@ def fit_dem(self): if not hasattr(self, "lmfit_params"): self._build_lmfit_parameters() - #Mimght not need- already using in _build_lmfit_parameters(). + # Mimght not need- already using in _build_lmfit_parameters(). # params = Parameters() # for i, val in enumerate(self.initial_dem_logT.to_value(u.cm**-5)): # params.add(f"dem_{i}", value=float(val), min=0.0) # self.lmfit_params = params - print("Starting DEM optimization..") result = minimize( self._residuals, @@ -749,7 +753,6 @@ def fit_dem(self): return result - def print_residual_diagnostics(self, params): dem_logT = np.array([params[f"dem_{i}"].value for i in range(len(self.logT))]) @@ -772,7 +775,7 @@ def plot_dem_fit( show_initial: bool = True, ax=None, title: str | None = None, - show: bool = True + show: bool = True, ): """ Plot the fitted DEM (and optional initial DEM) using a consistent scale. 
@@ -859,15 +862,21 @@ def plot_dem_fit( # --- plot without double-logging --- if logscale: - ax.semilogy(logT, y_fit_linear, linestyle='-', label="Fitted DEM") + ax.semilogy(logT, y_fit_linear, linestyle="-", label="Fitted DEM") if y_init_linear is not None: - ax.semilogy(logT, y_init_linear,linestyle='-', alpha=0.7, label="Initial DEM") + ax.semilogy( + logT, y_init_linear, linestyle="-", alpha=0.7, label="Initial DEM" + ) ax.set_ylabel(y_label_lin) else: - ax.plot(logT, np.log10(y_fit_linear),linestyle='-', label="Fitted DEM") + ax.plot(logT, np.log10(y_fit_linear), linestyle="-", label="Fitted DEM") if y_init_linear is not None: ax.plot( - logT, np.log10(y_init_linear), linestyle='-', alpha=0.7, label="Initial DEM" + logT, + np.log10(y_init_linear), + linestyle="-", + alpha=0.7, + label="Initial DEM", ) ax.set_ylabel(y_label_log10) @@ -879,10 +888,9 @@ def plot_dem_fit( ax.grid(True, alpha=0.3) return ax - def solve(self): - print("[•] Running DEM fit...") - + print("Running DEM fit...") + # Build spline parameters self._build_lmfit_parameters() @@ -897,38 +905,33 @@ def solve(self): self.result = result # Extract fitted DEM from lmfit parameters - dem_best_logT = np.array([result.params[f"dem_{i}"].value for i in range(len(self.logT))]) + dem_best_logT = np.array( + [result.params[f"dem_{i}"].value for i in range(len(self.logT))] + ) self.fitted_dem_logT = dem_best_logT * u.cm**-5 - self.fitted_dem = (dem_best_logT / (np.log(10.0) * self.T.to_value(u.K))) * u.cm**-5 / u.K + self.fitted_dem = ( + (dem_best_logT / (np.log(10.0) * self.T.to_value(u.K))) * u.cm**-5 / u.K + ) - if not result.success: - print("[⚠️] DEM fit did not fully converge:") - print(" →", result.message) + # if not result.success: + # print(" DEM fit did not fully converge:") + # print(" >", result.message) - print("[✓] DEM fit complete") - print(f" → Reduced chi-squared: {result.chisqr / len(self._observed_intensities):.2f}") - print(f" → Total iterations: {result.nfev}") + # print(" DEM fit 
complete") + # print(f" > Reduced chi-squared: {result.chisqr / len(self._observed_intensities):.2f}") + # print(f" > Total iterations: {result.nfev}") + # # Automatically run MC if enabled + # if self.monte_carlo_runs > 0: + # print(f"Running Monte Carlo with {self.monte_carlo_runs} trials...") + # from .monte_carlo_iteration import MonteCarloIteration + # mc = MonteCarloIteration(self) + # mc.run_mc_simulation(n_draws=self.monte_carlo_runs) + # self.mc_results = mc + # self.mc_stats = mc.mc_stats - # Automatically run MC if enabled - if self.monte_carlo_runs > 0: - print(f"[•] Running Monte Carlo with {self.monte_carlo_runs} trials...") - from .monte_carlo_iteration import MonteCarloIteration - mc = MonteCarloIteration(self) - mc.run_mc_simulation(n_draws=self.monte_carlo_runs) - self.mc_results = mc - self.mc_stats = mc.mc_stats - - # #Stats - # self.dem_stats = ComputeDEMStatistics(self) - # chi2, chi2_red = self.dem_stats.compute_chi_squared() - # print(f"[χ²] Total χ²: {chi2:.2f}, Reduced χ²: {chi2_red:.2f}") - # self.dem_stats.print_residuals() - return result - - def summary(self): print("XRTpy DEM Iterative Setup Summary") print("-" * 40) From 04c1163802e8374289e9f41cd9bba4d9d231dce7 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Fri, 29 Aug 2025 18:42:59 -0400 Subject: [PATCH 052/121] Minor update to MC code --- .../monte_carlo_iteration.py | 25 +++++++++++++++---- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py b/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py index 685aa6b8c..b9e145d19 100644 --- a/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py +++ b/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py @@ -1,15 +1,30 @@ __all__ = [ - "Monte_Carlo_Iteration", + "MonteCarloIteration", ] import numpy as np -class Monte_Carlo_Iteration: +class MonteCarloIteration: + + def __init__(self, dem_solver): + """ + Parameters + ---------- + dem_solver : XRTDEMIterative + A fitted DEM object with 
observed intensities, errors, and temperature grid. + """ + self.dem_solver = dem_solver + + if not hasattr(dem_solver, "logT"): + raise RuntimeError("DEM solver must have a defined temperature grid.") + if not hasattr(dem_solver, "intensity_errors"): + raise RuntimeError("DEM solver must define intensity errors.") - # def __init__( ): + self.n_bins = len(dem_solver.logT) + self.n_filters = len(dem_solver.observed_intensities) - def generate_mc_realizations(self, n_realizations=100, seed=None): + def generate_mc_realizations(self, n_realizations=100, seed=None,reject_negative=True) """ Generate randomized intensity realizations for Monte Carlo uncertainty estimation. @@ -74,4 +89,4 @@ def run_mc_simulation(self, n_realizations=100, seed=None): loc=self._observed_intensities[:, None], # shape (5, 1) scale=errors[:, None], # shape (5, 1) size=(len(self._observed_intensities), n_realizations), # shape (5, 20) - ) + ) \ No newline at end of file From 427cff088a4d854592b8623eaf9fb3cb904eed76 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Fri, 29 Aug 2025 18:54:00 -0400 Subject: [PATCH 053/121] If lmfit is required for any normal use (not just testing) --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 84dc2e569..8a2dffea9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -44,6 +44,7 @@ dependencies = [ "scikit-image>=0.21", "scipy>=1.11.1", "sunpy[map]>=5.1", + "lmfit>=1.2.2", ] optional-dependencies.dev = [ From 5538c8db4cf35f8a67e1c7125138e3fc448957dc Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Fri, 29 Aug 2025 18:59:23 -0400 Subject: [PATCH 054/121] Syntax error --- xrtpy/xrt_dem_iterative/monte_carlo_iteration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py b/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py index b9e145d19..9f6ec7c7c 100644 --- a/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py +++ 
b/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py @@ -24,7 +24,7 @@ def __init__(self, dem_solver): self.n_bins = len(dem_solver.logT) self.n_filters = len(dem_solver.observed_intensities) - def generate_mc_realizations(self, n_realizations=100, seed=None,reject_negative=True) + def generate_mc_realizations(self, n_realizations=100, seed=None,reject_negative=True): """ Generate randomized intensity realizations for Monte Carlo uncertainty estimation. From 8835e6fbc8071d82fc05d3811021d44e3342abec Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Fri, 29 Aug 2025 19:08:52 -0400 Subject: [PATCH 055/121] missing a validate_inputs() method --- xrtpy/xrt_dem_iterative/dem_solver.py | 87 ++++++++++++++++++++++++--- 1 file changed, 78 insertions(+), 9 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index de69fe416..a8c3c25f6 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -2,6 +2,8 @@ "XRTDEMIterative", ] +#import pdb; pdb.set_trace() + import warnings import astropy.units as u @@ -16,6 +18,67 @@ from xrtpy.xrt_dem_iterative.xrt_dem_statistics import ComputeDEMStatistics +def validate_inputs(self) -> None: + """ + Validate user-provided inputs. Raises ValueError on any issue. + + This is intentionally separate from __init__ so tests and users can + construct the object first, then explicitly validate. 
+ """ + # 1) observed_channel non-empty + if self.observed_channel is None or len(self.observed_channel) == 0: + raise ValueError("`observed_channel` is required and cannot be empty.") + + # 2) intensities: length & finite + if self._observed_intensities is None or len(self._observed_intensities) == 0: + raise ValueError("`observed_intensities` is required and cannot be empty.") + if not np.all(np.isfinite(self._observed_intensities)): + raise ValueError("`observed_intensities` must be finite numbers.") + + # 3) responses present + if self.responses is None or len(self.responses) == 0: + raise ValueError("`temperature_responses` is required and cannot be empty.") + + # 4) lengths consistent between filters/intensities/responses + if not ( + len(self._observed_intensities) == len(self.responses) == len(self.observed_channel) + ): + raise ValueError( + "Length mismatch: intensities, responses, and observed_channel must match." + ) + + # 5) temperature grid sanity + if not (self._min_T < self._max_T): + raise ValueError("min_T must be < max_T.") + if self._dT <= 0: + raise ValueError("dT must be a positive scalar.") + n_pts = int(np.floor((self._max_T - self._min_T) / self._dT + 1e-9)) + 1 + if n_pts < 4: + raise ValueError("Temperature grid must have at least 4 points.") + + # 6) grid range inside every response + for r in self.responses: + logT_grid = np.log10(r.temperature.value) + if not (self._min_T >= logT_grid.min() and self._max_T <= logT_grid.max()): + raise ValueError( + f"The specified temperature range [{self._min_T}, {self._max_T}] " + "is outside the bounds of one or more filter response grids." + ) + + # 7) intensity_errors length (only if provided) + if self._intensity_errors is not None: + if self._intensity_errors.shape != self._observed_intensities.shape: + raise ValueError( + "Length of intensity_errors must match observed_intensities." 
+ ) + if not np.all(np.isfinite(self._intensity_errors)) or np.any(self._intensity_errors < 0): + raise ValueError("`intensity_errors` must be finite and >= 0.") + + # 8) monte_carlo_runs, max_iterations already checked earlier in __init__ (keep if you prefer) + # Nothing to return; success means no exception raised. + return None + + class XRTDEMIterative: """ Estimate the differential emission measure (DEM) from Hinode/XRT data @@ -95,18 +158,24 @@ def __init__( raise ValueError("`observed_intensities` must be finite numbers.") # Errors + # NEW (defer to validate_inputs) if intensity_errors is not None: self._intensity_errors = np.asarray(intensity_errors, dtype=float) - if self._intensity_errors.shape != self._observed_intensities.shape: - raise ValueError( - "Length of intensity_errors must match observed_intensities." - ) - if not np.all(np.isfinite(self._intensity_errors)) or np.any( - self._intensity_errors < 0 - ): - raise ValueError("`intensity_errors` must be finite and >= 0.") else: - self._intensity_errors = None # Will be computed later + self._intensity_errors = None + + # if intensity_errors is not None: + # self._intensity_errors = np.asarray(intensity_errors, dtype=float) + # if self._intensity_errors.shape != self._observed_intensities.shape: + # raise ValueError( + # "Length of intensity_errors must match observed_intensities." 
+ # ) + # if not np.all(np.isfinite(self._intensity_errors)) or np.any( + # self._intensity_errors < 0 + # ): + # raise ValueError("`intensity_errors` must be finite and >= 0.") + # else: + # self._intensity_errors = None # Will be computed later # Store temperature grid parameters self._dT = float(dT) From 0bc097ecc045c2fc0e4f225736f49a6264d82f40 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Fri, 29 Aug 2025 19:17:31 -0400 Subject: [PATCH 056/121] Putting validate_inputs() inside class --- xrtpy/xrt_dem_iterative/dem_solver.py | 121 ++++++++++++++------------ 1 file changed, 63 insertions(+), 58 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index a8c3c25f6..5b42bb2f7 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -18,65 +18,7 @@ from xrtpy.xrt_dem_iterative.xrt_dem_statistics import ComputeDEMStatistics -def validate_inputs(self) -> None: - """ - Validate user-provided inputs. Raises ValueError on any issue. - - This is intentionally separate from __init__ so tests and users can - construct the object first, then explicitly validate. 
- """ - # 1) observed_channel non-empty - if self.observed_channel is None or len(self.observed_channel) == 0: - raise ValueError("`observed_channel` is required and cannot be empty.") - - # 2) intensities: length & finite - if self._observed_intensities is None or len(self._observed_intensities) == 0: - raise ValueError("`observed_intensities` is required and cannot be empty.") - if not np.all(np.isfinite(self._observed_intensities)): - raise ValueError("`observed_intensities` must be finite numbers.") - - # 3) responses present - if self.responses is None or len(self.responses) == 0: - raise ValueError("`temperature_responses` is required and cannot be empty.") - - # 4) lengths consistent between filters/intensities/responses - if not ( - len(self._observed_intensities) == len(self.responses) == len(self.observed_channel) - ): - raise ValueError( - "Length mismatch: intensities, responses, and observed_channel must match." - ) - - # 5) temperature grid sanity - if not (self._min_T < self._max_T): - raise ValueError("min_T must be < max_T.") - if self._dT <= 0: - raise ValueError("dT must be a positive scalar.") - n_pts = int(np.floor((self._max_T - self._min_T) / self._dT + 1e-9)) + 1 - if n_pts < 4: - raise ValueError("Temperature grid must have at least 4 points.") - - # 6) grid range inside every response - for r in self.responses: - logT_grid = np.log10(r.temperature.value) - if not (self._min_T >= logT_grid.min() and self._max_T <= logT_grid.max()): - raise ValueError( - f"The specified temperature range [{self._min_T}, {self._max_T}] " - "is outside the bounds of one or more filter response grids." - ) - - # 7) intensity_errors length (only if provided) - if self._intensity_errors is not None: - if self._intensity_errors.shape != self._observed_intensities.shape: - raise ValueError( - "Length of intensity_errors must match observed_intensities." 
- ) - if not np.all(np.isfinite(self._intensity_errors)) or np.any(self._intensity_errors < 0): - raise ValueError("`intensity_errors` must be finite and >= 0.") - # 8) monte_carlo_runs, max_iterations already checked earlier in __init__ (keep if you prefer) - # Nothing to return; success means no exception raised. - return None class XRTDEMIterative: @@ -260,6 +202,69 @@ def __init__( except Exception as e: raise ValueError(f"Invalid solv_factor: {e}") + + + #### TEST GIT CI TEST ##### + + def validate_inputs(self) -> None: + """ + Validate user-provided inputs. Raises ValueError on any issue. + + Intentionally separate from __init__ so tests and users can construct + the object first, then explicitly validate (matches test expectations). + """ + # 1) observed_channel non-empty + if self.observed_channel is None or len(self.observed_channel) == 0: + raise ValueError("`observed_channel` is required and cannot be empty.") + + # 2) intensities: length & finite + if self._observed_intensities is None or len(self._observed_intensities) == 0: + raise ValueError("`observed_intensities` is required and cannot be empty.") + if not np.all(np.isfinite(self._observed_intensities)): + raise ValueError("`observed_intensities` must be finite numbers.") + + # 3) responses present + if self.responses is None or len(self.responses) == 0: + raise ValueError("`temperature_responses` is required and cannot be empty.") + + # 4) lengths consistent between filters/intensities/responses + if not ( + len(self._observed_intensities) == len(self.responses) == len(self.observed_channel) + ): + raise ValueError( + "Length mismatch: intensities, responses, and observed_channel must match." 
+ ) + + # 5) temperature grid sanity + if not (self._min_T < self._max_T): + raise ValueError("min_T must be < max_T.") + if self._dT <= 0: + raise ValueError("dT must be a positive scalar.") + n_pts = int(np.floor((self._max_T - self._min_T) / self._dT + 1e-9)) + 1 + if n_pts < 4: + raise ValueError("Temperature grid must have at least 4 points.") + + # 6) grid range inside every response + for r in self.responses: + logT_grid = np.log10(r.temperature.value) + if not (self._min_T >= logT_grid.min() and self._max_T <= logT_grid.max()): + raise ValueError( + f"The specified temperature range [{self._min_T}, {self._max_T}] " + "is outside the bounds of one or more filter response grids." + ) + + # 7) intensity_errors length & finiteness (only if provided) + if self._intensity_errors is not None: + if self._intensity_errors.shape != self._observed_intensities.shape: + raise ValueError( + "Length of intensity_errors must match observed_intensities." + ) + if not np.all(np.isfinite(self._intensity_errors)) or np.any(self._intensity_errors < 0): + raise ValueError("`intensity_errors` must be finite and >= 0.") + + # success ⇒ no return value + return None + ########################### def __repr__(self): return ( f" Date: Fri, 29 Aug 2025 19:22:41 -0400 Subject: [PATCH 057/121] Cleaning codedem_solver.py --- xrtpy/xrt_dem_iterative/dem_solver.py | 30 ++++++++++++--------------- 1 file changed, 13 insertions(+), 17 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 5b42bb2f7..1136b0b74 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -2,23 +2,18 @@ "XRTDEMIterative", ] -#import pdb; pdb.set_trace() +# import pdb; pdb.set_trace() import warnings import astropy.units as u import numpy as np from lmfit import Parameters, minimize -from scipy.interpolate import interp1d, CubicSpline +from scipy.interpolate import interp1d from xrtpy.util.filters import 
validate_and_format_filters # DEM specific -from xrtpy.xrt_dem_iterative.monte_carlo_iteration import MonteCarloIteration -from xrtpy.xrt_dem_iterative.xrt_dem_statistics import ComputeDEMStatistics - - - class XRTDEMIterative: @@ -202,10 +197,8 @@ def __init__( except Exception as e: raise ValueError(f"Invalid solv_factor: {e}") - - #### TEST GIT CI TEST ##### - + def validate_inputs(self) -> None: """ Validate user-provided inputs. Raises ValueError on any issue. @@ -229,7 +222,9 @@ def validate_inputs(self) -> None: # 4) lengths consistent between filters/intensities/responses if not ( - len(self._observed_intensities) == len(self.responses) == len(self.observed_channel) + len(self._observed_intensities) + == len(self.responses) + == len(self.observed_channel) ): raise ValueError( "Length mismatch: intensities, responses, and observed_channel must match." @@ -259,11 +254,14 @@ def validate_inputs(self) -> None: raise ValueError( "Length of intensity_errors must match observed_intensities." ) - if not np.all(np.isfinite(self._intensity_errors)) or np.any(self._intensity_errors < 0): + if not np.all(np.isfinite(self._intensity_errors)) or np.any( + self._intensity_errors < 0 + ): raise ValueError("`intensity_errors` must be finite and >= 0.") # success ⇒ no return value return None + ########################### def __repr__(self): return ( @@ -442,7 +440,7 @@ def _interpolate_responses_to_grid(self): rows = [] for i, (T_orig, R_orig, fname) in enumerate( - zip(self.response_temperatures, self.response_values, self.filter_names) + zip(self.response_temperatures, self.response_values, self.filter_names, strict=False) ): logT_orig = np.log10(T_orig.to_value(u.K)) # response_vals = R_orig.to_value(u.DN / u.s / u.pix / (u.cm**5)) @@ -752,9 +750,7 @@ def _residuals(self, params): # 4. 
Return normalized residuals residuals = (I_model - self._observed_intensities) / errors print( - "[-Residuals stats > mean: {:.2e}, std: {:.2e}".format( - np.mean(residuals), np.std(residuals) - ) + f"[-Residuals stats > mean: {np.mean(residuals):.2e}, std: {np.std(residuals):.2e}" ) return residuals @@ -855,8 +851,8 @@ def plot_dem_fit( Plot the fitted DEM (and optional initial DEM) using a consistent scale. """ - import numpy as np import matplotlib.pyplot as plt + import numpy as np if ax is None: fig, ax = plt.subplots() From 6ec50f6b27c5b8bcb45fcc7e754b86030c4e409a Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Fri, 29 Aug 2025 19:23:03 -0400 Subject: [PATCH 058/121] Cleaning code --- xrtpy/xrt_dem_iterative/monte_carlo_iteration.py | 4 ++-- xrtpy/xrt_dem_iterative/xrt_dem_statistics.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py b/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py index 9f6ec7c7c..63995e53c 100644 --- a/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py +++ b/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py @@ -6,7 +6,7 @@ class MonteCarloIteration: - + def __init__(self, dem_solver): """ Parameters @@ -89,4 +89,4 @@ def run_mc_simulation(self, n_realizations=100, seed=None): loc=self._observed_intensities[:, None], # shape (5, 1) scale=errors[:, None], # shape (5, 1) size=(len(self._observed_intensities), n_realizations), # shape (5, 20) - ) \ No newline at end of file + ) diff --git a/xrtpy/xrt_dem_iterative/xrt_dem_statistics.py b/xrtpy/xrt_dem_iterative/xrt_dem_statistics.py index aef921b03..daeebe183 100644 --- a/xrtpy/xrt_dem_iterative/xrt_dem_statistics.py +++ b/xrtpy/xrt_dem_iterative/xrt_dem_statistics.py @@ -21,7 +21,7 @@ class ComputeDEMStatistics: print_residuals() Print modeled vs. observed intensities and residuals (normalized by error). 
""" - + def __init__(self, dem_solver): self.dem_solver = dem_solver @@ -74,4 +74,4 @@ def print_residuals(self): f"Error: {abs_error[i]:.2f} Residual: {residuals[i]:+.2f}" ) print(f"\nMean residual: {np.mean(residuals):+.2f}") - print(f"Std residual: {np.std(residuals):.2f}") \ No newline at end of file + print(f"Std residual: {np.std(residuals):.2f}") From 7e939d8ed68371af553501ec55870a2078fbe583 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Fri, 29 Aug 2025 19:23:18 -0400 Subject: [PATCH 059/121] Cleaning code - test --- xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py b/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py index cb66dc1f9..a8ca5e422 100644 --- a/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py +++ b/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py @@ -1,11 +1,11 @@ -from pathlib import Path + +from importlib.resources import files import numpy as np import pytest import sunpy import sunpy.io.special import sunpy.map -from importlib.resources import files from xrtpy.response.channel import Channel From a90645c541c54035fbbd269761a85352b88d20de Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Fri, 12 Sep 2025 15:58:43 -0400 Subject: [PATCH 060/121] Updated info about xrt --- docs/about_xrt.rst | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/about_xrt.rst b/docs/about_xrt.rst index c9848a872..575c6901b 100644 --- a/docs/about_xrt.rst +++ b/docs/about_xrt.rst @@ -8,8 +8,8 @@ Hinode ====== .. image:: _static/images/hinode_satellite.png - :alt: Hinode Satellite - :align: center + :alt: Hinode Satellite + :align: center Hinode is a joint mission involving the space agencies of Japan, the United States, Europe, and the United Kingdom. It is depicted in the *illustration shown above*. @@ -25,9 +25,9 @@ The X-Ray Telescope =================== .. 
image:: _static/images/XRT_composite_image_full_disk_14February2015.png - :alt: XRT Composite Image - :align: center - :scale: 50% + :alt: XRT Composite Image + :align: center + :scale: 50% The X-Ray Telescope (XRT), depicted as a long linear black tube on the Hinode spacecraft is a crucial instrument for observing the solar corona's most intense regions, with temperatures ranging from 1,000,000 to 10,000,000 Kelvin. The image below is a synoptic composite from February 14, 2015, created using the Al-Mesh/Be-Thin/Al-Med filters. @@ -36,7 +36,7 @@ For a comprehensive overview of XRT's mission and capabilities, please visit the .. tip:: - Visit the `XRT Picture of the Week`_ and the `Hinode-XRT YouTube`_ page for captivating visual content showcasing the XRT's solar observations. + Visit the `XRT Picture of the Week`_ and the `Hinode-XRT YouTube`_ page for captivating visual content showcasing the XRT's solar observations. XRT uses two sequentially positioned filter wheels, as shown in the diagram below, where each wheel houses a variety of filters. By rotating these wheels, scientists can select different filters to study the Sun in different wavelengths, thereby enhancing the resolution and quality of solar images. @@ -73,8 +73,8 @@ The existing filters are structured as follows: The process is the same for all XRT filter channels. .. image:: _static/images/XRT_filter_wheels_Sun_View_Diagram.png - :alt: Diagram of the XRT Filter Wheels - :align: center + :alt: Diagram of the XRT Filter Wheels + :align: center Data Products ************* @@ -99,8 +99,8 @@ The XRT software was originally created in the Interactive Data Language (IDL). .. note:: - Please note that the `SolarSoft XRT Analysis Guide`_ does not serve as a guide for using XRTpy. - It focuses solely on the analysis of XRT data using the IDL software. + Please note that the `SolarSoft XRT Analysis Guide`_ does not serve as a guide for using XRTpy. 
+ It focuses solely on the analysis of XRT data using the IDL software. .. _Hinode-XRT YouTube: https://www.youtube.com/user/xrtpow .. _Interactive Data Language: https://www.l3harrisgeospatial.com/Software-Technology/IDL From 6cedf2a65b428937106b49c53d1d36e57a4dc3b0 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Fri, 12 Sep 2025 16:02:50 -0400 Subject: [PATCH 061/121] UPdated with two reference for DEM paper --- docs/bibliography.bib | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/docs/bibliography.bib b/docs/bibliography.bib index 9c26a4eb2..59a01cde0 100644 --- a/docs/bibliography.bib +++ b/docs/bibliography.bib @@ -171,3 +171,23 @@ @ARTICLE{velasquez:2024 doi = {10.21105/joss.06396}, url = {https://doi.org/10.21105/joss.06396} } + +@ARTICLE{weber:2004, + author = {{Weber}, M.~A. and {DeLuca}, E.~E. and {Golub}, L. and {Sette}, A.~L.}, + title = "{Temperature diagnostics with multichannel imaging telescopes}", + journal = "{IAU Symposium 223: Multi-Wavelength Investigations of Solar Activity}", + year = 2004, + pages = {321--328}, + publisher = {Cambridge University Press}, + doi = {10.1017/S1743921304006088} +} + +@ARTICLE{golub:2004, + author = {{Golub}, L. and {DeLuca}, E.~E. and {Sette}, A. 
and {Weber}, M.}, + title = "{DEM analysis with the X-Ray Telescope (XRT) for Solar-B}", + journal = "{ASP Conference Series, The Solar-B Mission and the Outlook for Space-Based Solar Physics}", + year = 2004, + volume = {325}, + pages = {217--222}, + publisher = {Astronomical Society of the Pacific} +} From 2c2459def361a0e4b6332d3c20aabcc8fb35ef91 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Fri, 12 Sep 2025 16:04:08 -0400 Subject: [PATCH 062/121] WIP: local changes before pull --- docs/conf.py | 3 +- docs/dem_overview.rst | 172 ++++++++++++++++++ docs/gallery/data_processing/README.txt | 11 ++ docs/gallery/data_processing/__init__.py | 0 docs/gallery/data_processing/deconvolving.py | 45 +++++ .../data_processing/remove_lightleak.py | 68 +++++++ .../temperature_from_filter_ratios.py | 70 +++++++ docs/gallery/image_filtering/README.txt | 6 + docs/gallery/image_filtering/__init__.py | 0 docs/gallery/image_filtering/sorting_data.py | 92 ++++++++++ docs/gallery/instrument_response/README.txt | 10 + docs/gallery/instrument_response/__init__.py | 0 docs/gallery/instrument_response/channels.py | 103 +++++++++++ .../instrument_response/effective_area.py | 75 ++++++++ .../temperature_response.py | 88 +++++++++ docs/getting_started.rst | 64 +++---- docs/index.rst | 8 + 17 files changed, 780 insertions(+), 35 deletions(-) create mode 100644 docs/dem_overview.rst create mode 100644 docs/gallery/data_processing/README.txt create mode 100644 docs/gallery/data_processing/__init__.py create mode 100644 docs/gallery/data_processing/deconvolving.py create mode 100644 docs/gallery/data_processing/remove_lightleak.py create mode 100644 docs/gallery/data_processing/temperature_from_filter_ratios.py create mode 100644 docs/gallery/image_filtering/README.txt create mode 100644 docs/gallery/image_filtering/__init__.py create mode 100644 docs/gallery/image_filtering/sorting_data.py create mode 100644 docs/gallery/instrument_response/README.txt create mode 100644 
docs/gallery/instrument_response/__init__.py create mode 100644 docs/gallery/instrument_response/channels.py create mode 100644 docs/gallery/instrument_response/effective_area.py create mode 100644 docs/gallery/instrument_response/temperature_response.py diff --git a/docs/conf.py b/docs/conf.py index 941c35134..c3681e6e0 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -70,7 +70,7 @@ ogp_image = "https://raw.githubusercontent.com/HinodeXRT/xrtpy/main/docs/_static/images/xrtpy_logo.png" ogp_use_first_image = True ogp_description_length = 160 -ogp_custom_meta_tags = ('',) +#ogp_custom_meta_tags = ('',) # Suppress warnings about overriding directives as we overload some of the # doctest extensions. @@ -230,6 +230,7 @@ "feedback_communication*": [], "contributing*": [], "code_of_conduct*": [], + "dem_overview*": [], } # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, diff --git a/docs/dem_overview.rst b/docs/dem_overview.rst new file mode 100644 index 000000000..a74394856 --- /dev/null +++ b/docs/dem_overview.rst @@ -0,0 +1,172 @@ +.. _xrtpy-dem-overview: + +=================================== +DEM +=================================== + +.. contents:: + :local: + :depth: 2 + +Introduction +------------ +The **Differential Emission Measure (DEM)** describes how much plasma is present +in the solar corona as a function of temperature. +It is a key diagnostic for understanding coronal heating, solar flares, and +general plasma properties. + +Hinode/XRT is particularly well suited for DEM analysis because of its multiple +broadband filters, which are sensitive to different temperature ranges. + +Why DEM? +-------- +- Converts observed X-ray intensities into a thermal distribution of plasma. +- Allows comparison across instruments (e.g., Hinode/XRT, SDO/AIA, Hinode/EIS). +- Provides a physical link between observations and coronal heating models. 
+ +DEM in XRTpy +------------ +XRTpy provides a Python implementation of the iterative spline-fitting method +(originally available in IDL as `xrt_dem_iterative2.pro`). + +The DEM workflow requires three main inputs, each with specific type, shape, and units: + +1. **Observed channels (filters)** + - Type: ``str`` or ``list`` of ``str`` + - Description: Names of the filters used in the observation (e.g., ``"Al-mesh"``, ``"Be-thin"``). + +2. **Observed intensities** + - Type: array-like + - Units: DN/s (normalized per pixel) + - Description: Measured intensities corresponding to each filter. + +3. **Temperature response functions** + - Type: ``list`` of :class:`~xrtpy.response.fundamentals.TemperatureResponseFundamental` + - Units: DN s\ :sup:`-1` pix\ :sup:`-1` EM\ :sup:`-1` + - Description: Instrument temperature responses matching the filters. + These can be generated with :func:`xrtpy.response.tools.generate_temperature_responses`. + See :doc:`getting_started` for details. + +Example +------- +A simple example with two filters: + +.. code-block:: python + + from xrtpy.response.tools import generate_temperature_responses + from xrtpy.xrt_dem_iterative import XRTDEMIterative + + # Define filters and observed intensities + filters = ["Al-poly","C-poly/Ti-poly"] + intensities = [250.0, 180.0] # DN/s + + # Generate responses + responses = generate_temperature_responses( + filters, + observation_date="2007-07-10", + abundance_model="hybrid" + ) + + # Solve XRT DEM + dem_solver = XRTDEMIterative( + observed_channel=filters, + observed_intensities=intensities, + temperature_responses=responses, + ) + + dem_result = dem_solver.solve() + + dem_result.plot() + + +Comparison with IDL +------------------- +The Python solver mirrors the SolarSoft/IDL routine +`xrt_dem_iterative2.pro `_. + +While results are consistent, minor differences can occur due to +interpolation choices and optimization details. 
+ + +Mathematical Note: Ill-posed Nature of DEM Inversion +---------------------------------------------------- +The DEM problem is inherently an **ill-posed mathematical inversion**. + +Given observed intensities :math:`I_i` in channels *i*, and their +temperature response functions :math:`R_i(T)`, the relationship is: + +.. math:: + + I_i = \int DEM(T) \, R_i(T) \, dT + +Recovering :math:`DEM(T)` from a small set of broadband channels is +not unique and is technically fraught with perils. + +XRTpy (like the original IDL routine ``xrt_dem_iterative2.pro``) employs a +**forward-fitting approach**: +- A trial DEM is guessed. +- It is folded through :math:`R_i(T)` to produce "model" intensities. +- The DEM spline points are adjusted to minimize chi-square between model and observed values. + +Because the number of temperature bins typically exceeds the number +of observations, the solution is constrained by assumptions (e.g., +spline smoothness). + +Uncertainties are estimated through **Monte Carlo iterations**, where +observations are perturbed by their errors and re-fit. The resulting +distribution of DEM solutions gives an estimate of confidence. + + + +Example Extension +----------------- +In addition to the required inputs, you can provide optional parameters +to fine-tune the DEM solution. +The example below shows all options explicitly set. + +.. code-block:: python + + from xrtpy.xrt_dem_iterative import XRTDEMIterative + + dem_solver = XRTDEMIterative( + observed_channel=filters, # Filter names + observed_intensities=intensities, # Observed values + temperature_responses=responses, # Instrument responses + + intensity_errors=errors, # Obs. 
uncertainties (default: 3%) + min_T=5.6, # Min log T (default: 5.5) + max_T=7.8, # Max log T (default: 8.0) + dT=0.05, # Bin width in log T (default: 0.1) + min_error=1.5, # Minimum error floor (default: 2 DN) + relative_error=0.02, # Fractional error scaling (default: 0.03) + monte_carlo_runs=50, # # of Monte Carlo runs (default: none) + max_iterations=3000, # Solver max iterations (default: 2000) + solv_factor=1e17, # Scaling factor (default: 1e21) + ) + +.. note:: + The values shown above correspond to existing defaults in the solver, + but they are written out here to illustrate what can be tuned. + You can adjust these to best suit your analysis needs. + This mirrors the flexibility of the IDL routine + ``xrt_dem_iterative2.pro``. + +.. Acknowledgement +.. --------------- +.. *Development of the DEM solver in XRTpy has been supported in part by +.. a NASA Heliophysics Tools and Methods (HTM) program grant (ROSES-2025, +.. element B.20). This effort reflects the ongoing transition of DEM +.. capabilities from legacy IDL routines into modern, open-source Python +.. tools for the solar physics community.* + + +References +---------- +- Golub, L., et al. (2004), *Solar Physics*, 243, 63. :cite:p:`golub:2004` +- Weber, M. A., et al. (2004), *ApJ*, 605, 528. :cite:p:`weber:2004`. + +Next Steps +---------- +- See :ref:`API Reference ` for details on + ``XRTDEMIterative``. Coming soon. +- Explore example notebooks in the `examples/` directory. Coming soon. diff --git a/docs/gallery/data_processing/README.txt b/docs/gallery/data_processing/README.txt new file mode 100644 index 000000000..f34326aac --- /dev/null +++ b/docs/gallery/data_processing/README.txt @@ -0,0 +1,11 @@ +Advanced Data Processing +========================= + +This section covers advanced image correction and diagnostic tools. 
+ +Examples here show how to: +- Deconvolve images to correct for blurring +- Identify and remove light leaks +- Estimate temperature and emission measure using multi-filter observations + +These are core XRTpy functionalities for high-level solar data analysis. \ No newline at end of file diff --git a/docs/gallery/data_processing/__init__.py b/docs/gallery/data_processing/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/docs/gallery/data_processing/deconvolving.py b/docs/gallery/data_processing/deconvolving.py new file mode 100644 index 000000000..503a36e03 --- /dev/null +++ b/docs/gallery/data_processing/deconvolving.py @@ -0,0 +1,45 @@ +""" +======================= +Deconvolving XRT Images +======================= + +This example demonstrates deconvolvoing X-Ray Telescope (XRT) images using the +`xrtpy.image_correction.deconvolve` function in XRTpy. +""" + +import matplotlib.pyplot as plt +import sunpy.map +from sunpy.net import Fido +from sunpy.net import attrs as a + +from xrtpy.image_correction import deconvolve + +############################################################################## +# We will search for XRT data from the Virtual Solar Observatory (VSO) and fetch the first result. + +result = Fido.search( + a.Time("2012-06-05 21:58:39", "2012-06-05 21:59:00"), a.Instrument("xrt") +) +data_file = Fido.fetch(result[0]) + +############################################################################## +# Typically most deconvolve routines use the Richardson-Lucy deconvolution algorithm. + +xrt_map = sunpy.map.Map(data_file) +deconv_map = deconvolve(xrt_map) + +############################################################################## +# To see the effects of the deconvolution we plot both the before and after images. 
+ +fig = plt.figure(figsize=(15, 10)) + +ax = fig.add_subplot(121, projection=xrt_map) +xrt_map.plot(axes=ax, title="Original") +ax1 = fig.add_subplot(122, projection=deconv_map) +deconv_map.plot(axes=ax1, title="Deconvolved") + +ax1.coords[1].set_ticks_visible(False) +ax1.coords[1].set_ticklabel_visible(False) +fig.tight_layout() + +plt.show() diff --git a/docs/gallery/data_processing/remove_lightleak.py b/docs/gallery/data_processing/remove_lightleak.py new file mode 100644 index 000000000..51e5c6f4f --- /dev/null +++ b/docs/gallery/data_processing/remove_lightleak.py @@ -0,0 +1,68 @@ +""" +=================== +Removing Light Leak +=================== + +In this example, we show how to remove the light leak (visible stray light) +from XRT synoptic composite images. +""" + +from pathlib import Path + +import matplotlib.pyplot as plt +import sunpy.map +from astropy.utils.data import get_pkg_data_path +from astropy.visualization import ImageNormalize, SqrtStretch + +from xrtpy.image_correction import remove_lightleak + +############################################################################## +# This example will be using XRT synoptic data from the first day of summer of 2015. +# This is stored in the ``example_data`` directory of the `xrtpy` package. + +directory = get_pkg_data_path("data/example_data", package="xrtpy.image_correction") +data_file = Path(directory) / "comp_XRT20150621_055911.7.fits" +xrt_map = sunpy.map.Map(data_file) + +############################################################################## +# Removing the light leak from the composite image is done using the `xrtpy.image_correction.remove_lightleak` function. + +lightleak_map = remove_lightleak(xrt_map) + +############################################################################## +# Finally, we plot the original and light leak subtracted images side by side. 
+ +fig = plt.figure(figsize=(12, 6)) + +ax = fig.add_subplot(121, projection=xrt_map) +xrt_map.plot( + axes=ax, + title="Original", + norm=ImageNormalize(vmin=0, vmax=7e3, stretch=SqrtStretch()), +) +ax1 = fig.add_subplot(122, projection=lightleak_map) +lightleak_map.plot( + axes=ax1, + title="Light Leak Subtracted", + norm=ImageNormalize(vmin=0, vmax=7e3, stretch=SqrtStretch()), +) + +ax1.coords[1].set_ticks_visible(False) +ax1.coords[1].set_ticklabel_visible(False) +fig.tight_layout() + +############################################################################## +# They look almost identical, but the light leak has been removed from the second image. +# To confirm this we can plot the difference between the two images. + +diff_data = xrt_map.data - lightleak_map.data +# For this image, the difference is very small. +print(diff_data.min(), diff_data.max()) + +fig = plt.figure() +ax = fig.add_subplot(111) +ax.set_title("Lightleak Difference") +im = ax.imshow(diff_data, origin="lower") +fig.colorbar(im) + +plt.show() diff --git a/docs/gallery/data_processing/temperature_from_filter_ratios.py b/docs/gallery/data_processing/temperature_from_filter_ratios.py new file mode 100644 index 000000000..04d1aa9f0 --- /dev/null +++ b/docs/gallery/data_processing/temperature_from_filter_ratios.py @@ -0,0 +1,70 @@ +""" +================================================ +Calculating the temperature and emission measure +================================================ + +In this example, we will showcase how to use the filter method to calculate +the temperature and emission measure of the X-ray Telescope (XRT) on Hinode. 
+""" + +import matplotlib.pyplot as plt +import sunpy.map +from astropy.visualization import ImageNormalize, LogStretch +from sunpy.net import Fido +from sunpy.net import attrs as a + +from xrtpy.response import temperature_from_filter_ratio + +############################################################################## +# To start, we will get XRT data via ``sunpy``. +# +# It is important to use images that same size and with the smallest time separation. +# Note that not all filter ratios produce good results. + +query = Fido.search( + a.Time("2011-01-28 01:31:55", "2011-01-28 01:32:05"), a.Instrument("xrt") +) +data_files = Fido.fetch(query) +xrt_map_1 = sunpy.map.Map(data_files[0]) +xrt_map_2 = sunpy.map.Map(data_files[1]) + +############################################################################## +# The `xrtpy.response.temperature_from_filter_ratio` function has several options, mirroring +# the IDL routine xrt_teem.pro in SolarSoft in most respects.A simple call with +# no extra parameters calculates the temperature and (volume) emission measure +# for the two images without any binning or masking of the data. + +T_EM = temperature_from_filter_ratio(xrt_map_1, xrt_map_2) + +############################################################################## +# The output is a namedtuple with attributes ``Tmap``, ``EMmap``, ``Terrmap``, and ``EMerrmap``. +# As with the SolarSoft IDL routine xrt_teem.pro, the output images are logs of the quantities. +# +# ``Tmap`` is the electron temperature, ``EMmap`` is the volume emission measure, ``Terrmap`` +# is a measure of the uncertainties in the temperature determined for each pixel and ``EMerrmap`` +# is the same for the emission measure. 
+ +T_e = T_EM.Tmap + +fig = plt.figure() + +ax = plt.subplot(projection=T_e) +T_e.plot( + title="Derived Temperature", + cmap="inferno", + norm=ImageNormalize(vmin=6, vmax=7, stretch=LogStretch(10)), +) +T_e.draw_limb() +T_e.draw_grid() +plt.colorbar(label="T (K)") +plt.tight_layout() + +plt.show() + +############################################################################## +# If you want to do the same for Level 2 synoptic composite images, you have to use +# `~.make_exposure_map` to generate the exposure maps for the composite images. +# This is then passed to `xrtpy.response.temperature_from_filter_ratio` as the +# ``expmap1`` and ``expmap2`` arguments. +# Otherwise without accounting for the different exposure time per pixel, +# the temperature and emission measure will be incorrect. diff --git a/docs/gallery/image_filtering/README.txt b/docs/gallery/image_filtering/README.txt new file mode 100644 index 000000000..8b6076c10 --- /dev/null +++ b/docs/gallery/image_filtering/README.txt @@ -0,0 +1,6 @@ +Image Filtering & Visualization +=============================== + +This section includes examples that demonstrate how to sort, filter, and visualize XRT image data. + +The tools shown here help users prepare XRT observations for further scientific analysis. \ No newline at end of file diff --git a/docs/gallery/image_filtering/__init__.py b/docs/gallery/image_filtering/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/docs/gallery/image_filtering/sorting_data.py b/docs/gallery/image_filtering/sorting_data.py new file mode 100644 index 000000000..ed4fe0b98 --- /dev/null +++ b/docs/gallery/image_filtering/sorting_data.py @@ -0,0 +1,92 @@ +""" +========================= +Filtering and Visualizing +========================= + +This example provides a simple overview of filtering and visualizing XRT data. 
+""" + +import astropy.units as u +import matplotlib.pyplot as plt +import sunpy.map +from astropy.visualization import ImageNormalize, SqrtStretch +from sunpy.net import Fido +from sunpy.net import attrs as a + +############################################################################## +# To start we will download a range of XRT data from the Virtual Solar Observatory (VSO). +# The goal is to acquire a large set of files we can sort through and visualize. + +query = Fido.search( + a.Time("2021-05-21 18:51:00", "2021-05-22 00:00:00"), a.Instrument("xrt") +) +print(query) + +############################################################################## +# This query will return a large number of files, this is due to the fact we do not +# specify any additional filters. We can filter the data by specifying additional +# attributes in the query. +# +# For wavelength, we use a range that focuses the data to return only Al-Poly filter images. +# This will cut the results down in half. + +query = Fido.search( + a.Time("2021-05-21 20:51:00", "2021-05-22 00:00:00"), + a.Instrument("xrt"), + a.Wavelength(4 * u.nm, 5 * u.nm), +) +print(query) + +############################################################################## +# Now we will download the data. +# As this is still over 60 files, this process can take some time. + +xrt_files = Fido.fetch(query) + +############################################################################## +# We can now load the data into a `~sunpy.map.MapSequence` and create a animation. + +xrt_seq = sunpy.map.Map(xrt_files, sequence=True) + +fig = plt.figure() +ax = fig.add_subplot(projection=xrt_seq.maps[0]) +ani = xrt_seq.plot( + axes=ax, norm=ImageNormalize(vmin=0, vmax=5e3, stretch=SqrtStretch()) +) + +############################################################################## +# You might notice that there is a jump in the sequence. +# The size of the data and the pointing changes. 
+# We can exclude these images by filtering the data further. + +xrt_seq_filtered_shape = sunpy.map.Map( + [m for m in xrt_seq if m.data.shape == (384, 384)], sequence=True +) + +fig = plt.figure() +ax = fig.add_subplot(projection=xrt_seq.maps[0]) +ani = xrt_seq_filtered_shape.plot( + axes=ax, norm=ImageNormalize(vmin=0, vmax=5e3, stretch=SqrtStretch()) +) + +############################################################################## +# In fact, `sunpy.map.Map` provides many attributes that can be used to filter the data. +# This provides a lot of flexibility in how you can filter the data for your science objective. +# +# For example, we can filter the data by the exposure time or the detector. + +xrt_seq_filtered_exp_time = sunpy.map.Map( + [m for m in xrt_seq_filtered_shape if m.exposure_time < 0.1 * u.s], sequence=True +) + +fig = plt.figure() +ax = fig.add_subplot(projection=xrt_seq.maps[0]) +ani = xrt_seq_filtered_exp_time.plot( + axes=ax, norm=ImageNormalize(vmin=0, vmax=5e3, stretch=SqrtStretch()) +) + +############################################################################## +# If you want to save this animation to a file, you can use the ``save`` method. +# For more information on how to use this method, `see the matplotlib documentation `__. + +plt.show() diff --git a/docs/gallery/instrument_response/README.txt b/docs/gallery/instrument_response/README.txt new file mode 100644 index 000000000..c22eab767 --- /dev/null +++ b/docs/gallery/instrument_response/README.txt @@ -0,0 +1,10 @@ +Instrument Response & Calibration +================================= + +These examples illustrate how to evaluate and interpret the XRT instrument's characteristics. + +You will learn to work with: +- The instrument's effective area across different filters +- Temperature response functions for solar plasma + +This section is helpful for understanding how the instrument "sees" the Sun at different temperatures. 
\ No newline at end of file diff --git a/docs/gallery/instrument_response/__init__.py b/docs/gallery/instrument_response/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/docs/gallery/instrument_response/channels.py b/docs/gallery/instrument_response/channels.py new file mode 100644 index 000000000..485fbfaba --- /dev/null +++ b/docs/gallery/instrument_response/channels.py @@ -0,0 +1,103 @@ +""" +============================= +Exploring XRT's Configuration +============================= + +This example explores the X-Ray Telescope (XRT) instrument properties +using XRTpy's `xrtpy.response.Channel`. It provides convenient methods and attributes +to access and analyze various aspects of the XRT instrument configuration. +""" + +import matplotlib.pyplot as plt + +import xrtpy + +############################################################################## +# We begin by defining a filter channel by its common abbreviation. +# In this example we will be exploring the titanium-on-polyimide filter. +# For detailed information about various filter channels and their characteristics, you can refer to :ref:`xrtpy-about-xrt-filters`. +# +# To explore the properties and characteristics of a defined filter channel, we will create a +# `xrtpy.response.Channel`. By passing in the filter name as an input, we can work +# with the properties associated with the titanium-on-polyimide filter. + +channel = xrtpy.response.Channel("Ti-poly") + +############################################################################## +# Now that we have created our channel, we can delve into the XRT instrument and its properties. +# We will start by examining basic information about the XRT instrument. 
+ +print("Selected filter:", channel.name) +print("\nObservatory:", channel.observatory) +print("Instrument:", channel.instrument) + +############################################################################## +# It is important to note that most instrument properties of XRT remain the same +# regardless of the specific filter being used. This means that many characteristics +# and specifications of the XRT instrument, such as its dimensions, +# field of view, and detector properties, are independent of the selected filter. +# +# We can explore various characteristics of the the Charge-Coupled-Device (CCD) +# camera camera, such as its quantum efficiency and pixel size to list a few. + +print(channel.ccd.ccd_name) +print("\nPixel size: ", channel.ccd.ccd_pixel_size) +print("Full well: ", channel.ccd.ccd_full_well) +print("Gain left: ", channel.ccd.ccd_gain_left) +print("Gain right: ", channel.ccd.ccd_gain_right) +print("eV pre electron: ", channel.ccd.ccd_energy_per_electron) + +############################################################################## +# We can explore the XRT entrance filter properties utilizing ``entrancefilter``. + +print(channel.entrancefilter.entrancefilter_name) +print("Material: ", channel.entrancefilter.entrancefilter_material) +print("Thickness: ", channel.entrancefilter.entrancefilter_thickness) +print("Density: ", channel.entrancefilter.entrancefilter_density) + +############################################################################## +# XRT data is recorded through nine X-ray filters, which are implemented using two filter wheels. +# +# By utilizing the ``channel.filter_#`` notation, where ``#`` represents filter wheel 1 or 2, +# we can explore detailed information about the selected XRT channel filter. +# +# It's worth noting that sometimes the other filter will yield the result "Open," as it's not use. +# For more comprehensive information about the XRT filters, you can refer to :ref:`xrtpy-about-xrt-filters`. 
+ +print("Filter Wheel:", channel.filter_2.filter_name) +print("\nFilter material:", channel.filter_2.filter_material) +print("Thickness: ", channel.filter_2.filter_thickness) +print("Density: ", channel.filter_2.filter_density) + +############################################################################## +# We can explore geometry factors in the XRT using ``geometry``. + +print(channel.geometry.geometry_name) +print("\nFocal length:", channel.geometry.geometry_focal_len) +print("Aperture Area:", channel.geometry.geometry_aperture_area) + +############################################################################## +# The XRT is equipped with two mirrors and We can access the properties of these +# mirrors using the ``channel_mirror_#`` notation, where ``#`` represents the +# first or second mirror surface. + +print(channel.mirror_1.mirror_name) +print("Material: ", channel.mirror_1.mirror_material) +print("Density: ", channel.mirror_1.mirror_density) +print("Graze_angle: ", channel.mirror_1.mirror_graze_angle) + +############################################################################## +# Finally we can explore the XRT transmission properties + +plt.figure() + +plt.plot(channel.wavelength, channel.transmission, label=f"{channel.name}") +plt.title(f"{channel.name} filter") +plt.xlabel(r"$\lambda$ [Å]") +plt.ylabel(r"Transmittance") +# The full range goes up to 400 Å, but we will limit it to 80 Å for better visualization +plt.xlim(0, 80) +plt.grid(color="lightgrey") +plt.tight_layout() + +plt.show() diff --git a/docs/gallery/instrument_response/effective_area.py b/docs/gallery/instrument_response/effective_area.py new file mode 100644 index 000000000..f2052cfd4 --- /dev/null +++ b/docs/gallery/instrument_response/effective_area.py @@ -0,0 +1,75 @@ +""" +======================= +Effective Area Analysis +======================= + +In this example, we will explore the effective areas for different XRT filter channels. 
+Understanding the effective areas is important for accurately interpreting and quantifying the data. +""" + +import matplotlib.pyplot as plt + +import xrtpy + +############################################################################## +# Let us begin by defining a filter channel using its abbreviation. +# For example, if we want to explore the effective area for an aluminum-on-polyimide filter +# channel, we need to specify the relevant abbreviation. + +xrt_filter = "Al-poly" + +############################################################################## +# `~.EffectiveAreaFundamental` allows us to accurately determine the effective area +# based on the specified filter channel, date, and time. + +date_time = "2023-09-22T22:59:59" +eaf = xrtpy.response.EffectiveAreaFundamental(xrt_filter, date_time) + +############################################################################## +# To actually calculate the effective area function we can call :meth:`~xrtpy.response.EffectiveAreaFundamental.effective_area`. + +effective_area = eaf.effective_area() +print("Effective Area:\n", effective_area) + +############################################################################## +# Differences overtime arise from an increase of the contamination layer on the +# CCD which blocks some of the X-rays thus reducing the effective area. +# For detailed information about the calculation of the XRT CCD contaminant layer thickness, +# you can refer to +# `Montana State University `__. +# +# Additional information is provided by +# `Narukage et. al. (2011) `__. + +relative_launch_date_time = "2006-09-22T22:59:59" +eaf_launch = xrtpy.response.EffectiveAreaFundamental( + xrt_filter, relative_launch_date_time +) +launch_effective_area = eaf_launch.effective_area() + +############################################################################## +# Finally, we can plot how the effective area has changed over time. 
+ +plt.figure() + +plt.plot( + eaf.wavelength, + effective_area, + label=f"{date_time}", +) +plt.plot( + eaf.wavelength, + launch_effective_area, + label=f"{relative_launch_date_time}", +) + +plt.title("XRT Effective Area - Al-Poly") +plt.xlabel("Wavelength (Å)") +plt.ylabel("Effective Area ($cm^{2}$)") +plt.legend() +plt.xlim(0, 60) + +plt.grid(color="lightgrey") +plt.tight_layout() + +plt.show() diff --git a/docs/gallery/instrument_response/temperature_response.py b/docs/gallery/instrument_response/temperature_response.py new file mode 100644 index 000000000..031d5153c --- /dev/null +++ b/docs/gallery/instrument_response/temperature_response.py @@ -0,0 +1,88 @@ +""" +==================== +Temperature Response +==================== + +In this example, we will explore the temperature response of the filters on XRT. +The temperature response provides important information on how XRT responds to +the different temperatures of X-ray emissions. +""" + +import matplotlib.pyplot as plt +import numpy as np + +import xrtpy + +############################################################################## +# A filter channel is defined by its common abbreviation, which represents +# a specific type of filter used to modify the X-ray radiation observed. +# In this example, we will explore the carbon-on-polyimide filter (abbreviated as "C-poly"). + +xrt_filter = "C-poly" + +############################################################################## +# `~.TemperatureResponseFundamental` provides the functions and properties for +# calculating the temperature response. 
+
+date_time = "2023-09-22T21:59:59"
+tpf = xrtpy.response.TemperatureResponseFundamental(
+    xrt_filter, date_time, abundance_model="Photospheric"
+)
+
+##############################################################################
+# To calculate the temperature response, we can do the following:
+
+temperature_response = tpf.temperature_response()
+print("Temperature Response:\n", temperature_response)
+
+##############################################################################
+# We will now visualize the temperature response function using CHIANTI.
+# These temperatures are of the plasma and are independent of the channel filter.
+#
+# We use the log of these temperatures, to enhance the visibility of the
+# variations at lower temperatures.
+
+chianti_temperature = np.log10(tpf.CHIANTI_temperature.to_value())
+
+##############################################################################
+# Differences over time arise from an increase of the contamination layer on the
+# CCD which blocks some of the X-rays thus reducing the effective area.
+# For detailed information about the calculation of the XRT CCD contaminant layer thickness,
+# you can refer to
+# `Montana State University `__.
+#
+# Additional information is provided by
+# `Narukage et al. (2011) `__.
+
+
+launch_datetime = "2006-09-22T23:59:59"
+
+launch_temperature_response = xrtpy.response.TemperatureResponseFundamental(
+    xrt_filter, launch_datetime, abundance_model="Photospheric"
+).temperature_response()
+
+##############################################################################
+# Now we can plot the temperature response versus the log of the CHIANTI temperature
+# and compare the results for the launch date and the chosen date. 
+ +plt.figure() + +plt.plot( + chianti_temperature, + np.log10(temperature_response.value), + label=f"{date_time}", +) +plt.plot( + chianti_temperature, + np.log10(launch_temperature_response.value), + label=f"{launch_datetime}", + color="red", +) + +plt.title("XRT Temperature Response") +plt.xlabel("Log(T) ($K$)") +plt.ylabel("$DN$ $cm^5$ $ s^-1$ $pix^-1$") +plt.legend() +plt.grid() + +plt.show() diff --git a/docs/getting_started.rst b/docs/getting_started.rst index 8f3cae3cf..d6af2ed45 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -7,7 +7,7 @@ on board the Hinode spacecraft. This page is intended for new users of XRTpy. Fo please refer to the `SolarSoft XRT Analysis Guide`_. XRTpy Objects -************* +------------- XRTpy currently provides access to the following core classes: @@ -24,41 +24,41 @@ It also includes functionality to: Visit our Examples page for step-by-step Jupyter notebook guides on how to use each feature. Channel -------- +^^^^^^^ The ``Channel`` class describes the configuration of a specific XRT filter channel. It includes details for the Charge-Coupled Device (CCD), Entrance Filter, Focal Plane Filters, Geometry, and Mirrors. Effective Area --------------- +^^^^^^^^^^^^^^ XRTpy calculates the effective area for each XRT filter channel, accounting for time-dependent contamination on the CCD. For more details, refer to the `SolarSoft XRT Analysis Guide`_. Temperature Response --------------------- +^^^^^^^^^^^^^^^^^^^^ XRTpy calculates the temperature response of XRT filter channels using the CHIANTI_ atomic database (version 10.0) and coronal abundances (:cite:t:`feldman:1992`). This produces a response function as a function of temperature, using an assumed emission model (:cite:t:`narukage:2011`, :cite:t:`narukage:2014`). 
Deriving Temperature and Emission Measure ------------------------------------------ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The ``temperature_from_filter_ratio`` function allows you to derive plasma temperature and emission measure from a pair of XRT images using the filter-ratio method. This mirrors the logic in the SolarSoft IDL routine of the same name. A usage example is available in the Examples section. Image Deconvolution with the PSF --------------------------------- +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The ``deconvolve`` function applies image deconvolution using the instrument's Point Spread Function (PSF) to sharpen XRT images. This is especially useful for recovering detail around bright or sharp solar structures. Light Leak Correction ---------------------- +^^^^^^^^^^^^^^^^^^^^^ The ``remove_lightleak`` function subtracts visible stray light from XRT synoptic composite images. This correction improves the quality of long-term coronal evolution studies. See our Examples section for how to use this function. Abundance Model Options ------------------------ +^^^^^^^^^^^^^^^^^^^^^^^ By default, XRTpy uses CHIANTI coronal abundances (:cite:t:`feldman:1992`). You may also choose: @@ -72,7 +72,9 @@ To use a different abundance model: from xrtpy.response import TemperatureResponseFundamental TemperatureResponseFundamental( - "Al-poly", "2022-07-04T23:43:12", abundance_model="hybrid" + "Al-poly", + "2022-07-04T23:43:12", + abundance_model="hybrid" ) You may also pass the ``abundance_model`` keyword to ``temperature_from_filter_ratio``. @@ -83,12 +85,12 @@ You may also pass the ``abundance_model`` keyword to ``temperature_from_filter_r Tools -***** +----- The ``xrtpy.response.tools`` module includes helpful utility functions to streamline workflows. 
It includes the following: Generate Temperature Responses ------------------------------- +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Use the ``generate_temperature_responses`` tool to compute the temperature response for one or more filters — including combinations like ``"Al-poly/Ti-poly"`` — with a single command. @@ -101,15 +103,15 @@ This function returns a list of ``TemperatureResponseFundamental`` objects, one from xrtpy.response.tools import generate_temperature_responses responses = generate_temperature_responses( - filters=["Al-poly", "Be-thick", "Al-poly/Ti-poly"], - obs_date="2020-07-04T00:00:00", - abundance="Hybrid", + filters=["Al-poly", "Be-thick", "Al-poly/Ti-poly"], + obs_date="2020-07-04T00:00:00", + abundance="Hybrid", ) for resp in responses: - print(f"Filter: {resp.filter_name}") - print(f" Temperatures: {resp.temperature[:3]}") - print(f" Response: {resp.response[:3]}") + print(f"Filter: {resp.filter_name}") + print(f" Temperatures: {resp.temperature[:3]}") + print(f" Response: {resp.response[:3]}") **Example Output:** @@ -117,16 +119,16 @@ This function returns a list of ``TemperatureResponseFundamental`` objects, one .. code-block:: text Filter: Al-poly - Temperatures: [100000. 112201.9 125892.516] K - Response: [8.34e-31 1.07e-30 1.53e-30] cm5 DN / (pix s) + Temperatures: [100000. 112201.9 125892.516] K + Response: [8.34e-31 1.07e-30 1.53e-30] cm5 DN / (pix s) Filter: Be-thick - Temperatures: [100000. 112201.9 125892.516] K - Response: [0.00e+00 1.73e-94 2.43e-84] cm5 DN / (pix s) + Temperatures: [100000. 112201.9 125892.516] K + Response: [0.00e+00 1.73e-94 2.43e-84] cm5 DN / (pix s) Filter: Al-poly/Ti-poly - Temperatures: [100000. 112201.9 125892.516] K - Response: [5.34e-34 7.24e-34 1.11e-33] cm5 DN / (pix s) + Temperatures: [100000. 
112201.9 125892.516] K + Response: [5.34e-34 7.24e-34 1.11e-33] cm5 DN / (pix s) Each response object has the following attributes: @@ -138,7 +140,7 @@ This tool is useful on its own, but it also serves as a foundation for upcoming Data Products -************* +------------- XRT data products are available through the XRT website. These include: @@ -148,7 +150,7 @@ XRT data products are available through the XRT website. These include: For more information, visit the `XRT data products`_ page. Double Filter Combinations -************************** +-------------------------- XRTpy now supports double filter combinations such as ``"Al-poly/Ti-poly"`` in both the :obj:`EffectiveAreaFundamental ` and :obj:`TemperatureResponseFundamental ` classes. @@ -159,10 +161,10 @@ XRTpy now supports double filter combinations such as ``"Al-poly/Ti-poly"`` in b from xrtpy.response import EffectiveAreaFundamental, TemperatureResponseFundamental eff_area = EffectiveAreaFundamental( - "Al-poly/Ti-poly", "2020-08-17T09:00:00", abundance_model="photospheric" + "Al-poly/Ti-poly", "2020-08-17T09:00:00", abundance_model="photospheric" ) temp_resp = TemperatureResponseFundamental( - "C-poly/Ti-poly", "2025-07-10T12:00:00", abundance_model="coronal" + "C-poly/Ti-poly", "2025-07-10T12:00:00", abundance_model="coronal" ) The following combinations are currently supported: @@ -178,7 +180,7 @@ The following combinations are currently supported: X-Ray Filter Channels -********************* +--------------------- XRT uses two filter wheels to configure the imaging filter channel. Each wheel includes several filters and an open slot: @@ -204,12 +206,6 @@ Filter Wheel 2: Filter names in XRTpy are passed as strings like ``'Ti-poly'``. -References -********** - -Velasquez, J., Murphy, N., Reeves, K. K., Slavin, J., Weber, M., & Barnes, W. (2024). -*XRTpy: A Hinode-X-Ray Telescope Python Package*. JOSS, 9(100), 6396. -https://doi.org/10.21105/joss.06396 .. 
_CHIANTI: https://www.chiantidatabase.org/chianti_database_history.html .. _SolarSoft XRT Analysis Guide: https://xrt.cfa.harvard.edu/resources/documents/XAG/XAG.pdf diff --git a/docs/index.rst b/docs/index.rst index b2812ead1..bc56f7ed6 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -21,6 +21,7 @@ Whether you're conducting research or just beginning to explore the world of X-r about_xrt install getting_started + dem_overview generated/gallery/index reference/index acknowledging_xrtpy @@ -30,3 +31,10 @@ Whether you're conducting research or just beginning to explore the world of X-r contributing code_of_conduct changelog/index + +Published Work +-------------- + +The following paper describes the XRTpy package and its initial release- v0.4.0: + +:cite:p:`velasquez:2024` From 9a0fb646a1d79ee3e93ea77edff9240b5c57cabf Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Fri, 12 Sep 2025 16:06:11 -0400 Subject: [PATCH 063/121] WIP: local changes in dem_solver.py --- xrtpy/xrt_dem_iterative/dem_solver.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 1136b0b74..8fea311e5 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -23,12 +23,17 @@ class XRTDEMIterative: Parameters ---------- - observed_channel : str or list of str + observed_channel : str or list of str (required) Filter names used in the observation (e.g., 'Al-mesh', 'Be-thin'). - observed_intensities : array-like - Observed intensities in DN/s/pix for each channel. - temperature_responses : list + Must match the provided temperature responses. + observed_intensities : array-like (required) + Observed intensities for each channel. + Units = DN/s/pix. + temperature_responses : list (required) List of `TemperatureResponseFundamental` objects matching the filters. + Units = DN s^-1 pix^-1 EM^-1. 
+ Can be generated using `xrtpy.response.tools.generate_temperature_responses` + for one or more filters. See: https://xrtpy.readthedocs.io/en/latest/getting_started.html intensity_errors : array-like, optional Intensity uncertainties. If None, will use a model-based estimate. min_T : float @@ -241,7 +246,8 @@ def validate_inputs(self) -> None: # 6) grid range inside every response for r in self.responses: - logT_grid = np.log10(r.temperature.value) + #logT_grid = np√.log10(r.temperature.value) + logT_grid = np.log10(r.temperature.to_value(u.K)) if not (self._min_T >= logT_grid.min() and self._max_T <= logT_grid.max()): raise ValueError( f"The specified temperature range [{self._min_T}, {self._max_T}] " From e648860a5a5eedd45416e9a822f0123bcf370099 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Mon, 15 Sep 2025 11:38:56 -0400 Subject: [PATCH 064/121] Trying to fix git doc 3.13 failing test --- docs/dem_overview.rst | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/dem_overview.rst b/docs/dem_overview.rst index a74394856..02834e4ef 100644 --- a/docs/dem_overview.rst +++ b/docs/dem_overview.rst @@ -27,7 +27,7 @@ Why DEM? DEM in XRTpy ------------ XRTpy provides a Python implementation of the iterative spline-fitting method -(originally available in IDL as `xrt_dem_iterative2.pro`). +(originally available in IDL as ``xrt_dem_iterative2.pro``). The DEM workflow requires three main inputs, each with specific type, shape, and units: @@ -41,10 +41,10 @@ The DEM workflow requires three main inputs, each with specific type, shape, and - Description: Measured intensities corresponding to each filter. 3. **Temperature response functions** - - Type: ``list`` of :class:`~xrtpy.response.fundamentals.TemperatureResponseFundamental` + - Type: ``list`` of :class:`xrtpy.response.TemperatureResponseFundamental` - Units: DN s\ :sup:`-1` pix\ :sup:`-1` EM\ :sup:`-1` - Description: Instrument temperature responses matching the filters. 
- These can be generated with :func:`xrtpy.response.tools.generate_temperature_responses`. + These can be generated with :func:`xrtpy.response.tools.generate_temperature_responses` See :doc:`getting_started` for details. Example @@ -165,8 +165,8 @@ References - Golub, L., et al. (2004), *Solar Physics*, 243, 63. :cite:p:`golub:2004` - Weber, M. A., et al. (2004), *ApJ*, 605, 528. :cite:p:`weber:2004`. -Next Steps ----------- -- See :ref:`API Reference ` for details on - ``XRTDEMIterative``. Coming soon. -- Explore example notebooks in the `examples/` directory. Coming soon. +.. Next Steps +.. ---------- +.. - See :ref:`API Reference ` for details on +.. ``XRTDEMIterative``. Coming soon. +.. - Explore example notebooks in the `examples/` directory. Coming soon. From fd4e8e19c685ec697414a6dfc60f962e418dcfc8 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Mon, 15 Sep 2025 11:45:24 -0400 Subject: [PATCH 065/121] fixing - generate_temperature_responses reference --- docs/dem_overview.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/dem_overview.rst b/docs/dem_overview.rst index 02834e4ef..0acd9bc3c 100644 --- a/docs/dem_overview.rst +++ b/docs/dem_overview.rst @@ -44,7 +44,7 @@ The DEM workflow requires three main inputs, each with specific type, shape, and - Type: ``list`` of :class:`xrtpy.response.TemperatureResponseFundamental` - Units: DN s\ :sup:`-1` pix\ :sup:`-1` EM\ :sup:`-1` - Description: Instrument temperature responses matching the filters. - These can be generated with :func:`xrtpy.response.tools.generate_temperature_responses` + These can be generated with ``xrtpy.response.tools.generate_temperature_responses`` See :doc:`getting_started` for details. 
Example From f3c10f62f31359c13d6ae18aa0e02944ef2e5c59 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Tue, 16 Sep 2025 16:33:30 -0400 Subject: [PATCH 066/121] Cleaning function --- xrtpy/xrt_dem_iterative/dem_solver.py | 49 +++++++++++++-------------- 1 file changed, 23 insertions(+), 26 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 8fea311e5..5d9815e4c 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -13,9 +13,6 @@ from xrtpy.util.filters import validate_and_format_filters -# DEM specific - - class XRTDEMIterative: """ Estimate the differential emission measure (DEM) from Hinode/XRT data @@ -105,20 +102,7 @@ def __init__( self._intensity_errors = np.asarray(intensity_errors, dtype=float) else: self._intensity_errors = None - - # if intensity_errors is not None: - # self._intensity_errors = np.asarray(intensity_errors, dtype=float) - # if self._intensity_errors.shape != self._observed_intensities.shape: - # raise ValueError( - # "Length of intensity_errors must match observed_intensities." - # ) - # if not np.all(np.isfinite(self._intensity_errors)) or np.any( - # self._intensity_errors < 0 - # ): - # raise ValueError("`intensity_errors` must be finite and >= 0.") - # else: - # self._intensity_errors = None # Will be computed later - + # Store temperature grid parameters self._dT = float(dT) self._min_T = float(min_T) @@ -282,12 +266,9 @@ def __repr__(self): # """ # return self._name + ####################################################################################################################################### @property - def observed_intensities( - self, - ) -> ( - u.Quantity - ): # Add method to account for known values not worth observed_intensities + def observed_intensities(self,) -> ( u.Quantity): # Add method to account for known values not worth observed_intensities """ Observed intensities with physical units. 
Returns @@ -380,7 +361,7 @@ def intensity_errors(self) -> u.Quantity: "See: https://hesperia.gsfc.nasa.gov/ssw/hinode/xrt/idl/util/xrt_dem_iterative2.pro", UserWarning, ) - self._using_estimated_errors = False # suppress future warnings + self._using_estimated_errors = True # suppress future warnings estimated = np.maximum( self.relative_error * self._observed_intensities, @@ -417,16 +398,32 @@ def max_iterations(self): def create_logT_grid(self): """ Build the DEM temperature grid *exactly* from min to max in steps of dT. + Construct the temperature grid for DEM calculations. + + This builds a regularly spaced grid in log10(temperature), then converts it + to linear temperature for use in the DEM integral. + + Notes + ----- + - IDL's `xrt_dem_iterative2.pro` and the DEM_Solver PDF documentation + describe this as the "regular logT grid". + - Two forms of the temperature grid are stored: + * self.logT : log10(T) values (dimensionless) + * self.T : linear temperatures (Kelvin, astropy.units.Quantity) + - The grid is inclusive of both `min_T` and `max_T`, with step size `dT`. 
+ + """ + #xrt_dem_iter_solver.pro n_bins = int(round((self._max_T - self._min_T) / self._dT)) + 1 # self.logT = np.linspace(self._min_T, self._max_T, n_bins) #28 # self.T = (10**self.logT) * u.K #28 # self.dlogT = self._dT # dimensionless - convenience-27 # self.dlnT = np.log(10.0) * self.dlogT # needed for IDL-style integrals - # inclusive grid with float-safe endpoint - self.logT = np.arange(self._min_T, self._max_T + self._dT / 2.0, self._dT) - self.T = (10.0**self.logT) * u.K + # inclusive grid with float-safe endpoint + self.logT = np.arange(self._min_T, self._max_T + self._dT / 2.0, self._dT) #Defines grid in log space - dimensionless + self.T = (10.0**self.logT) * u.K #Converts to linear temperatur # scalar spacing (dimensionless) self.dlogT = float(self._dT) From 81161d52f538ab9ccbefa44c4fd5fc1b8294672c Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Tue, 16 Sep 2025 16:56:54 -0400 Subject: [PATCH 067/121] Clean and updated - create_logT_grid --- xrtpy/xrt_dem_iterative/dem_solver.py | 47 +++++++++++++-------------- 1 file changed, 23 insertions(+), 24 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 5d9815e4c..4121a43db 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -394,42 +394,41 @@ def max_iterations(self): (e.g., when using `lmfit.minimize`). Default is 2000. """ return self._max_iterations - + def create_logT_grid(self): """ - Build the DEM temperature grid *exactly* from min to max in steps of dT. - Construct the temperature grid for DEM calculations. - + Construct the regular log10 temperature grid for DEM calculations. + This builds a regularly spaced grid in log10(temperature), then converts it to linear temperature for use in the DEM integral. - + Notes ----- - - IDL's `xrt_dem_iterative2.pro` and the DEM_Solver PDF documentation - describe this as the "regular logT grid". 
+ - IDL's `xrt_dem_iterative2.pro` describes this as the "regular logT grid". - Two forms of the temperature grid are stored: * self.logT : log10(T) values (dimensionless) * self.T : linear temperatures (Kelvin, astropy.units.Quantity) - The grid is inclusive of both `min_T` and `max_T`, with step size `dT`. - - + + Additional attributes created: + - self.dlogT : float + Step size in log10(T) (dimensionless). + - self.dlnT : float + Step size in natural log(T). Useful for IDL-style integrals of the form: + F = int. DEM(T) * R(T) * T d(ln T) """ - #xrt_dem_iter_solver.pro + # number of bins including endpoints n_bins = int(round((self._max_T - self._min_T) / self._dT)) + 1 - # self.logT = np.linspace(self._min_T, self._max_T, n_bins) #28 - # self.T = (10**self.logT) * u.K #28 - # self.dlogT = self._dT # dimensionless - convenience-27 - # self.dlnT = np.log(10.0) * self.dlogT # needed for IDL-style integrals - - # inclusive grid with float-safe endpoint - self.logT = np.arange(self._min_T, self._max_T + self._dT / 2.0, self._dT) #Defines grid in log space - dimensionless - self.T = (10.0**self.logT) * u.K #Converts to linear temperatur - - # scalar spacing (dimensionless) - self.dlogT = float(self._dT) - self.dlnT = ( - np.log(10.0) * self.dlogT - ) # for IDL-style intergral DEM(T) * R(T) * T dlnT - IDL “regular logT grid” + + # inclusive logT grid (IDL-style regular grid) + self.logT = np.linspace(self._min_T, self._max_T, n_bins) + + # linear temperature grid in Kelvin + self.T = (10.0 ** self.logT) * u.K + + self.dlogT = float(self._dT) # scalar spacing (dimensionless and natural-log equivalent) + self.dlnT = np.log(10.0) * self.dlogT # for IDL-style intergral DEM(T) * R(T) * T dlnT - IDL “regular logT grid” + def _interpolate_responses_to_grid(self): """ From ca85d4c16e29c2eea29dbbf63ebff7654bbd5f6f Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Tue, 16 Sep 2025 17:31:34 -0400 Subject: [PATCH 068/121] Update two functions --- 
xrtpy/xrt_dem_iterative/dem_solver.py | 88 +++++++++++++++++++++------ 1 file changed, 69 insertions(+), 19 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 4121a43db..e82009ea0 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -185,6 +185,7 @@ def __init__( raise ValueError("solv_factor must be a positive number.") except Exception as e: raise ValueError(f"Invalid solv_factor: {e}") + #### TEST GIT CI TEST ##### @@ -432,9 +433,41 @@ def create_logT_grid(self): def _interpolate_responses_to_grid(self): """ - Interpolate each filter's temperature response onto self.logT (log10 K). - Stores a dense matrix with shape (n_filters, n_temperatures), unitless numeric. + Interpolate each filter's temperature response onto the common logT grid. + + This prepares the response matrix (`_response_matrix`) used in DEM fitting. + Each filter's original response (on its native temperature grid) is + interpolated onto `self.logT`, the regular log10 temperature grid built + by `create_logT_grid`. + + Notes + ----- + - In IDL (`xrt_dem_iterative2.pro`) and the DEM_Solver PDF documentation, + this corresponds to constructing `Res_Mat`, where each row represents + one filter's response on the shared logT grid. + - Extrapolation beyond the original response grid is set to 0.0 + (same behavior as IDL). + - Units are preserved during interpolation: + DN s⁻¹ pix⁻¹ cm⁵ (per emission measure). + - Shape of `_response_matrix`: + (n_filters, n_temperatures) + + Attributes Created + ------------------ + interpolated_responses : list of ndarray + Individual interpolated response arrays (debugging convenience). + _response_matrix : ndarray + Final stacked response matrix with shape (n_filters, n_temperatures). + + Raises + ------ + AttributeError + If `create_logT_grid()` has not been called before this method. + RuntimeError + If the interpolated matrix shape does not match expectations. 
+ """ + #In IDL, Res_LogTemp_arr must exist before responses can be interpolated if not hasattr(self, "logT"): raise AttributeError( "Temperature grid missing. Call create_logT_grid() first." @@ -1004,25 +1037,42 @@ def solve(self): return result + def summary(self): - print("XRTpy DEM Iterative Setup Summary") - print("-" * 40) + print("\nXRTpy DEM Iterative Setup Summary\n") + print("-" * 50) print(f" Filters: {self.filter_names}") print(f" Obs Intensities: {self.observed_intensities}") - print(f" Number of observations (Nobs): {len(self._observed_intensities)}") - print(f" Solver Normalization Factor: {self.solv_factor:.1e}") - print( - f" Monte Carlo runs: {self.monte_carlo_runs if self.monte_carlo_runs > 0 else 'None'}" - ) - print(f" Max Iterations: {self.max_iterations}") + print(f" Number of obs: {len(self._observed_intensities)}") print(f" Intensity Errors: {self.intensity_errors}") - print(f" Temp Grid: logT {self.min_T} to {self.max_T} (step {self.dT})") + print(f" Error model used: {'User-provided' if self._intensity_errors is not None else f'Auto (obs * {self.relative_error}, min={self.min_error} DN/s)'}") + print(f" Temp Grid: logT {self.min_T}–{self.max_T}, step {self.dT}") print(f" Temp bins: {len(self.logT)}") - print( - f" Error model used: {'User-provided' if self._intensity_errors is not None else 'Auto (obs * 0.03, min=2 DN/s)'}" - ) - if self._intensity_errors is None: - print( - "For more info: https://hesperia.gsfc.nasa.gov/ssw/hinode/xrt/idl/util/xrt_dem_iterative2.pro" - ) - print("-" * 40) + print(f" Solver factor: {self.solv_factor:.1e}") + print(f" Monte Carlo runs: {self.monte_carlo_runs or 'None'}") + print(f" Max Iterations: {self.max_iterations}") + print("-" * 50) + + + # def summary(self): + # print("XRTpy DEM Iterative Setup Summary") + # print("-" * 40) + # print(f" Filters: {self.filter_names}") + # print(f" Obs Intensities: {self.observed_intensities}") + # print(f" Number of observations (Nobs): 
{len(self._observed_intensities)}") + # print(f" Solver Normalization Factor: {self.solv_factor:.1e}") + # print( + # f" Monte Carlo runs: {self.monte_carlo_runs if self.monte_carlo_runs > 0 else 'None'}" + # ) + # print(f" Max Iterations: {self.max_iterations}") + # print(f" Intensity Errors: {self.intensity_errors}") + # print(f" Temp Grid: logT {self.min_T} to {self.max_T} (step {self.dT})") + # print(f" Temp bins: {len(self.logT)}") + # print( + # f" Error model used: {'User-provided' if self._intensity_errors is not None else 'Auto (obs * 0.03, min=2 DN/s)'}" + # ) + # if self._intensity_errors is None: + # print( + # "For more info: https://hesperia.gsfc.nasa.gov/ssw/hinode/xrt/idl/util/xrt_dem_iterative2.pro" + # ) + # print("-" * 40) From 8d6d3c5d9a6469e706d35d37a3a3e140e077d9c0 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Wed, 17 Sep 2025 11:44:44 -0400 Subject: [PATCH 069/121] Removing_build_response_matrix function --- xrtpy/xrt_dem_iterative/dem_solver.py | 203 +++++++++++++++----------- 1 file changed, 115 insertions(+), 88 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index e82009ea0..08cb48a83 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -431,130 +431,157 @@ def create_logT_grid(self): self.dlnT = np.log(10.0) * self.dlogT # for IDL-style intergral DEM(T) * R(T) * T dlnT - IDL “regular logT grid” + # def _interpolate_responses_to_grid(self): + # """ + # Interpolate each filter's temperature response onto the common logT grid. + + # This prepares the response matrix (`_response_matrix`) used in DEM fitting. + # Each filter's original response (on its native temperature grid) is + # interpolated onto `self.logT`, the regular log10 temperature grid built + # by `create_logT_grid`. 
+ + # Notes + # ----- + # - In IDL (`xrt_dem_iterative2.pro`) and the DEM_Solver PDF documentation, + # this corresponds to constructing `Res_Mat`, where each row represents + # one filter's response on the shared logT grid. + # - Extrapolation beyond the original response grid is set to 0.0 + # (same behavior as IDL). + # - Units are preserved during interpolation: + # DN s⁻¹ pix⁻¹ cm⁵ (per emission measure). + # - Shape of `_response_matrix`: + # (n_filters, n_temperatures) + + # Attributes Created + # ------------------ + # interpolated_responses : list of ndarray + # Individual interpolated response arrays (debugging convenience). + # _response_matrix : ndarray + # Final stacked response matrix with shape (n_filters, n_temperatures). + + # Raises + # ------ + # AttributeError + # If `create_logT_grid()` has not been called before this method. + # RuntimeError + # If the interpolated matrix shape does not match expectations. + + # """ + # #In IDL, Res_LogTemp_arr must exist before responses can be interpolated + # if not hasattr(self, "logT"): + # raise AttributeError( + # "Temperature grid missing. Call create_logT_grid() first." 
+ # ) + + # rows = [] + # for i, (T_orig, R_orig, fname) in enumerate( + # zip(self.response_temperatures, self.response_values, self.filter_names, strict=False) + # ): + # logT_orig = np.log10(T_orig.to_value(u.K)) + # # response_vals = R_orig.to_value(u.DN / u.s / u.pix / (u.cm**5)) + # response_vals = R_orig.to_value((u.cm**5 * u.DN) / (u.pix * u.s)) + + # # Remove later + # print(f"→ Channel {i}: {fname}") + # print( + # f" logT_orig.shape = {logT_orig.shape}, response_vals.shape = {response_vals.shape}" + # ) + # print( + # f" logT range: {logT_orig.min():.2f}–{logT_orig.max():.2f}, grid: {self.logT.min():.2f}–{self.logT.max():.2f}" + # ) + + # try: + # interp_func = interp1d( + # logT_orig, + # response_vals, + # kind="linear", + # bounds_error=False, + # fill_value=0.0, + # assume_sorted=True, + # ) + # interp_row = interp_func(self.logT) + # print(f" Interpolated length: {len(interp_row)}") + # rows.append(interp_row) + # except Exception as e: + # print(f" Interpolation failed: {e}") + # raise + + # self.interpolated_responses = rows + # self._response_matrix = np.vstack(rows) + + # if self._response_matrix.shape != (len(self.responses), self.logT.size): + # raise RuntimeError("Interpolated response matrix has unexpected shape.") + def _interpolate_responses_to_grid(self): """ - Interpolate each filter's temperature response onto the common logT grid. + Interpolate all filter responses onto the common logT grid and build + the response matrix. - This prepares the response matrix (`_response_matrix`) used in DEM fitting. - Each filter's original response (on its native temperature grid) is - interpolated onto `self.logT`, the regular log10 temperature grid built - by `create_logT_grid`. + Equivalent to constructing `Res_Mat` in IDL's `xrt_dem_iterative2.pro` + and in the DEM_Solver PDF documentation. 
Notes ----- - - In IDL (`xrt_dem_iterative2.pro`) and the DEM_Solver PDF documentation, - this corresponds to constructing `Res_Mat`, where each row represents - one filter's response on the shared logT grid. - - Extrapolation beyond the original response grid is set to 0.0 - (same behavior as IDL). - - Units are preserved during interpolation: - DN s⁻¹ pix⁻¹ cm⁵ (per emission measure). - - Shape of `_response_matrix`: - (n_filters, n_temperatures) + - Each filter's response is interpolated to `self.logT` (regular log10 grid). + - Extrapolation beyond the native response grid is set to 0.0. + - Units: DN s⁻¹ pix⁻¹ cm⁵ (per emission measure). + - Shape of `_response_matrix`: (n_filters, n_temperatures) + Rows = filters, Columns = temperature bins. Attributes Created ------------------ interpolated_responses : list of ndarray - Individual interpolated response arrays (debugging convenience). + Interpolated response arrays for each filter. _response_matrix : ndarray - Final stacked response matrix with shape (n_filters, n_temperatures). - - Raises - ------ - AttributeError - If `create_logT_grid()` has not been called before this method. - RuntimeError - If the interpolated matrix shape does not match expectations. - + Final stacked matrix (n_filters x n_temperatures). """ - #In IDL, Res_LogTemp_arr must exist before responses can be interpolated if not hasattr(self, "logT"): - raise AttributeError( - "Temperature grid missing. Call create_logT_grid() first." - ) + raise AttributeError("Temperature grid missing. 
Call create_logT_grid() first.") rows = [] - for i, (T_orig, R_orig, fname) in enumerate( - zip(self.response_temperatures, self.response_values, self.filter_names, strict=False) - ): + for T_orig, R_orig in zip(self.response_temperatures, self.response_values): logT_orig = np.log10(T_orig.to_value(u.K)) - # response_vals = R_orig.to_value(u.DN / u.s / u.pix / (u.cm**5)) response_vals = R_orig.to_value((u.cm**5 * u.DN) / (u.pix * u.s)) - # Remove later - print(f"→ Channel {i}: {fname}") - print( - f" logT_orig.shape = {logT_orig.shape}, response_vals.shape = {response_vals.shape}" - ) - print( - f" logT range: {logT_orig.min():.2f}–{logT_orig.max():.2f}, grid: {self.logT.min():.2f}–{self.logT.max():.2f}" + interp_func = interp1d( + logT_orig, + response_vals, + kind="linear", + bounds_error=False, + fill_value=0.0, + assume_sorted=True, ) - - try: - interp_func = interp1d( - logT_orig, - response_vals, - kind="linear", - bounds_error=False, - fill_value=0.0, - assume_sorted=True, - ) - interp_row = interp_func(self.logT) - print(f" Interpolated length: {len(interp_row)}") - rows.append(interp_row) - except Exception as e: - print(f" Interpolation failed: {e}") - raise + rows.append(interp_func(self.logT)) self.interpolated_responses = rows - self._response_matrix = np.vstack(rows) + self._response_matrix = np.vstack(rows).astype(float) + # Quick sanity check if self._response_matrix.shape != (len(self.responses), self.logT.size): raise RuntimeError("Interpolated response matrix has unexpected shape.") + @property def response_matrix(self): """ - Returns the response matrix after interpolation. + Response matrix (n_filters x n_temperatures) after interpolation. - Shape: (n_filters, n_temperatures) + Units: DN s^-1 pix^-1 cm⁵ per emission measure. + + Equivalent to `Res_Mat` in IDL's `xrt_dem_iterative2.pro`. + + Raises + ------ + AttributeError + If `_interpolate_responses_to_grid()` has not been called yet. 
""" if not hasattr(self, "_response_matrix"): raise AttributeError( - "Response matrix has not been built . Call _build_response_matrix() first." + "Response matrix not available. Call _interpolate_responses_to_grid() first." ) return self._response_matrix - def _build_response_matrix(self): - """ - Builds the response matrix from interpolated responses. - - Sets: - ------- - self.response_matrix : ndarray - 2D array of shape (n_filters, n_temperatures) - Stack your self.interpolated_responses into a 2D NumPy array - - Personal notes: The response matrix is a 2D array that relates temperature to observed intensity - For numerical DEM: - -You approximate the integral as a matrix multiplication - -Each filter contributes one equation (row) - -Each temperature bin contributes one unknown (column) - - Intergal DEM(T) * R(T) dT = sum[DEM_i * R_i * dT] - """ - if not hasattr(self, "interpolated_responses"): - raise RuntimeError( - "Call _interpolate_responses_to_grid() before building the response matrix." - ) - - # self._response_matrix = np.vstack(self.interpolated_responses) # matrix - self._response_matrix = np.vstack(self.interpolated_responses).astype( - float - ) # matrix - this is what IDL does when stacking per-filter response vectors. 
- print( - f"Built response matrix: shape = {self._response_matrix.shape} (filters * logT bins)" - ) def _estimate_initial_dem( self, smooth=False, logscale=False, plot=True From 2b4f7613ab8551f2d9a35837fc3c43e148206470 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Wed, 17 Sep 2025 11:46:30 -0400 Subject: [PATCH 070/121] Removing older version of -_interpolate_responses_to_grid --- xrtpy/xrt_dem_iterative/dem_solver.py | 83 +-------------------------- 1 file changed, 1 insertion(+), 82 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 08cb48a83..8f6eb9987 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -431,87 +431,6 @@ def create_logT_grid(self): self.dlnT = np.log(10.0) * self.dlogT # for IDL-style intergral DEM(T) * R(T) * T dlnT - IDL “regular logT grid” - # def _interpolate_responses_to_grid(self): - # """ - # Interpolate each filter's temperature response onto the common logT grid. - - # This prepares the response matrix (`_response_matrix`) used in DEM fitting. - # Each filter's original response (on its native temperature grid) is - # interpolated onto `self.logT`, the regular log10 temperature grid built - # by `create_logT_grid`. - - # Notes - # ----- - # - In IDL (`xrt_dem_iterative2.pro`) and the DEM_Solver PDF documentation, - # this corresponds to constructing `Res_Mat`, where each row represents - # one filter's response on the shared logT grid. - # - Extrapolation beyond the original response grid is set to 0.0 - # (same behavior as IDL). - # - Units are preserved during interpolation: - # DN s⁻¹ pix⁻¹ cm⁵ (per emission measure). - # - Shape of `_response_matrix`: - # (n_filters, n_temperatures) - - # Attributes Created - # ------------------ - # interpolated_responses : list of ndarray - # Individual interpolated response arrays (debugging convenience). 
- # _response_matrix : ndarray - # Final stacked response matrix with shape (n_filters, n_temperatures). - - # Raises - # ------ - # AttributeError - # If `create_logT_grid()` has not been called before this method. - # RuntimeError - # If the interpolated matrix shape does not match expectations. - - # """ - # #In IDL, Res_LogTemp_arr must exist before responses can be interpolated - # if not hasattr(self, "logT"): - # raise AttributeError( - # "Temperature grid missing. Call create_logT_grid() first." - # ) - - # rows = [] - # for i, (T_orig, R_orig, fname) in enumerate( - # zip(self.response_temperatures, self.response_values, self.filter_names, strict=False) - # ): - # logT_orig = np.log10(T_orig.to_value(u.K)) - # # response_vals = R_orig.to_value(u.DN / u.s / u.pix / (u.cm**5)) - # response_vals = R_orig.to_value((u.cm**5 * u.DN) / (u.pix * u.s)) - - # # Remove later - # print(f"→ Channel {i}: {fname}") - # print( - # f" logT_orig.shape = {logT_orig.shape}, response_vals.shape = {response_vals.shape}" - # ) - # print( - # f" logT range: {logT_orig.min():.2f}–{logT_orig.max():.2f}, grid: {self.logT.min():.2f}–{self.logT.max():.2f}" - # ) - - # try: - # interp_func = interp1d( - # logT_orig, - # response_vals, - # kind="linear", - # bounds_error=False, - # fill_value=0.0, - # assume_sorted=True, - # ) - # interp_row = interp_func(self.logT) - # print(f" Interpolated length: {len(interp_row)}") - # rows.append(interp_row) - # except Exception as e: - # print(f" Interpolation failed: {e}") - # raise - - # self.interpolated_responses = rows - # self._response_matrix = np.vstack(rows) - - # if self._response_matrix.shape != (len(self.responses), self.logT.size): - # raise RuntimeError("Interpolated response matrix has unexpected shape.") - def _interpolate_responses_to_grid(self): """ Interpolate all filter responses onto the common logT grid and build @@ -524,7 +443,7 @@ def _interpolate_responses_to_grid(self): ----- - Each filter's response is interpolated to 
`self.logT` (regular log10 grid). - Extrapolation beyond the native response grid is set to 0.0. - - Units: DN s⁻¹ pix⁻¹ cm⁵ (per emission measure). + - Units: DN s^-1 pix^-1 cm^5 (per emission measure). - Shape of `_response_matrix`: (n_filters, n_temperatures) Rows = filters, Columns = temperature bins. From 69765c9bb9665016d8dfdfc5ae984fd1d76bd866 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Wed, 17 Sep 2025 15:52:01 -0400 Subject: [PATCH 071/121] Clean code and apply black to triple checked code --- xrtpy/xrt_dem_iterative/dem_solver.py | 101 +++++++++++++++++++++----- 1 file changed, 81 insertions(+), 20 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 8f6eb9987..f937b8003 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -13,6 +13,7 @@ from xrtpy.util.filters import validate_and_format_filters + class XRTDEMIterative: """ Estimate the differential emission measure (DEM) from Hinode/XRT data @@ -29,7 +30,7 @@ class XRTDEMIterative: temperature_responses : list (required) List of `TemperatureResponseFundamental` objects matching the filters. Units = DN s^-1 pix^-1 EM^-1. - Can be generated using `xrtpy.response.tools.generate_temperature_responses` + Can be generated using `xrtpy.response.tools.generate_temperature_responses` for one or more filters. See: https://xrtpy.readthedocs.io/en/latest/getting_started.html intensity_errors : array-like, optional Intensity uncertainties. If None, will use a model-based estimate. 
@@ -102,7 +103,7 @@ def __init__( self._intensity_errors = np.asarray(intensity_errors, dtype=float) else: self._intensity_errors = None - + # Store temperature grid parameters self._dT = float(dT) self._min_T = float(min_T) @@ -185,7 +186,6 @@ def __init__( raise ValueError("solv_factor must be a positive number.") except Exception as e: raise ValueError(f"Invalid solv_factor: {e}") - #### TEST GIT CI TEST ##### @@ -231,7 +231,7 @@ def validate_inputs(self) -> None: # 6) grid range inside every response for r in self.responses: - #logT_grid = np√.log10(r.temperature.value) + # logT_grid = np√.log10(r.temperature.value) logT_grid = np.log10(r.temperature.to_value(u.K)) if not (self._min_T >= logT_grid.min() and self._max_T <= logT_grid.max()): raise ValueError( @@ -269,7 +269,11 @@ def __repr__(self): ####################################################################################################################################### @property - def observed_intensities(self,) -> ( u.Quantity): # Add method to account for known values not worth observed_intensities + def observed_intensities( + self, + ) -> ( + u.Quantity + ): # Add method to account for known values not worth observed_intensities """ Observed intensities with physical units. Returns @@ -395,14 +399,14 @@ def max_iterations(self): (e.g., when using `lmfit.minimize`). Default is 2000. """ return self._max_iterations - + def create_logT_grid(self): """ Construct the regular log10 temperature grid for DEM calculations. - + This builds a regularly spaced grid in log10(temperature), then converts it to linear temperature for use in the DEM integral. - + Notes ----- - IDL's `xrt_dem_iterative2.pro` describes this as the "regular logT grid". 
@@ -425,11 +429,14 @@ def create_logT_grid(self): self.logT = np.linspace(self._min_T, self._max_T, n_bins) # linear temperature grid in Kelvin - self.T = (10.0 ** self.logT) * u.K - - self.dlogT = float(self._dT) # scalar spacing (dimensionless and natural-log equivalent) - self.dlnT = np.log(10.0) * self.dlogT # for IDL-style intergral DEM(T) * R(T) * T dlnT - IDL “regular logT grid” + self.T = (10.0**self.logT) * u.K + self.dlogT = float( + self._dT + ) # scalar spacing (dimensionless and natural-log equivalent) + self.dlnT = ( + np.log(10.0) * self.dlogT + ) # for IDL-style intergral DEM(T) * R(T) * T dlnT - IDL “regular logT grid” def _interpolate_responses_to_grid(self): """ @@ -455,12 +462,16 @@ def _interpolate_responses_to_grid(self): Final stacked matrix (n_filters x n_temperatures). """ if not hasattr(self, "logT"): - raise AttributeError("Temperature grid missing. Call create_logT_grid() first.") + raise AttributeError( + "Temperature grid missing. Call create_logT_grid() first." 
+ ) rows = [] for T_orig, R_orig in zip(self.response_temperatures, self.response_values): logT_orig = np.log10(T_orig.to_value(u.K)) - response_vals = R_orig.to_value((u.cm**5 * u.DN) / (u.pix * u.s)) + # response_vals = R_orig.to_value((u.cm**5 * u.DN) / (u.pix * u.s)) + # response_vals = R_orig.to_value(u.DN / u.s / u.pix / (u.cm**5)) + response_vals = R_orig.to_value((u.DN / u.s / u.pix) * u.cm**5) interp_func = interp1d( logT_orig, @@ -475,11 +486,14 @@ def _interpolate_responses_to_grid(self): self.interpolated_responses = rows self._response_matrix = np.vstack(rows).astype(float) + # Store the physical unit for clarity + # self._response_unit = u.DN / u.s / u.pix / (u.cm**5) + self._response_unit = (u.DN / u.s / u.pix) * u.cm**5 + # Quick sanity check if self._response_matrix.shape != (len(self.responses), self.logT.size): raise RuntimeError("Interpolated response matrix has unexpected shape.") - @property def response_matrix(self): """ @@ -500,8 +514,7 @@ def response_matrix(self): ) return self._response_matrix - - + ####################################################################################################################################### def _estimate_initial_dem( self, smooth=False, logscale=False, plot=True ): # mirrors xrt_dem_iter_estim.pro @@ -964,6 +977,54 @@ def solve(self): (dem_best_logT / (np.log(10.0) * self.T.to_value(u.K))) * u.cm**-5 / u.K ) + def solved(self): + """ + Run the full DEM solver. + + This method orchestrates the entire pipeline: + 1. Build the temperature grid. + 2. Interpolate responses onto the grid (builds response matrix). + 3. Estimate initial DEM guess. + 4. Run the least-squares solver (LMFIT). + 5. Optionally perform Monte Carlo runs for uncertainties. + 6. Store results in object attributes. 
+ + Returns + ------- + dict + Dictionary containing primary solver outputs: + - "temperature" : log10 temperature grid + - "dem" : best-fit DEM [cm^-5 K^-1] + - "dem_err" : DEM uncertainty (if MC runs > 0) + - "ifit" : fitted intensities [DN/s/pix] + - "chi2" : chi-squared metric + """ + # 1. Build logT grid + self.create_logT_grid() + + # 2. Prepare response matrix + self._interpolate_responses_to_grid() + + # 3. Estimate initial DEM (placeholder – next step to implement) + self._estimate_initial_dem() + + # 4. Run least-squares fit (placeholder) + self._fit_dem() + + # 5. Monte Carlo ensemble (optional – placeholder) + if self.monte_carlo_runs > 0: + self._run_monte_carlo() + + # 6. Bundle results + results = { + "temperature": self.logT, + "dem": self.dem, + "dem_err": getattr(self, "dem_uncertainty", None), + "ifit": self.fitted_intensities, + "chi2": self.chi2, + } + return results + # if not result.success: # print(" DEM fit did not fully converge:") # print(" >", result.message) @@ -983,7 +1044,6 @@ def solve(self): return result - def summary(self): print("\nXRTpy DEM Iterative Setup Summary\n") print("-" * 50) @@ -991,7 +1051,9 @@ def summary(self): print(f" Obs Intensities: {self.observed_intensities}") print(f" Number of obs: {len(self._observed_intensities)}") print(f" Intensity Errors: {self.intensity_errors}") - print(f" Error model used: {'User-provided' if self._intensity_errors is not None else f'Auto (obs * {self.relative_error}, min={self.min_error} DN/s)'}") + print( + f" Error model used: {'User-provided' if self._intensity_errors is not None else f'Auto (obs * {self.relative_error}, min={self.min_error} DN/s)'}" + ) print(f" Temp Grid: logT {self.min_T}–{self.max_T}, step {self.dT}") print(f" Temp bins: {len(self.logT)}") print(f" Solver factor: {self.solv_factor:.1e}") @@ -999,7 +1061,6 @@ def summary(self): print(f" Max Iterations: {self.max_iterations}") print("-" * 50) - # def summary(self): # print("XRTpy DEM Iterative Setup Summary") # 
print("-" * 40) From 5ef701ae9600ac712ab00d69552226c2b6c1b260 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Fri, 19 Sep 2025 16:40:37 -0400 Subject: [PATCH 072/121] Updated solver still needs alot of work - just saving --- xrtpy/xrt_dem_iterative/dem_solver.py | 1423 ++++++++++++++++--------- 1 file changed, 947 insertions(+), 476 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index f937b8003..47b3956ac 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -160,7 +160,7 @@ def __init__( "Please ensure the temperature range fits within all responses.\n" "Hint: Default response range is logT = 5.5 to 8.0. You can view each response's logT range via: [r.temperature for r in responses]" ) - + # Check consistency between inputs if not ( len(self._observed_intensities) @@ -186,6 +186,8 @@ def __init__( raise ValueError("solv_factor must be a positive number.") except Exception as e: raise ValueError(f"Invalid solv_factor: {e}") + + self._using_estimated_errors = False #track whether default error model has been used #### TEST GIT CI TEST ##### @@ -253,7 +255,7 @@ def validate_inputs(self) -> None: # success ⇒ no return value return None - ########################### + def __repr__(self): return ( f" np.ndarray: + """ + Estimate an initial DEM curve from observed intensities and responses. + + This follows the algorithm in IDL's `xrt_dem_iterative2.pro`, which uses + response-peak inversion to generate a crude log10 DEM estimate per channel, + then interpolates these estimates onto the solver's regular temperature grid. Parameters ---------- - smooth : bool - If True, applies mild Gaussian smoothing (future option). - logscale : bool - If True, computes log10(DEM) and exponentiates to suppress spikes. - plot : bool - If True, shows a diagnostic plot of the initial DEM. 
- """ - print(self.response_temperatures, self.response_values, self.filter_names) + cutoff : float, optional + Fraction of the peak response to use for defining the "good" window + around each filter's peak. Default is 1/e ≈ 0.3679. - # Define inputs - xrt_dem_iter_estim.pro - if not hasattr(self, "response_matrix"): - raise RuntimeError("Run _build_response_matrix() before estimating DEM.") - - I_obs = np.asarray(self._observed_intensities, dtype=float) - R = self.response_matrix - n_filters, n_temps = R.shape + Returns + ------- + est_log_dem_on_grid : ndarray + Array of shape (n_temperatures,) giving the initial DEM estimate + on `self.logT`. Values are log10(DEM) in [cm^-5 K^-1]. + This can be used to seed the solver. - print( - f"Estimating DEM from {n_filters} intensities across {n_temps} temperature bins..." - ) + Notes + ----- + - Units: + * Observed intensities: [DN s^-1 pix^-1] + * Response: [DN s^-1 pix^-1 cm^5] + * DEM(T): [cm^-5 K^-1] + - For each filter: + 1. Locate the peak of its response. + 2. Define a window where response > cutoff * peak. + 3. Compute the denominator integral: sum( T * R * dlnT ). + 4. Estimate DEM_peak = I_obs / denom. + 5. Store log10(DEM_peak) at the peak logT. + - Duplicate peak logTs are merged by averaging. + - If fewer than 2 valid points are found, falls back to a flat guess + (log10 DEM = 22 everywhere). + """ + if not hasattr(self, "logT"): + raise AttributeError("Temperature grid missing. Call create_logT_grid() first.") + if not hasattr(self, "_response_matrix"): + raise AttributeError("Response matrix missing. 
Call _interpolate_responses_to_grid() first.") - with np.errstate(divide="ignore", invalid="ignore"): - # First estimate DEM per-logT (cm^-5) - dem_logT = np.zeros(n_temps) # 27 - for i in range(n_filters): - row = R[i, :] - ratio = np.where( - row > 1e-30, I_obs[i] / row, 0.0 - ) # cm^-5 first estimate for the DEM - dem_logT += ratio - dem_logT /= n_filters - # IDL - for i=0,n_channels-1 do dem += obs_val[i] / response[i,*] - # IDL - dem = dem / n_channels - - # DO NOT divide by self._dT here. - # Optional smoothing in per-logT space: - if smooth: - from scipy.ndimage import gaussian_filter1d - - dem_logT = gaussian_filter1d(dem_logT, sigma=1.0) - - # Store canonical DEM (per-logT) for fitting/forward model - self.initial_dem_logT = dem_logT * ( - u.cm**-5 - ) ################################## - # self.initial_dem_logT = (np.log(10.0) * self.T) * self.dem_initial * u.cm**-5 # (N,), per-log10T #28 - - # For plotting PER-K if you want that axis: - dem_perK = dem_logT / (np.log(10.0) * self.T.to_value(u.K)) # cm^-5 K^-1 - self.initial_dem = dem_perK * (u.cm**-5 / u.K) - - print(" Max DEM_per_logT:", np.max(dem_logT)) - print(" Min DEM_per_logT:", np.min(dem_logT)) - print(" dlog10T:", self.dlogT) - - print("Initial DEM estimate complete") - print(f"Peak DEM_per_logT: {self.initial_dem_logT.max():.2e}") - print(f" Mean DEM_per_logT: {self.initial_dem_logT.mean():.2e}") - - # Diagnostics - - print(f"I_obs: {I_obs}") # Observed intensities - print(f"R (response matrix): {R.shape}") - print(f"Sum of response rows: {[np.sum(R[i]) for i in range(R.shape[0])]}") - print(f"dT: {self._dT}") - print("[DEBUG] DEM before dT division:") - - print("[DEBUG] Response row sums:") - for i, row in enumerate(R): - print( - f" {self.filter_names[i]}: sum={np.sum(row):.2e}, max={np.max(row):.2e}" - ) + # Storage for peak locations and DEM estimates + t_peaks = [] + log_dem_estimates = [] - print(f"[DEBUG] dT: {self._dT:.3f}") + # Loop over each filter + for i, (T_orig, R_orig, I_obs) in 
enumerate( + zip(self.response_temperatures, self.response_values, self._observed_intensities) + ): + logT_orig = np.log10(T_orig.to_value(u.K)) + R_vals = R_orig.to_value((u.DN / u.s / u.pix) * u.cm**5) + + if I_obs <= 0 or np.all(R_vals <= 0): + continue # skip unusable channel + + # 1. Peak location + max_idx = np.argmax(R_vals) + peak_val = R_vals[max_idx] + t_peak = np.round(logT_orig[max_idx] / self._dT) * self._dT + + # 2. Good window (where R > cutoff * peak) + good = np.where(R_vals > peak_val * cutoff)[0] + if len(good) < 2: + continue + + # 3. Compute denominator integral: sum(T * R * dlnT) + T_good = 10.0**logT_orig[good] + R_good = R_vals[good] + dlogT_native = np.diff(logT_orig).mean() + dlnT_native = np.log(10.0) * dlogT_native + denom = np.sum(T_good * R_good * dlnT_native) + + if denom <= 0: + continue + + # 4. DEM estimate at peak + dem_peak = I_obs / denom # [cm^-5 K^-1] + if dem_peak <= 0 or not np.isfinite(dem_peak): + continue + + log_dem_est = np.log10(dem_peak) + t_peaks.append(t_peak) + log_dem_estimates.append(log_dem_est) + + # 5. Handle duplicates: average log10 DEM at same t_peak + if len(t_peaks) == 0: + # Fallback: flat guess (IDL style) + est_log_dem_on_grid = np.ones_like(self.logT) * 22.0 + self._initial_log_dem = est_log_dem_on_grid + return est_log_dem_on_grid + + uniq_t = {} + for t, dem_val in zip(t_peaks, log_dem_estimates): + if t in uniq_t: + uniq_t[t].append(dem_val) + else: + uniq_t[t] = [dem_val] + t_peaks_uniq = np.array(sorted(uniq_t.keys())) + log_dem_uniq = np.array([np.mean(uniq_t[t]) for t in t_peaks_uniq]) + + if len(t_peaks_uniq) < 2: + # Not enough points > flat guess + est_log_dem_on_grid = np.ones_like(self.logT) * 22.0 + self._initial_log_dem = est_log_dem_on_grid + return est_log_dem_on_grid + + # 6. 
Interpolate sparse estimates onto the solver's grid + interp_func = interp1d( + t_peaks_uniq, + log_dem_uniq, + kind="linear", + bounds_error=False, + fill_value="extrapolate", + ) + est_log_dem_on_grid = interp_func(self.logT) - # Plotting - if plot: - import matplotlib.pyplot as plt + # Store for later use + self._initial_log_dem = est_log_dem_on_grid - plt.figure(figsize=(8, 4)) - ylabel = "DEM [cm⁻⁵ K⁻¹]" + return est_log_dem_on_grid + + + def _build_lmfit_parameters(self, n_knots: int = 6): + """ + Build lmfit.Parameters for the DEM spline knots. - # Custom label with filters and date - filters_str = ", ".join(self.observed_channel) - label_str = f"Initial DEM\n{filters_str}\n" # {self.date_obs}" + Parameters + ---------- + n_knots : int, optional + Number of spline knots across the logT grid. Default = 6. - if logscale: - plt.plot( - self.logT, - np.log10(self.initial_dem.value), - drawstyle="steps-mid", - label=label_str, - color="purple", - ) - ylabel = r"log$_{10}$ DEM [cm$^{-5}$ K$^{-1}$]" - else: - plt.plot( - self.logT, - self.initial_dem.value, - drawstyle="steps-mid", - label=label_str, - color="purple", - ) - plt.yscale("log") - - # if logscale: - # log_dem_vals = np.log10(self.initial_dem.value + 1e-30) - # plt.plot( - # self.logT, - # log_dem_vals, - # drawstyle="steps-mid", - # label=label_str, - # color="purple", - # ) - # ylabel = "log₁₀ DEM [cm⁻⁵ K⁻¹]" - # else: - # plt.plot( - # self.logT, - # self.initial_dem.value, - # drawstyle="steps-mid", - # label=label_str, - # color="purple", - # ) - # plt.yscale("log") - - plt.xlabel("log₁₀ T [K]") - plt.ylabel(ylabel) - plt.title("Initial DEM Estimate") - plt.grid(True) - plt.legend(loc="upper right", fontsize=8) - plt.tight_layout() - plt.show() - - print("Initial DEM estimate complete") - - # STEP 1 - Each temperature bin gets its own parameter, initialized with your initial DEM estimate - def _build_lmfit_parameters(self): - """ - Initializes lmfit Parameters from the initial DEM guess. 
- - Sets: + Returns ------- - self.lmfit_params : lmfit.Parameters - Each temperature bin gets a parameter (free by default). + params : lmfit.Parameters + Parameters object containing log10(DEM/solv_factor) values at knot points. + Each parameter is named "knot_i" where i = 0..n_knots-1. + + Notes + ----- + - IDL's `xrt_dem_iterative2.pro` seeds its fit by taking DEM estimates + at peak response temperatures and spreading them across the grid. + - Here, we select evenly spaced knots across the solver's logT range. + - The stored value at each knot is: + log10(DEM / solv_factor) + where `solv_factor` is typically 1e17. + - Bounds can be applied to prevent extreme DEM excursions if desired. """ - # if not hasattr(self, "initial_dem"): - # raise RuntimeError( - # "Call _estimate_initial_dem() before building parameters." - # ) + if not hasattr(self, "_initial_log_dem"): + raise AttributeError( + "Initial DEM not available. Run _estimate_initial_dem() first." + ) - # params = Parameters() + from lmfit import Parameters - # for i, val in enumerate(self.initial_dem): - # # You could add bounds here if needed (e.g., min=0) - # params.add(f"dem_{i}", value=val, min=0) + # Choose evenly spaced knot positions across logT range + knot_positions = np.linspace(self._min_T, self._max_T, n_knots) + self._knot_positions = knot_positions # store for later reconstruction - # 27 - # for i, val in enumerate(self.initial_dem): - # # Convert to float if it's a Quantity - # if hasattr(val, "unit"): - # val = val.to_value() # default: returns value in current unit - # params.add(f"dem_{i}", value=val, min=0) + # Interpolate initial DEM estimate at these knot positions + interp_func = interp1d( + self.logT, + self._initial_log_dem, + kind="linear", + bounds_error=False, + fill_value="extrapolate", + ) + init_log_dem_at_knots = interp_func(knot_positions) - # self.lmfit_params = params - # print(f"Built {len(params)} lmfit parameters for DEM fit") + # Convert to log10(DEM/solv_factor) + 
init_scaled = init_log_dem_at_knots - np.log10(self._solv_factor) - if not hasattr(self, "initial_dem_logT"): - raise RuntimeError( - "Call _estimate_initial_dem() before building parameters." - ) + # Build lmfit Parameters params = Parameters() - for i, val in enumerate(self.initial_dem_logT.to_value(u.cm**-5)): - params.add(f"dem_{i}", value=float(val), min=0.0) - self.lmfit_params = params + for i, val in enumerate(init_scaled): + params.add( + name=f"knot_{i}", + value=val, + min=-10, # optional bounds: avoid absurdly low + max=50, # optional bounds: avoid absurdly high + vary=True, + ) - print(f"Built {len(params)} lmfit parameters for DEM fit") + self._init_knot_params = params + return params - # STEP 2: Build the residual function - # This function computes how far off your DEM model’s predicted intensities are from your observed ones, normalized by the uncertainty. - def _residuals(self, params): + + def _reconstruct_dem_from_knots(self, params) -> np.ndarray: """ - Computes the residuals between modeled and observed intensities. + Reconstruct the DEM curve on the solver's logT grid from spline knot parameters. Parameters ---------- params : lmfit.Parameters - DEM values at each temperature bin. + Knot parameters where each value is log10(DEM / solv_factor). Returns ------- - np.ndarray - Residuals = (I_model - I_obs) / sigma - """ - # # 1. Get DEM vector from lmfit Parameters - # dem_vector = np.array([params[f"dem_{i}"].value for i in range(len(self.logT))]) - - # DEM per-logT (cm^-5) - dem_logT = np.array([params[f"dem_{i}"].value for i in range(len(self.logT))]) - - # 2. Compute modeled intensities: I_model = R · DEM - # I_model = self.response_matrix @ dem_vector - I_model = self.response_matrix @ (dem_logT * self.dlogT) # 27 - - # 3. 
Determine observational errors (user-provided or fallback) - errors = self.intensity_errors.to_value(u.DN / u.s) - # if self._intensity_errors is not None: - # errors = np.array(self._intensity_errors) - # else: - # errors = np.maximum( - # self.min_error, self.relative_error * self._observed_intensities - # ) - - # 4. Return normalized residuals - residuals = (I_model - self._observed_intensities) / errors - print( - f"[-Residuals stats > mean: {np.mean(residuals):.2e}, std: {np.std(residuals):.2e}" + dem_grid : ndarray + DEM values on `self.logT` grid in linear space [cm^-5 K^-1]. + + Notes + ----- + - Knot positions are stored in `self._knot_positions` when + `_build_lmfit_parameters()` is called. + - The stored parameter values are log10(DEM/solv_factor). + - Conversion back to DEM: + DEM = solv_factor * 10^(interp(log10 DEM/solv_factor)) + - Interpolation is linear in log space, as in IDL's `xrt_dem_iterative2.pro`. + """ + if not hasattr(self, "_knot_positions"): + raise AttributeError( + "Knot positions not found. Run _build_lmfit_parameters() first." + ) + + # Extract knot values from parameters (log10(DEM/solv_factor)) + knot_vals = np.array([params[f"knot_{i}"].value for i in range(len(self._knot_positions))]) + + # Interpolate across solver grid in log space + interp_func = interp1d( + self._knot_positions, + knot_vals, + kind="linear", + bounds_error=False, + fill_value="extrapolate", ) - return residuals + log_dem_scaled = interp_func(self.logT) + + # Convert back to DEM [cm^-5 K^-1] + dem_grid = self._solv_factor * (10.0 ** log_dem_scaled) - def fit_dem(self): + return dem_grid + + + # self._iteration_chi2 = [] + + # def _residuals(self, params) -> np.ndarray: + # """ + # Residuals function for DEM fitting. + + # Parameters + # ---------- + # params : lmfit.Parameters + # Knot parameters, each storing log10(DEM / solv_factor). 
+ + # Returns + # ------- + # residuals : ndarray + # Vector of normalized residuals for each observed channel: + # (I_obs - I_calc) / sigma + # Shape = (n_filters,) + + # Notes + # ----- + # - This is the core of the DEM solver. It reconstructs the DEM curve + # from spline knot parameters, computes modeled intensities by + # integrating DEM * Response over temperature, and returns the + # residuals relative to observations. + # - Integration is done using midpoint trapezoid approximation: + # I_calc[i] = sum_j DEM_mid[j] * R_mid[i,j] * T_mid[j] * dlnT + # """ + # # 1. Reconstruct DEM on the grid + # dem_grid = self._reconstruct_dem_from_knots(params) # [cm^-5 K^-1] + + # # 2. Prepare midpoint arrays + # dem_mid = 0.5 * (dem_grid[:-1] + dem_grid[1:]) + # R_mid = 0.5 * (self._response_matrix[:, :-1] + self._response_matrix[:, 1:]) + # T_mid = 0.5 * (self.T[:-1] + self.T[1:]).to_value(u.K) + + # # 3. Compute modeled intensities + # # Shape: (n_filters,) + # I_calc = np.sum(R_mid * dem_mid * T_mid * self.dlnT, axis=1) + + # # 4. Residuals: normalize by observational errors + # # sigma = self.intensity_errors.to_value(u.DN / u.s) # ensure numeric + # # residuals = (self._observed_intensities - I_calc) / sigma + # # Use MC-perturbed intensities if present; otherwise the originals + + # y_obs = getattr(self, "_active_observed_intensities", self._observed_intensities) + # sigma = self.intensity_errors.to_value(u.DN / u.s) # numeric + # residuals = (y_obs - I_calc) / sigma + + # residuals = (y_obs - I_calc) / sigma + # chi2_val = np.sum(residuals**2) + + # # Log χ² per iteration + # if not hasattr(self, "_iteration_chi2"): + # self._iteration_chi2 = [] + # self._iteration_chi2.append(chi2_val) + + # return residuals + + def _residuals(self, params) -> np.ndarray: """ - Runs the DEM fitting using lmfit's least-squares minimization. + Residuals function for DEM fitting. 
- Sets: + Returns ------- - self.fitted_dem : np.ndarray - Best-fit DEM solution (length = n_temps) - self.result : lmfit.MinimizerResult - Full fit result object from lmfit - """ - # if not hasattr(self, "lmfit_params"): - # self._build_lmfit_parameters() - - # if not hasattr(self, "lmfit_params"): - # raise RuntimeError("Call _build_lmfit_parameters() before fitting.") - - # 27 - if not hasattr(self, "initial_dem_logT"): - raise RuntimeError("Call _estimate_initial_dem() first.") - if not hasattr(self, "lmfit_params"): - self._build_lmfit_parameters() - - # Mimght not need- already using in _build_lmfit_parameters(). - # params = Parameters() - # for i, val in enumerate(self.initial_dem_logT.to_value(u.cm**-5)): - # params.add(f"dem_{i}", value=float(val), min=0.0) - # self.lmfit_params = params - - print("Starting DEM optimization..") + residuals : ndarray + (I_obs - I_calc) / sigma, one per filter. + """ + # 1. Reconstruct DEM on the grid + dem_grid = self._reconstruct_dem_from_knots(params) # [cm^-5 K^-1] + + # 2. Midpoint integration setup + dem_mid = 0.5 * (dem_grid[:-1] + dem_grid[1:]) + R_mid = 0.5 * (self._response_matrix[:, :-1] + self._response_matrix[:, 1:]) + T_mid = 0.5 * (self.T[:-1] + self.T[1:]).to_value(u.K) + + # 3. Modeled intensities + I_calc = np.sum(R_mid * dem_mid * T_mid * self.dlnT, axis=1) + + # 4. Residuals: normalize by observational errors + y_obs = getattr(self, "_active_observed_intensities", self._observed_intensities) + sigma = self.intensity_errors.to_value(u.DN / u.s) + residuals = (y_obs - I_calc) / sigma + + # 5. Track χ² per iteration + chi2_val = np.sum(residuals**2) + if not hasattr(self, "_iteration_chi2"): + self._iteration_chi2 = [] + self._iteration_chi2.append(chi2_val) + + return residuals + + + # def fit_dem(self, n_knots: int = 6, method: str = "least_squares", **kwargs): + # """ + # Fit the DEM using lmfit to minimize residuals. 
+ + # Parameters + # ---------- + # n_knots : int, optional + # Number of spline knots across the logT grid. Default = 6. + # method : str, optional + # Minimization method passed to `lmfit.minimize`. + # Common choices: "least_squares", "leastsq", "nelder". + # Default = "least_squares". + # **kwargs : dict + # Additional keyword arguments forwarded to `lmfit.minimize`. + + # Returns + # ------- + # result : lmfit.MinimizerResult + # The lmfit result object containing fit information. + + # Side Effects + # ------------ + # On successful fit, stores: + # - self.dem : ndarray + # Best-fit DEM(T) on self.logT [cm^-5 K^-1]. + # - self.fitted_intensities : ndarray + # Modeled intensities [DN/s/pix] for each filter. + # - self.chi2 : float + # Chi-squared (sum of squared residuals). + # - self.redchi2 : float + # Reduced chi-squared, normalized by (Nobs - Nparams). + + # Notes + # ----- + # This method automatically builds the logT grid, interpolates + # responses, and estimates an initial DEM if not already done. + # """ + # from lmfit import minimize + + + # # --- Auto-prepare prerequisites --- + # if not hasattr(self, "logT") or not hasattr(self, "T"): + # self.create_logT_grid() + + # if not hasattr(self, "_response_matrix"): + # self._interpolate_responses_to_grid() + + # if not hasattr(self, "_initial_log_dem"): + # self._estimate_initial_dem() + + # self._last_n_knots = n_knots #Used for print in the summary function + + # # 1. Build initial knot parameters + # params = self._build_lmfit_parameters(n_knots=n_knots) + + # # 2. Run minimization + # result = minimize(self._residuals, params, method=method, **kwargs) + + # # 3. 
On success, reconstruct DEM and fitted intensities + # best_dem = self._reconstruct_dem_from_knots(result.params) + # self.dem = best_dem # [cm^-5 K^-1] + + # # Compute fitted intensities using midpoint integration + # dem_mid = 0.5 * (best_dem[:-1] + best_dem[1:]) + # R_mid = 0.5 * (self._response_matrix[:, :-1] + self._response_matrix[:, 1:]) + # T_mid = 0.5 * (self.T[:-1] + self.T[1:]).to_value(u.K) + # I_fit = np.sum(R_mid * dem_mid * T_mid * self.dlnT, axis=1) + + # self.fitted_intensities = I_fit # [DN/s/pix] + # sigma = self.intensity_errors.to_value(u.DN / u.s) + # residuals = (self._observed_intensities - I_fit) / sigma + + # # Chi-squared metrics + # self.chi2 = np.sum(residuals**2) + # dof = len(self._observed_intensities) - len(result.params) + # self.redchi2 = self.chi2 / max(dof, 1) + + # return result + + def fit_dem(self, n_knots: int = 6, method: str = "least_squares", **kwargs): + """ + Fit the DEM using lmfit to minimize residuals. + Tracks chi² per iteration (like IDL's XRT_ITER_DEMSTAT). + """ + from lmfit import minimize + + # --- Auto-prepare prerequisites --- + if not hasattr(self, "logT") or not hasattr(self, "T"): + self.create_logT_grid() + if not hasattr(self, "_response_matrix"): + self._interpolate_responses_to_grid() + if not hasattr(self, "_initial_log_dem"): + self._estimate_initial_dem() + + self._last_n_knots = n_knots # for summary() + + # Storage for iteration statistics + self._iter_stats = {"chisq": [], "iteration": []} + + def _callback(params, iter, resid, *args, **kwargs): + # Compute chi² at this iteration + chi2 = np.sum(resid**2) + self._iter_stats["chisq"].append(chi2) + self._iter_stats["iteration"].append(iter) + + # 1. Build initial knot parameters + params = self._build_lmfit_parameters(n_knots=n_knots) + + # 2. 
Run minimization result = minimize( self._residuals, - self.lmfit_params, - method="least_squares", + params, + method=method, + iter_cb=_callback, # <-- track stats max_nfev=self.max_iterations, + **kwargs, ) - self.result = result + # 3. On success, reconstruct DEM + fitted intensities + best_dem = self._reconstruct_dem_from_knots(result.params) + self.dem = best_dem - dem_best_logT = np.array( - [self.result.params[f"dem_{i}"].value for i in range(len(self.logT))] - ) + dem_mid = 0.5 * (best_dem[:-1] + best_dem[1:]) + R_mid = 0.5 * (self._response_matrix[:, :-1] + self._response_matrix[:, 1:]) + T_mid = 0.5 * (self.T[:-1] + self.T[1:]).to_value(u.K) + I_fit = np.sum(R_mid * dem_mid * T_mid * self.dlnT, axis=1) - self.fitted_dem_logT = dem_best_logT * (u.cm**-5) # 28 - self.fitted_dem = (dem_best_logT / (np.log(10.0) * self.T.to_value(u.K))) * ( - u.cm**-5 / u.K - ) + self.fitted_intensities = I_fit + sigma = self.intensity_errors.to_value(u.DN / u.s) + residuals = (self._observed_intensities - I_fit) / sigma - # self.dem_fit = dem_per_K_on_grid # (N,), per-K (cm^-5 K^-1) - # self.fitted_dem_logT = (np.log(10.0) * self.T) * self.dem_fit * u.cm**-5 # (N,), per-log10T + # Chi² metrics + self.chi2 = np.sum(residuals**2) + dof = len(self._observed_intensities) - len(result.params) + self.redchi2 = self.chi2 / max(dof, 1) + self.dof = dof # save for summary - if not result.success: - print(" DEM fit did not fully converge:") - print(" >", result.message) + return result - # self.fitted_dem = np.array([result.params[f"dem_{i}"].value for i in range(len(self.logT))]) - print( - f" DEM fit complete — reduced chi-squared: {result.chisqr / len(self._observed_intensities):.2f}" - ) + def fit_with_multiple_methods(self, methods=("leastsq", "least_squares", "nelder"), n_knots: int = 6, **kwargs): + """ + Try multiple lmfit minimization methods and pick the best χ². 
- print(" DEM fit complete") - print( - f" → Reduced chi-squared: {result.chisqr / len(self._observed_intensities):.2f}" - ) - print(f" → Total iterations: {result.nfev}") + Parameters + ---------- + methods : tuple of str, optional + Minimization methods to test. Default = ("leastsq", "least_squares", "nelder"). + n_knots : int, optional + Number of spline knots for DEM fit. Default = 6. + **kwargs : dict + Extra arguments passed to `lmfit.minimize`. - return result + Returns + ------- + best_result : lmfit.MinimizerResult + Result from the method with lowest chi². + """ + from lmfit import minimize + + if not hasattr(self, "_initial_log_dem"): + self._estimate_initial_dem() + + results = {} + best_chi2 = np.inf + best_result = None + best_method = None + + for method in methods: + print(f"\n>>> Trying method: {method}") + params = self._build_lmfit_parameters(n_knots=n_knots) + result = minimize(self._residuals, params, method=method, **kwargs) + + # Compute DEM + chi² for this fit + dem = self._reconstruct_dem_from_knots(result.params) + dem_mid = 0.5 * (dem[:-1] + dem[1:]) + R_mid = 0.5 * (self._response_matrix[:, :-1] + self._response_matrix[:, 1:]) + T_mid = 0.5 * (self.T[:-1] + self.T[1:]).to_value(u.K) + I_fit = np.sum(R_mid * dem_mid * T_mid * self.dlnT, axis=1) + + sigma = self.intensity_errors.to_value(u.DN / u.s) + residuals = (self._observed_intensities - I_fit) / sigma + chi2 = np.sum(residuals**2) + + print(f" χ² = {chi2:.3e}") + + results[method] = (result, chi2) + + if chi2 < best_chi2: + best_chi2 = chi2 + best_result = result + best_method = method + + print(f"\n>>> Best method: {best_method} with χ² = {best_chi2:.3e}") + + # Store outputs from the best fit + best_dem = self._reconstruct_dem_from_knots(best_result.params) + self.dem = best_dem + dem_mid = 0.5 * (best_dem[:-1] + best_dem[1:]) + R_mid = 0.5 * (self._response_matrix[:, :-1] + self._response_matrix[:, 1:]) + T_mid = 0.5 * (self.T[:-1] + self.T[1:]).to_value(u.K) + 
self.fitted_intensities = np.sum(R_mid * dem_mid * T_mid * self.dlnT, axis=1) + sigma = self.intensity_errors.to_value(u.DN / u.s) + residuals = (self._observed_intensities - self.fitted_intensities) / sigma + self.chi2 = np.sum(residuals**2) + dof = len(self._observed_intensities) - len(best_result.params) + self.redchi2 = self.chi2 / max(dof, 1) + + return best_result + + + def run_monte_carlo(self, n_runs=None, n_knots=6, method="least_squares", random_seed=None): + if random_seed is not None: + np.random.seed(random_seed) + """ + Run Monte Carlo DEM fits to estimate uncertainties and store full ensemble. - def print_residual_diagnostics(self, params): + Returns + ------- + dem_ensemble : ndarray + Shape (n_runs, n_temperatures) array of DEM solutions. + """ + from lmfit import minimize + + if n_runs is None: + n_runs = self._monte_carlo_runs + if n_runs <= 0: + raise ValueError("Monte Carlo runs disabled (n_runs=0).") + + sigma = self.intensity_errors.to_value(u.DN / u.s) + dem_ensemble = [] + + self._last_n_knots = n_knots #Used for print in the summary function + + for i in range(n_runs): + noisy_obs = self._observed_intensities + np.random.normal(0, sigma) + #print(f"Given intensities: {noisy_obs}") + self._observed_intensities_mc = noisy_obs # temp override + + # params = self._build_lmfit_parameters(n_knots=n_knots) #Older Version Sept 18 + # result = minimize(lambda p: self._residuals(p), params, method=method) #Older Version Sept 18 + params = self._build_lmfit_parameters(n_knots=n_knots) + # Activate noisy intensities for this run + self._active_observed_intensities = noisy_obs + try: + result = minimize(self._residuals, params, method=method) + finally: + # Always restore (so the main dataset isn’t polluted) + if hasattr(self, "_active_observed_intensities"): + delattr(self, "_active_observed_intensities") + + + dem_i = self._reconstruct_dem_from_knots(result.params) + dem_ensemble.append(dem_i) + + dem_ensemble = np.array(dem_ensemble) + + # Store 
ensemble + uncertainty + self._dem_ensemble = dem_ensemble + self.dem_uncertainty = np.std(dem_ensemble, axis=0) + self.dem_median = np.median(dem_ensemble, axis=0) + + return dem_ensemble + + + ########################################################################################################################### + ####################################### Plotting section STARTS ########################################################### + ########################################################################################################################### + + ############ Plotting function 1 ######################################################################################### + def plot_dem_results(self, results): + """ + Quick plotting for users who only have the results dict from solve(). - dem_logT = np.array([params[f"dem_{i}"].value for i in range(len(self.logT))]) - I_model = self.response_matrix @ (dem_logT * self.dlogT) - errors = self.intensity_errors.to_value(u.DN / u.s) - residuals = (I_model - self._observed_intensities) / errors + Parameters + ---------- + results : dict + Dictionary returned by self.solve(). Keys: + - "temperature" : log10(T) grid + - "dem" : best-fit DEM [cm^-5 K^-1] + - "dem_err" : DEM uncertainty (if Monte Carlo enabled) - print("Observed Intensities:", self._observed_intensities) - print("Modeled Intensities:", I_model) - print("Errors:", errors) - print("Residuals:", residuals) - print( - f"Residuals stats → mean: {residuals.mean():.2e}, std: {residuals.std():.2e}" - ) + Notes + ----- + - Best-fit DEM is shown as a blue line. + - If dem_err exists, a blue shaded band (±1σ) is shown. 
+ """ + import matplotlib.pyplot as plt + import numpy as np - def plot_dem_fit( - self, - logscale: bool = True, - scale: str = "per_K", # "per_K" or "per_log10T" - show_initial: bool = True, - ax=None, - title: str | None = None, - show: bool = True, - ): + logT = results["temperature"] + dem = results["dem"] + dem_err = results.get("dem_err", None) + + fig, ax = plt.subplots(figsize=(8,6)) + + # Best-fit DEM + ax.step(logT, np.log10(dem + 1e-40), where="mid", + color="blue", linewidth=2, label="Best-fit DEM") + + # Uncertainty shading + if dem_err is not None: + upper = np.log10(dem + dem_err + 1e-40) + lower = np.log10(np.clip(dem - dem_err, 1e-40, None)) + ax.fill_between(logT, lower, upper, step="mid", + color="blue", alpha=0.2, label="±1σ") + + ax.set_xlabel("log10 T [K]") + ax.set_ylabel("log10 DEM [cm$^{-5}$ K$^{-1}$]") + ax.set_xlim(logT.min(), logT.max()) + ax.set_ylim(np.floor(np.log10(dem.min()+1e-40)), + np.ceil(np.log10(dem.max()+1e-40))) + ax.set_title("DEM Solution") + ax.legend() + ax.grid(alpha=0.3) + + plt.tight_layout() + plt.show() + + ############ Plotting function 2 ######################################################################################### + def plot_dem_uncertainty(self): + """ + Plot DEM with Monte Carlo uncertainty band. + + Requires + -------- + self.dem : ndarray + Best-fit DEM curve. + self.dem_uncertainty : ndarray + Uncertainty from Monte Carlo runs. + self.logT : ndarray + Temperature grid. + """ + import matplotlib.pyplot as plt + import numpy as np + + if not hasattr(self, "dem"): + raise AttributeError("No DEM found. Run fit_dem() or solve() first.") + if not hasattr(self, "dem_uncertainty"): + raise AttributeError("No DEM uncertainty found. 
Run run_monte_carlo() first.") + + logT = self.logT + dem = self.dem + dem_err = self.dem_uncertainty + + fig, ax = plt.subplots(figsize=(8,6)) + + # Best-fit DEM + ax.step(logT, np.log10(dem + 1e-40), where="mid", + color="blue", linewidth=2, label="Best-fit DEM") + + # ±1σ shaded region + upper = np.log10(dem + dem_err + 1e-40) + lower = np.log10(np.clip(dem - dem_err, 1e-40, None)) + ax.fill_between(logT, lower, upper, step="mid", + color="blue", alpha=0.2, label="±1σ") + + ax.set_xlabel("log10 T [K]") + ax.set_ylabel("log10 DEM [cm$^{-5}$ K$^{-1}$]") + ax.set_xlim(logT.min(), logT.max()) + ax.set_ylim(np.floor(np.log10(dem.min()+1e-40)), + np.ceil(np.log10((dem+dem_err).max()+1e-40))) + ax.set_title("DEM with Monte Carlo Uncertainty") + ax.legend() + ax.grid(alpha=0.3) + + plt.tight_layout() + plt.show() + + ############ Plotting function 3 ######################################################################################### + def plot_idl_style(self): """ - Plot the fitted DEM (and optional initial DEM) using a consistent scale. + Faithful mirror of IDL's xrt_dem_iterative2.pro plotting style. + - Black dotted lines → Monte Carlo DEMs (if available) + - Green line → Best-fit DEM + - No shading, no extras + + Requires + -------- + self.dem : ndarray + self.logT : ndarray """ import matplotlib.pyplot as plt import numpy as np - if ax is None: - fig, ax = plt.subplots() + if not hasattr(self, "dem"): + raise AttributeError("No DEM found. 
Run fit_dem() or solve() first.") - # Grid logT = self.logT - T = 10.0**logT - - # --- find fitted DEM (prefer canonical names; fall back to legacy) --- - linear_candidates = [ - "dem_fit", - "dem", - "fitted_dem", - "dem_solved", - "dem_solution", - ] - log_candidates = ["logdem_fit", "logdem", "fitted_logdem", "logdem_solved"] - - dem_fit_perK = None - for name in linear_candidates: - if hasattr(self, name) and getattr(self, name) is not None: - dem_fit_perK = np.asarray(getattr(self, name), dtype=float) - break - if dem_fit_perK is None: - for name in log_candidates: - if hasattr(self, name) and getattr(self, name) is not None: - dem_fit_perK = 10.0 ** np.asarray(getattr(self, name), dtype=float) - break - if dem_fit_perK is None: - raise RuntimeError( - "No fitted DEM found. Expected one of " - f"{linear_candidates + log_candidates} to exist on the object." - ) + dem = self.dem + + fig, ax = plt.subplots(figsize=(8,6)) + + # Monte Carlo ensemble (if available) + if hasattr(self, "_dem_ensemble"): + mc_dems = np.array(self._dem_ensemble) + for i in range(mc_dems.shape[0]): + ax.step(logT, np.log10(mc_dems[i] + 1e-40), + where="mid", linestyle=":", color="black", alpha=0.3, linewidth=0.6) + + # Best-fit DEM + ax.step(logT, np.log10(dem + 1e-40), where="mid", + color="green", linewidth=2, label="Best-fit DEM") + + ax.set_xlabel("log10 T [K]") + ax.set_ylabel("log10 DEM [cm$^{-5}$ K$^{-1}$]") + ax.set_xlim(logT.min(), logT.max()) + ax.set_ylim(np.floor(np.log10(dem.min()+1e-40)), + np.ceil(np.log10(dem.max()+1e-40))) + ax.set_title("DEM (IDL Style)") + ax.legend() + ax.grid(alpha=0.3) - # --- initial DEM (optional) --- - dem_init_perK = None - if show_initial: - init_linear_candidates = ["initial_dem", "dem_initial"] - init_log_candidates = ["initial_logdem", "logdem_initial"] - for name in init_linear_candidates: - if hasattr(self, name) and getattr(self, name) is not None: - dem_init_perK = np.asarray(getattr(self, name), dtype=float) - break - if dem_init_perK 
is None: - for name in init_log_candidates: - if hasattr(self, name) and getattr(self, name) is not None: - dem_init_perK = 10.0 ** np.asarray( - getattr(self, name), dtype=float - ) - break - - # --- choose scientific scale --- - if scale == "per_K": - y_fit_linear = np.clip(dem_fit_perK, np.finfo(float).tiny, None) - y_init_linear = ( - None - if dem_init_perK is None - else np.clip(dem_init_perK, np.finfo(float).tiny, None) - ) - y_label_lin = r"DEM per K [cm$^{-5}$ K$^{-1}$]" - y_label_log10 = r"$\log_{10}$ DEM per K [cm$^{-5}$ K$^{-1}$]" - elif scale == "per_log10T": - y_fit_linear = np.clip( - dem_fit_perK * T * np.log(10.0), np.finfo(float).tiny, None - ) - y_init_linear = ( - None - if dem_init_perK is None - else np.clip( - dem_init_perK * T * np.log(10.0), np.finfo(float).tiny, None - ) - ) - y_label_lin = r"DEM per $\log_{10}T$ [cm$^{-5}$]" - y_label_log10 = r"$\log_{10}$ DEM per $\log_{10}T$ [cm$^{-5}$]" - else: - raise ValueError("scale must be 'per_K' or 'per_log10T'") - - # --- plot without double-logging --- - if logscale: - ax.semilogy(logT, y_fit_linear, linestyle="-", label="Fitted DEM") - if y_init_linear is not None: - ax.semilogy( - logT, y_init_linear, linestyle="-", alpha=0.7, label="Initial DEM" - ) - ax.set_ylabel(y_label_lin) - else: - ax.plot(logT, np.log10(y_fit_linear), linestyle="-", label="Fitted DEM") - if y_init_linear is not None: - ax.plot( - logT, - np.log10(y_init_linear), - linestyle="-", - alpha=0.7, - label="Initial DEM", - ) - ax.set_ylabel(y_label_log10) + plt.tight_layout() + plt.show() - ax.set_xlabel(r"$\log_{10} T$ [K]") - ax.set_title( - title or ("Initial vs Fitted DEM" if show_initial else "Fitted DEM") - ) + + ############ Plotting function 4 ######################################################################################### + def plot_fit_residuals(self): + import matplotlib.pyplot as plt + """ + Plot observed vs fitted intensities and residuals. 
+ """ + obs = self._observed_intensities + fit = self.fitted_intensities + sigma = self.intensity_errors.to_value(u.DN / u.s) + + filters = self.filter_names + indices = np.arange(len(obs)) + + # Scatter: observed vs fitted + plt.figure(figsize=(7,5)) + plt.errorbar(indices, obs, yerr=sigma, fmt="o", label="Observed", color="black") + plt.plot(indices, fit, "s", label="Fitted", color="red") + plt.xticks(indices, filters, rotation=45) + plt.ylabel("Intensity [DN/s/pix]") + plt.title("Observed vs Fitted Intensities") + plt.legend() + plt.tight_layout() + plt.show() + + # Residuals + residuals = (obs - fit) / sigma + plt.figure(figsize=(7,4)) + plt.axhline(0, color="gray", linestyle="--") + plt.plot(indices, residuals, "o", color="blue") + plt.xticks(indices, filters, rotation=45) + plt.ylabel("(Obs - Fit) / σ") + plt.title("Residuals per Filter") + plt.tight_layout() + plt.show() + + + ############ Plotting function 5 ######################################################################################### + + def plot_dem_with_median_bins(self): + ####******* IDL MIRROR METHODS *******#### + """ + Reproduce IDL-style DEM plot with: + - Best-fit DEM (green) + - Monte Carlo DEMs as dotted step lines (gray/black) + - Median DEM across ensemble (blue) + - Closest DEM to the median (orange) + + Requires: + self._dem_ensemble from run_monte_carlo() + self.dem from fit_dem() + """ + import matplotlib.pyplot as plt + if not hasattr(self, "_dem_ensemble"): + raise AttributeError("Monte Carlo ensemble not available. Run run_monte_carlo() first.") + if not hasattr(self, "dem"): + raise AttributeError("Best-fit DEM not available. 
Run fit_dem() first.") + + logT = self.logT + mc_dems = np.array(self._dem_ensemble) # shape (N_runs, N_T) + best_fit = self.dem # (N_T,) + + # --- Median DEM at each temperature bin + med = np.median(mc_dems, axis=0) + + # --- Closest DEM (min L2 distance to median) + diffs = np.linalg.norm(mc_dems - med, axis=1) + closest_idx = np.argmin(diffs) + closest_dem = mc_dems[closest_idx] + + # --- Plot + fig, ax = plt.subplots(figsize=(9, 6)) + + # MC DEMs: dotted black + for i in range(mc_dems.shape[0]): + ax.step(logT, np.log10(mc_dems[i] + 1e-40), + where="mid", linestyle=":", color="black", alpha=0.3, linewidth=0.6) + + # Best-fit DEM (green) + ax.step(logT, np.log10(best_fit + 1e-40), where="mid", + color="green", linewidth=2, label="Obs DEM") + + # Median DEM (blue) + ax.step(logT, np.log10(med + 1e-40), where="mid", + color="blue", linewidth=1.8, label="Median in bins") + + # Closest-to-median DEM (orange) + ax.step(logT, np.log10(closest_dem + 1e-40), where="mid", + color="orange", linewidth=1.8, label="Closest DEM to median") + + # Style + ax.set_xlim(self.min_T, self.max_T) + ax.set_ylim(0, 30) + ax.set_xlabel("Log T (K)") + #ax.set_ylim(np.floor(np.min(np.log10(mc_dems+1e-40))),np.ceil(np.max(np.log10(mc_dems+1e-40)))) + ax.set_ylabel("Log DEM [cm$^{-5}$ K$^{-1}$]") ax.legend() ax.grid(True, alpha=0.3) - return ax + ax.set_title("DEM with Monte Carlo Spread, Median, and Closest Fit (IDL Style)") - def solve(self): - print("Running DEM fit...") + plt.tight_layout() + plt.show() + + ############ Plotting function 5 ######################################################################################### - # Build spline parameters - self._build_lmfit_parameters() + def plot_iteration_stats(self): + """ + Plot χ² convergence across solver iterations. 
- # Perform the fit - result = minimize( - self._residuals, - self.lmfit_params, - method="least_squares", - max_nfev=self.max_iterations, - ) + Requires + -------- + self._iteration_chi2 : list + Logged χ² values from fit_dem(). + """ + import matplotlib.pyplot as plt + import numpy as np - self.result = result + if not hasattr(self, "_iteration_chi2") or len(self._iteration_chi2) == 0: + raise AttributeError("No iteration stats found. Run fit_dem() or solve() first.") - # Extract fitted DEM from lmfit parameters - dem_best_logT = np.array( - [result.params[f"dem_{i}"].value for i in range(len(self.logT))] - ) - self.fitted_dem_logT = dem_best_logT * u.cm**-5 - self.fitted_dem = ( - (dem_best_logT / (np.log(10.0) * self.T.to_value(u.K))) * u.cm**-5 / u.K - ) + chi2_vals = np.array(self._iteration_chi2) + + fig, ax = plt.subplots(figsize=(8, 5)) + ax.plot(range(len(chi2_vals)), chi2_vals, lw=1.5) + ax.set_xlabel("Iteration") + ax.set_ylabel("Chi²") + ax.set_title("Chi² Convergence") + ax.grid(alpha=0.3) + + # Log-scale option if dynamic range is huge + if chi2_vals.max() / max(chi2_vals.min(), 1e-10) > 1e4: + ax.set_yscale("log") + + plt.tight_layout() + plt.show() - def solved(self): + ########################################################################################################################### + ####################################### Plotting section ENDS ########################################################### + ########################################################################################################################### + + + + def solve(self, n_knots: int = 6, method: str = "least_squares", run_mc: bool = True): """ - Run the full DEM solver. + Run the full DEM solver, IDL-style. - This method orchestrates the entire pipeline: - 1. Build the temperature grid. - 2. Interpolate responses onto the grid (builds response matrix). - 3. Estimate initial DEM guess. - 4. Run the least-squares solver (LMFIT). - 5. 
Optionally perform Monte Carlo runs for uncertainties. - 6. Store results in object attributes. + This orchestrates: + 1. Build temperature grid. + 2. Interpolate responses (response matrix). + 3. Estimate initial DEM. + 4. Fit DEM with lmfit. + 5. Optionally run Monte Carlo ensemble. + + Parameters + ---------- + n_knots : int, optional + Number of spline knots across logT. Default = 6. + method : str, optional + Minimization method for `lmfit.minimize`. Default = "least_squares". + run_mc : bool, optional + Whether to run Monte Carlo simulations (using self.monte_carlo_runs). + Default = True. Returns ------- - dict - Dictionary containing primary solver outputs: - - "temperature" : log10 temperature grid + results : dict + Dictionary of solver outputs: + - "temperature" : log10(T) grid - "dem" : best-fit DEM [cm^-5 K^-1] - "dem_err" : DEM uncertainty (if MC runs > 0) - "ifit" : fitted intensities [DN/s/pix] - - "chi2" : chi-squared metric + - "chi2" : χ² + - "redchi2" : reduced χ² """ - # 1. Build logT grid + # 1. Ensure grid & responses self.create_logT_grid() - - # 2. Prepare response matrix self._interpolate_responses_to_grid() - # 3. Estimate initial DEM (placeholder – next step to implement) + # 2. Estimate initial DEM self._estimate_initial_dem() - # 4. Run least-squares fit (placeholder) - self._fit_dem() + # 3. Fit DEM + result = self.fit_dem(n_knots=n_knots, method=method) - # 5. Monte Carlo ensemble (optional – placeholder) - if self.monte_carlo_runs > 0: - self._run_monte_carlo() + # 4. Monte Carlo (optional) + if run_mc and self.monte_carlo_runs > 0: + self.run_monte_carlo(n_runs=self.monte_carlo_runs, + n_knots=n_knots, + method=method) - # 6. Bundle results - results = { + # 5. 
Bundle results + return { "temperature": self.logT, "dem": self.dem, "dem_err": getattr(self, "dem_uncertainty", None), "ifit": self.fitted_intensities, - "chi2": self.chi2, + "chi2": getattr(self, "chi2", None), + "redchi2": getattr(self, "redchi2", None), } - return results - - # if not result.success: - # print(" DEM fit did not fully converge:") - # print(" >", result.message) - - # print(" DEM fit complete") - # print(f" > Reduced chi-squared: {result.chisqr / len(self._observed_intensities):.2f}") - # print(f" > Total iterations: {result.nfev}") - # # Automatically run MC if enabled - # if self.monte_carlo_runs > 0: - # print(f"Running Monte Carlo with {self.monte_carlo_runs} trials...") - # from .monte_carlo_iteration import MonteCarloIteration - # mc = MonteCarloIteration(self) - # mc.run_mc_simulation(n_draws=self.monte_carlo_runs) - # self.mc_results = mc - # self.mc_stats = mc.mc_stats - - return result def summary(self): + """ + Print a comprehensive summary of the DEM solver setup, + including inputs, solver configuration, fit results, + Monte Carlo ensemble status, and available plotting helpers. 
+ """ print("\nXRTpy DEM Iterative Setup Summary\n") - print("-" * 50) - print(f" Filters: {self.filter_names}") - print(f" Obs Intensities: {self.observed_intensities}") - print(f" Number of obs: {len(self._observed_intensities)}") - print(f" Intensity Errors: {self.intensity_errors}") - print( - f" Error model used: {'User-provided' if self._intensity_errors is not None else f'Auto (obs * {self.relative_error}, min={self.min_error} DN/s)'}" - ) - print(f" Temp Grid: logT {self.min_T}–{self.max_T}, step {self.dT}") - print(f" Temp bins: {len(self.logT)}") - print(f" Solver factor: {self.solv_factor:.1e}") - print(f" Monte Carlo runs: {self.monte_carlo_runs or 'None'}") - print(f" Max Iterations: {self.max_iterations}") - print("-" * 50) - - # def summary(self): - # print("XRTpy DEM Iterative Setup Summary") - # print("-" * 40) - # print(f" Filters: {self.filter_names}") - # print(f" Obs Intensities: {self.observed_intensities}") - # print(f" Number of observations (Nobs): {len(self._observed_intensities)}") - # print(f" Solver Normalization Factor: {self.solv_factor:.1e}") - # print( - # f" Monte Carlo runs: {self.monte_carlo_runs if self.monte_carlo_runs > 0 else 'None'}" - # ) - # print(f" Max Iterations: {self.max_iterations}") - # print(f" Intensity Errors: {self.intensity_errors}") - # print(f" Temp Grid: logT {self.min_T} to {self.max_T} (step {self.dT})") - # print(f" Temp bins: {len(self.logT)}") - # print( - # f" Error model used: {'User-provided' if self._intensity_errors is not None else 'Auto (obs * 0.03, min=2 DN/s)'}" - # ) - # if self._intensity_errors is None: - # print( - # "For more info: https://hesperia.gsfc.nasa.gov/ssw/hinode/xrt/idl/util/xrt_dem_iterative2.pro" - # ) - # print("-" * 40) + print("=" * 65) + + # Filters & Observations + print(f" Filters: {self.filter_names}") + print(f" Observed Intensities: {self.observed_intensities}") + print(f" Number of channels: {len(self._observed_intensities)}") + + # Errors + print(f" Intensity 
Errors: {self.intensity_errors}") + if self._intensity_errors is not None: + print(" Error model used: User-provided") + else: + print( + f" Error model used: Auto-estimated " + f"(obs * {self.relative_error}, min={self.min_error} DN/s)" + ) + print(" [IDL reference: xrt_dem_iterative2.pro]") + + # Temperature grid + print(f" Temperature grid: logT {self.min_T:.2f}–{self.max_T:.2f}, step {self.dT}") + print(f" Temp bins: {len(self.logT)}") + print(f" dlogT: {self.dlogT:.3f}, dlnT: {self.dlnT:.3f}") + + # Solver setup + print(f" Solver factor: {self.solv_factor:.1e}") + print(f" Monte Carlo runs: {self.monte_carlo_runs or 'None'}") + print(f" Max Iterations: {self.max_iterations}") + print(f" Knots (n_knots): {getattr(self, '_last_n_knots', 'default=6')}") + + if hasattr(self, "chi2"): + dof = len(self._observed_intensities) - len(getattr(self, "_init_knot_params", [])) + print(f" χ²: {self.chi2:.4e} (dof={dof})") + + + # Responses + print(f" Response unit: {self._response_unit}") + if hasattr(self, "_response_matrix"): + print(f" Response matrix: {self._response_matrix.shape} (filters × bins)") + else: + print(" Response matrix: Not yet built") + + # Fit results + if hasattr(self, "dem"): + print("\n Fit Results:") + print(f" DEM bins: {self.dem.shape}") + if hasattr(self, "chi2"): + print(f" Chi²: {self.chi2:.4e}") + if hasattr(self, "redchi2"): + print(f" Reduced Chi²: {self.redchi2:.4e}") + if hasattr(self, "fitted_intensities"): + print(f" Fitted Intensities: {self.fitted_intensities}") + + # Monte Carlo results + if hasattr(self, "_dem_ensemble"): + print("\n Monte Carlo Ensemble:") + n_mc = len(self._dem_ensemble) + print(f" Runs stored: {n_mc}") + dem_stack = np.array(self._dem_ensemble) + med = np.median(dem_stack, axis=0) + spread = np.percentile(dem_stack, [16, 84], axis=0) + print(" DEM median (log10 cm^-5 K^-1):") + print(f" First 5 bins: {np.log10(med[:5]+1e-40)}") + print(" DEM 1σ spread (first bin):") + print(f" {np.log10(spread[0,0]+1e-40):.2f} – 
{np.log10(spread[1,0]+1e-40):.2f}") + print(" Reproducibility: Run with random_seed for identical results") + + if hasattr(self, "chi2"): + print(f" Chi²: {self.chi2:.4e}") + if hasattr(self, "redchi2"): + print(f" Reduced Chi²: {self.redchi2:.4e}") + if hasattr(self, "dof"): + print(f" Degrees of Freedom: {self.dof}") + if hasattr(self, "_iter_stats") and len(self._iter_stats["chisq"]) > 0: + print(f" Iterations tracked: {len(self._iter_stats['chisq'])}") + print(f" Final Iter χ²: {self._iter_stats['chisq'][-1]:.4e}") + + # Plotting guidance + # print("\n Plotting Options:") + # if hasattr(self, "dem"): + # print(" • plot_dem_results(results) → Quick plot from solve() dictionary") + # print(" • plot_dem_uncertainty() → Best-fit DEM + shaded ±1σ (if MC available)") + # print(" • plot_idl_style() → IDL-style view (best-fit + MC curves)") + # print(" • plot_dem_with_median_bins() → Median + closest DEM (IDL style extension)") + # print(" • plot_fit_residuals() → Observed vs fitted intensities") + + print("=" * 65) From 28d5b36872544a77fcb0b1534e436b52c0db8397 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Mon, 22 Sep 2025 15:52:37 -0400 Subject: [PATCH 073/121] Moving plotting function to its own script --- xrtpy/xrt_dem_iterative/dem_plotting.py | 253 ++++++++++++++++++++++++ 1 file changed, 253 insertions(+) create mode 100644 xrtpy/xrt_dem_iterative/dem_plotting.py diff --git a/xrtpy/xrt_dem_iterative/dem_plotting.py b/xrtpy/xrt_dem_iterative/dem_plotting.py new file mode 100644 index 000000000..7e22018ff --- /dev/null +++ b/xrtpy/xrt_dem_iterative/dem_plotting.py @@ -0,0 +1,253 @@ +__all__ = [ + "plot_dem_results", + "plot_dem_uncertainty", + "plot_idl_style", + "plot_fit_residuals", + "plot_dem_with_median_bins", + "plot_iteration_stats", +] + +import matplotlib.pyplot as plt +import numpy as np +import astropy.units as u + + +############################################################################### +# Plotting function 1 
+############################################################################### +def plot_dem_results(dem): + """ + Plot the fitted DEM solution (with optional Monte Carlo uncertainty). + + Parameters + ---------- + dem : XRTDEMIterative + Solver object. If not yet solved, .solve() will be called. + """ + if not hasattr(dem, "dem"): + dem.solve() + + logT = dem.logT + best_fit = dem.dem + dem_err = getattr(dem, "dem_uncertainty", None) + + fig, ax = plt.subplots(figsize=(8, 6)) + + ax.step(logT, np.log10(best_fit + 1e-40), where="mid", + color="blue", linewidth=2, label="Best-fit DEM") + + if dem_err is not None: + upper = np.log10(best_fit + dem_err + 1e-40) + lower = np.log10(np.clip(best_fit - dem_err, 1e-40, None)) + ax.fill_between(logT, lower, upper, step="mid", + color="blue", alpha=0.2, label="+/-1σ") + + ax.set_xlabel("log10 T [K]") + ax.set_ylabel("log10 DEM [cm$^{-5}$ K$^{-1}$]") + ax.set_xlim(logT.min(), logT.max()) + ax.set_ylim(np.floor(np.log10(best_fit.min() + 1e-40)), + np.ceil(np.log10(best_fit.max() + 1e-40))) + ax.set_title("DEM Solution") + ax.legend() + ax.grid(alpha=0.3) + + plt.tight_layout() + plt.show() + + +############################################################################### +# Plotting function 2 +############################################################################### +def plot_dem_uncertainty(dem): + """ + Plot DEM with Monte Carlo uncertainty band. + """ + if not hasattr(dem, "dem"): + dem.solve() + if not hasattr(dem, "dem_uncertainty"): + raise AttributeError("No DEM uncertainty found. 
Run with monte_carlo_runs > 0.") + + logT = dem.logT + best_fit = dem.dem + dem_err = dem.dem_uncertainty + + fig, ax = plt.subplots(figsize=(8, 6)) + + ax.step(logT, np.log10(best_fit + 1e-40), where="mid", + color="blue", linewidth=2, label="Best-fit DEM") + + upper = np.log10(best_fit + dem_err + 1e-40) + lower = np.log10(np.clip(best_fit - dem_err, 1e-40, None)) + ax.fill_between(logT, lower, upper, step="mid", + color="blue", alpha=0.2, label="+/-1σ") + + ax.set_xlabel("log10 T [K]") + ax.set_ylabel("log10 DEM [cm$^{-5}$ K$^{-1}$]") + ax.set_xlim(logT.min(), logT.max()) + ax.set_ylim(np.floor(np.log10(best_fit.min() + 1e-40)), + np.ceil(np.log10((best_fit + dem_err).max() + 1e-40))) + ax.set_title("DEM with Monte Carlo Uncertainty") + ax.legend() + ax.grid(alpha=0.3) + + plt.tight_layout() + plt.show() + + +############################################################################### +# Plotting function 3 +############################################################################### +def plot_idl_style(dem): + """ + Faithful mirror of IDL's xrt_dem_iterative2.pro plotting style. 
+ + - Black dotted lines -> Monte Carlo DEMs (if available) + - Green line -> Best-fit DEM + """ + if not hasattr(dem, "dem"): + dem.solve() + + logT = dem.logT + best_fit = dem.dem + + fig, ax = plt.subplots(figsize=(8, 6)) + + if hasattr(dem, "_dem_ensemble"): + mc_dems = np.array(dem._dem_ensemble) + for i in range(mc_dems.shape[0]): + ax.step(logT, np.log10(mc_dems[i] + 1e-40), + where="mid", linestyle=":", color="black", + alpha=0.3, linewidth=0.6) + + ax.step(logT, np.log10(best_fit + 1e-40), where="mid", + color="green", linewidth=2, label="Best-fit DEM") + + ax.set_xlabel("log10 T [K]") + ax.set_ylabel("log10 DEM [cm$^{-5}$ K$^{-1}$]") + ax.set_xlim(logT.min(), logT.max()) + ax.set_ylim(np.floor(np.log10(best_fit.min() + 1e-40)), + np.ceil(np.log10(best_fit.max() + 1e-40))) + ax.set_title("DEM (IDL Style)") + ax.legend() + ax.grid(alpha=0.3) + + plt.tight_layout() + plt.show() + + +############################################################################### +# Plotting function 4 +############################################################################### +def plot_fit_residuals(dem): + """ + Plot observed vs fitted intensities and residuals. + """ + if not hasattr(dem, "dem"): + dem.solve() + + if not hasattr(dem, "fitted_intensities"): + raise AttributeError("No fitted intensities found. 
Run fit_dem() or solve() first.") + + obs = dem._observed_intensities + fit = dem.fitted_intensities + sigma = dem.intensity_errors.to_value(u.DN / u.s) + + filters = dem.filter_names + indices = np.arange(len(obs)) + + plt.figure(figsize=(7, 5)) + plt.errorbar(indices, obs, yerr=sigma, fmt="o",label="Observed", color="black") + plt.plot(indices, fit, "s", label="Fitted", color="red") + plt.xticks(indices, filters, rotation=45) + plt.ylabel("Intensity [DN/s/pix]") + plt.title("Observed vs Fitted Intensities") + plt.legend() + plt.tight_layout() + plt.show() + + residuals = (obs - fit) / sigma + plt.figure(figsize=(7, 4)) + plt.axhline(0, color="gray", linestyle="--") + plt.plot(indices, residuals, "o", color="blue") + plt.xticks(indices, filters, rotation=45) + plt.ylabel("(Obs - Fit) / σ") + plt.title("Residuals per Filter") + plt.tight_layout() + plt.show() + + +############################################################################### +# Plotting function 5 +############################################################################### +def plot_dem_with_median_bins(dem): + """ + Reproduce IDL-style DEM plot with: + - Best-fit DEM (green) + - Monte Carlo DEMs (dotted black) + - Median DEM (blue) + - Closest DEM to the median (orange) + """ + if not hasattr(dem, "dem"): + dem.solve() + if not hasattr(dem, "_dem_ensemble"): + raise AttributeError("Monte Carlo ensemble not available. 
Run with monte_carlo_runs > 0.") + + logT = dem.logT + mc_dems = np.array(dem._dem_ensemble) + best_fit = dem.dem + + med = np.median(mc_dems, axis=0) + diffs = np.linalg.norm(mc_dems - med, axis=1) + closest_idx = np.argmin(diffs) + closest_dem = mc_dems[closest_idx] + + fig, ax = plt.subplots(figsize=(9, 6)) + + for i in range(mc_dems.shape[0]): + ax.step(logT, np.log10(mc_dems[i] + 1e-40), + where="mid", linestyle=":", color="black", + alpha=0.3, linewidth=0.6) + + ax.step(logT, np.log10(best_fit + 1e-40), where="mid", + color="green", linewidth=2, label="Obs DEM") + ax.step(logT, np.log10(med + 1e-40), where="mid", + color="blue", linewidth=1.8, label="Median in bins") + ax.step(logT, np.log10(closest_dem + 1e-40), where="mid", + color="orange", linewidth=1.8, label="Closest DEM to median") + + ax.set_xlim(dem.min_T, dem.max_T) + ax.set_ylim(0, 30) + ax.set_xlabel("Log T (K)") + ax.set_ylabel("Log DEM [cm$^{-5}$ K$^{-1}$]") + ax.legend() + ax.grid(alpha=0.3) + ax.set_title("DEM with Monte Carlo Spread, Median, and Closest Fit (IDL Style)") + + plt.tight_layout() + plt.show() + + +############################################################################### +# Plotting function 6 +############################################################################### +def plot_iteration_stats(dem): + """ + Plot x^2 convergence across solver iterations. + """ + if not hasattr(dem, "_iteration_chi2") or len(dem._iteration_chi2) == 0: + raise AttributeError("No iteration stats found. 
Run fit_dem() or solve() first.") + + chi2_vals = np.array(dem._iteration_chi2) + + fig, ax = plt.subplots(figsize=(8, 5)) + ax.plot(range(len(chi2_vals)), chi2_vals, lw=1.5) + ax.set_xlabel("Iteration") + ax.set_ylabel("Chi²") + ax.set_title("Chi² Convergence") + ax.grid(alpha=0.3) + + if chi2_vals.max() / max(chi2_vals.min(), 1e-10) > 1e4: + ax.set_yscale("log") + + plt.tight_layout() + plt.show() From a7959ca0de53bc1239f0ae091f83b3f80b344b7f Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Mon, 22 Sep 2025 15:53:03 -0400 Subject: [PATCH 074/121] Applying black to plotting --- xrtpy/xrt_dem_iterative/dem_plotting.py | 130 +++++++++++++++++------- 1 file changed, 96 insertions(+), 34 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_plotting.py b/xrtpy/xrt_dem_iterative/dem_plotting.py index 7e22018ff..8ef58a8d2 100644 --- a/xrtpy/xrt_dem_iterative/dem_plotting.py +++ b/xrtpy/xrt_dem_iterative/dem_plotting.py @@ -33,20 +33,29 @@ def plot_dem_results(dem): fig, ax = plt.subplots(figsize=(8, 6)) - ax.step(logT, np.log10(best_fit + 1e-40), where="mid", - color="blue", linewidth=2, label="Best-fit DEM") + ax.step( + logT, + np.log10(best_fit + 1e-40), + where="mid", + color="blue", + linewidth=2, + label="Best-fit DEM", + ) if dem_err is not None: upper = np.log10(best_fit + dem_err + 1e-40) lower = np.log10(np.clip(best_fit - dem_err, 1e-40, None)) - ax.fill_between(logT, lower, upper, step="mid", - color="blue", alpha=0.2, label="+/-1σ") + ax.fill_between( + logT, lower, upper, step="mid", color="blue", alpha=0.2, label="+/-1σ" + ) ax.set_xlabel("log10 T [K]") ax.set_ylabel("log10 DEM [cm$^{-5}$ K$^{-1}$]") ax.set_xlim(logT.min(), logT.max()) - ax.set_ylim(np.floor(np.log10(best_fit.min() + 1e-40)), - np.ceil(np.log10(best_fit.max() + 1e-40))) + ax.set_ylim( + np.floor(np.log10(best_fit.min() + 1e-40)), + np.ceil(np.log10(best_fit.max() + 1e-40)), + ) ax.set_title("DEM Solution") ax.legend() ax.grid(alpha=0.3) @@ -73,19 +82,28 @@ def plot_dem_uncertainty(dem): 
fig, ax = plt.subplots(figsize=(8, 6)) - ax.step(logT, np.log10(best_fit + 1e-40), where="mid", - color="blue", linewidth=2, label="Best-fit DEM") + ax.step( + logT, + np.log10(best_fit + 1e-40), + where="mid", + color="blue", + linewidth=2, + label="Best-fit DEM", + ) upper = np.log10(best_fit + dem_err + 1e-40) lower = np.log10(np.clip(best_fit - dem_err, 1e-40, None)) - ax.fill_between(logT, lower, upper, step="mid", - color="blue", alpha=0.2, label="+/-1σ") + ax.fill_between( + logT, lower, upper, step="mid", color="blue", alpha=0.2, label="+/-1σ" + ) ax.set_xlabel("log10 T [K]") ax.set_ylabel("log10 DEM [cm$^{-5}$ K$^{-1}$]") ax.set_xlim(logT.min(), logT.max()) - ax.set_ylim(np.floor(np.log10(best_fit.min() + 1e-40)), - np.ceil(np.log10((best_fit + dem_err).max() + 1e-40))) + ax.set_ylim( + np.floor(np.log10(best_fit.min() + 1e-40)), + np.ceil(np.log10((best_fit + dem_err).max() + 1e-40)), + ) ax.set_title("DEM with Monte Carlo Uncertainty") ax.legend() ax.grid(alpha=0.3) @@ -115,18 +133,32 @@ def plot_idl_style(dem): if hasattr(dem, "_dem_ensemble"): mc_dems = np.array(dem._dem_ensemble) for i in range(mc_dems.shape[0]): - ax.step(logT, np.log10(mc_dems[i] + 1e-40), - where="mid", linestyle=":", color="black", - alpha=0.3, linewidth=0.6) - - ax.step(logT, np.log10(best_fit + 1e-40), where="mid", - color="green", linewidth=2, label="Best-fit DEM") + ax.step( + logT, + np.log10(mc_dems[i] + 1e-40), + where="mid", + linestyle=":", + color="black", + alpha=0.3, + linewidth=0.6, + ) + + ax.step( + logT, + np.log10(best_fit + 1e-40), + where="mid", + color="green", + linewidth=2, + label="Best-fit DEM", + ) ax.set_xlabel("log10 T [K]") ax.set_ylabel("log10 DEM [cm$^{-5}$ K$^{-1}$]") ax.set_xlim(logT.min(), logT.max()) - ax.set_ylim(np.floor(np.log10(best_fit.min() + 1e-40)), - np.ceil(np.log10(best_fit.max() + 1e-40))) + ax.set_ylim( + np.floor(np.log10(best_fit.min() + 1e-40)), + np.ceil(np.log10(best_fit.max() + 1e-40)), + ) ax.set_title("DEM (IDL Style)") 
ax.legend() ax.grid(alpha=0.3) @@ -146,7 +178,9 @@ def plot_fit_residuals(dem): dem.solve() if not hasattr(dem, "fitted_intensities"): - raise AttributeError("No fitted intensities found. Run fit_dem() or solve() first.") + raise AttributeError( + "No fitted intensities found. Run fit_dem() or solve() first." + ) obs = dem._observed_intensities fit = dem.fitted_intensities @@ -156,7 +190,7 @@ def plot_fit_residuals(dem): indices = np.arange(len(obs)) plt.figure(figsize=(7, 5)) - plt.errorbar(indices, obs, yerr=sigma, fmt="o",label="Observed", color="black") + plt.errorbar(indices, obs, yerr=sigma, fmt="o", label="Observed", color="black") plt.plot(indices, fit, "s", label="Fitted", color="red") plt.xticks(indices, filters, rotation=45) plt.ylabel("Intensity [DN/s/pix]") @@ -190,7 +224,9 @@ def plot_dem_with_median_bins(dem): if not hasattr(dem, "dem"): dem.solve() if not hasattr(dem, "_dem_ensemble"): - raise AttributeError("Monte Carlo ensemble not available. Run with monte_carlo_runs > 0.") + raise AttributeError( + "Monte Carlo ensemble not available. Run with monte_carlo_runs > 0." 
+ ) logT = dem.logT mc_dems = np.array(dem._dem_ensemble) @@ -204,16 +240,40 @@ def plot_dem_with_median_bins(dem): fig, ax = plt.subplots(figsize=(9, 6)) for i in range(mc_dems.shape[0]): - ax.step(logT, np.log10(mc_dems[i] + 1e-40), - where="mid", linestyle=":", color="black", - alpha=0.3, linewidth=0.6) - - ax.step(logT, np.log10(best_fit + 1e-40), where="mid", - color="green", linewidth=2, label="Obs DEM") - ax.step(logT, np.log10(med + 1e-40), where="mid", - color="blue", linewidth=1.8, label="Median in bins") - ax.step(logT, np.log10(closest_dem + 1e-40), where="mid", - color="orange", linewidth=1.8, label="Closest DEM to median") + ax.step( + logT, + np.log10(mc_dems[i] + 1e-40), + where="mid", + linestyle=":", + color="black", + alpha=0.3, + linewidth=0.6, + ) + + ax.step( + logT, + np.log10(best_fit + 1e-40), + where="mid", + color="green", + linewidth=2, + label="Obs DEM", + ) + ax.step( + logT, + np.log10(med + 1e-40), + where="mid", + color="blue", + linewidth=1.8, + label="Median in bins", + ) + ax.step( + logT, + np.log10(closest_dem + 1e-40), + where="mid", + color="orange", + linewidth=1.8, + label="Closest DEM to median", + ) ax.set_xlim(dem.min_T, dem.max_T) ax.set_ylim(0, 30) @@ -235,7 +295,9 @@ def plot_iteration_stats(dem): Plot x^2 convergence across solver iterations. """ if not hasattr(dem, "_iteration_chi2") or len(dem._iteration_chi2) == 0: - raise AttributeError("No iteration stats found. Run fit_dem() or solve() first.") + raise AttributeError( + "No iteration stats found. Run fit_dem() or solve() first." 
+ ) chi2_vals = np.array(dem._iteration_chi2) From 6a91264493424f85f7d9da17e647ca1306f561c1 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Mon, 22 Sep 2025 15:53:37 -0400 Subject: [PATCH 075/121] Updated and cleaned out dem_solver main code --- xrtpy/xrt_dem_iterative/dem_solver.py | 533 ++++++++------------------ 1 file changed, 161 insertions(+), 372 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 47b3956ac..182e33250 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -12,6 +12,7 @@ from scipy.interpolate import interp1d from xrtpy.util.filters import validate_and_format_filters +from xrtpy.xrt_dem_iterative import dem_plotting class XRTDEMIterative: @@ -160,7 +161,7 @@ def __init__( "Please ensure the temperature range fits within all responses.\n" "Hint: Default response range is logT = 5.5 to 8.0. You can view each response's logT range via: [r.temperature for r in responses]" ) - + # Check consistency between inputs if not ( len(self._observed_intensities) @@ -186,8 +187,10 @@ def __init__( raise ValueError("solv_factor must be a positive number.") except Exception as e: raise ValueError(f"Invalid solv_factor: {e}") - - self._using_estimated_errors = False #track whether default error model has been used + + self._using_estimated_errors = ( + False # track whether default error model has been used + ) #### TEST GIT CI TEST ##### @@ -255,7 +258,6 @@ def validate_inputs(self) -> None: # success ⇒ no return value return None - def __repr__(self): return ( f" np.ndarray: """ Estimate an initial DEM curve from observed intensities and responses. - + This follows the algorithm in IDL's `xrt_dem_iterative2.pro`, which uses response-peak inversion to generate a crude log10 DEM estimate per channel, then interpolates these estimates onto the solver's regular temperature grid. 
@@ -555,9 +556,13 @@ def _estimate_initial_dem(self, cutoff: float = 1.0 / np.e) -> np.ndarray: (log10 DEM = 22 everywhere). """ if not hasattr(self, "logT"): - raise AttributeError("Temperature grid missing. Call create_logT_grid() first.") + raise AttributeError( + "Temperature grid missing. Call create_logT_grid() first." + ) if not hasattr(self, "_response_matrix"): - raise AttributeError("Response matrix missing. Call _interpolate_responses_to_grid() first.") + raise AttributeError( + "Response matrix missing. Call _interpolate_responses_to_grid() first." + ) # Storage for peak locations and DEM estimates t_peaks = [] @@ -565,7 +570,11 @@ def _estimate_initial_dem(self, cutoff: float = 1.0 / np.e) -> np.ndarray: # Loop over each filter for i, (T_orig, R_orig, I_obs) in enumerate( - zip(self.response_temperatures, self.response_values, self._observed_intensities) + zip( + self.response_temperatures, + self.response_values, + self._observed_intensities, + ) ): logT_orig = np.log10(T_orig.to_value(u.K)) R_vals = R_orig.to_value((u.DN / u.s / u.pix) * u.cm**5) @@ -584,7 +593,7 @@ def _estimate_initial_dem(self, cutoff: float = 1.0 / np.e) -> np.ndarray: continue # 3. Compute denominator integral: sum(T * R * dlnT) - T_good = 10.0**logT_orig[good] + T_good = 10.0 ** logT_orig[good] R_good = R_vals[good] dlogT_native = np.diff(logT_orig).mean() dlnT_native = np.log(10.0) * dlogT_native @@ -638,8 +647,7 @@ def _estimate_initial_dem(self, cutoff: float = 1.0 / np.e) -> np.ndarray: self._initial_log_dem = est_log_dem_on_grid return est_log_dem_on_grid - - + def _build_lmfit_parameters(self, n_knots: int = 6): """ Build lmfit.Parameters for the DEM spline knots. 
@@ -695,15 +703,14 @@ def _build_lmfit_parameters(self, n_knots: int = 6): params.add( name=f"knot_{i}", value=val, - min=-10, # optional bounds: avoid absurdly low - max=50, # optional bounds: avoid absurdly high + min=-10, # optional bounds: avoid absurdly low + max=50, # optional bounds: avoid absurdly high vary=True, ) self._init_knot_params = params return params - def _reconstruct_dem_from_knots(self, params) -> np.ndarray: """ Reconstruct the DEM curve on the solver's logT grid from spline knot parameters. @@ -733,7 +740,9 @@ def _reconstruct_dem_from_knots(self, params) -> np.ndarray: ) # Extract knot values from parameters (log10(DEM/solv_factor)) - knot_vals = np.array([params[f"knot_{i}"].value for i in range(len(self._knot_positions))]) + knot_vals = np.array( + [params[f"knot_{i}"].value for i in range(len(self._knot_positions))] + ) # Interpolate across solver grid in log space interp_func = interp1d( @@ -746,13 +755,12 @@ def _reconstruct_dem_from_knots(self, params) -> np.ndarray: log_dem_scaled = interp_func(self.logT) # Convert back to DEM [cm^-5 K^-1] - dem_grid = self._solv_factor * (10.0 ** log_dem_scaled) + dem_grid = self._solv_factor * (10.0**log_dem_scaled) return dem_grid - # self._iteration_chi2 = [] - + # def _residuals(self, params) -> np.ndarray: # """ # Residuals function for DEM fitting. 
@@ -794,7 +802,7 @@ def _reconstruct_dem_from_knots(self, params) -> np.ndarray: # # sigma = self.intensity_errors.to_value(u.DN / u.s) # ensure numeric # # residuals = (self._observed_intensities - I_calc) / sigma # # Use MC-perturbed intensities if present; otherwise the originals - + # y_obs = getattr(self, "_active_observed_intensities", self._observed_intensities) # sigma = self.intensity_errors.to_value(u.DN / u.s) # numeric # residuals = (y_obs - I_calc) / sigma @@ -806,9 +814,8 @@ def _reconstruct_dem_from_knots(self, params) -> np.ndarray: # if not hasattr(self, "_iteration_chi2"): # self._iteration_chi2 = [] # self._iteration_chi2.append(chi2_val) - - # return residuals + # return residuals def _residuals(self, params) -> np.ndarray: """ Residuals function for DEM fitting. @@ -830,7 +837,9 @@ def _residuals(self, params) -> np.ndarray: I_calc = np.sum(R_mid * dem_mid * T_mid * self.dlnT, axis=1) # 4. Residuals: normalize by observational errors - y_obs = getattr(self, "_active_observed_intensities", self._observed_intensities) + y_obs = getattr( + self, "_active_observed_intensities", self._observed_intensities + ) sigma = self.intensity_errors.to_value(u.DN / u.s) residuals = (y_obs - I_calc) / sigma @@ -842,7 +851,6 @@ def _residuals(self, params) -> np.ndarray: return residuals - # def fit_dem(self, n_knots: int = 6, method: str = "least_squares", **kwargs): # """ # Fit the DEM using lmfit to minimize residuals. @@ -852,7 +860,7 @@ def _residuals(self, params) -> np.ndarray: # n_knots : int, optional # Number of spline knots across the logT grid. Default = 6. # method : str, optional - # Minimization method passed to `lmfit.minimize`. + # Minimization method passed to `lmfit.minimize`. # Common choices: "least_squares", "leastsq", "nelder". # Default = "least_squares". # **kwargs : dict @@ -881,7 +889,6 @@ def _residuals(self, params) -> np.ndarray: # responses, and estimates an initial DEM if not already done. 
# """ # from lmfit import minimize - # # --- Auto-prepare prerequisites --- # if not hasattr(self, "logT") or not hasattr(self, "T"): @@ -893,8 +900,8 @@ def _residuals(self, params) -> np.ndarray: # if not hasattr(self, "_initial_log_dem"): # self._estimate_initial_dem() - # self._last_n_knots = n_knots #Used for print in the summary function - + # self._last_n_knots = n_knots #Used for print in the summary function + # # 1. Build initial knot parameters # params = self._build_lmfit_parameters(n_knots=n_knots) @@ -956,7 +963,7 @@ def _callback(params, iter, resid, *args, **kwargs): self._residuals, params, method=method, - iter_cb=_callback, # <-- track stats + iter_cb=_callback, # <-- track stats max_nfev=self.max_iterations, **kwargs, ) @@ -982,8 +989,9 @@ def _callback(params, iter, resid, *args, **kwargs): return result - - def fit_with_multiple_methods(self, methods=("leastsq", "least_squares", "nelder"), n_knots: int = 6, **kwargs): + def fit_with_multiple_methods( + self, methods=("leastsq", "least_squares", "nelder"), n_knots: int = 6, **kwargs + ): """ Try multiple lmfit minimization methods and pick the best χ². @@ -1053,356 +1061,102 @@ def fit_with_multiple_methods(self, methods=("leastsq", "least_squares", "nelder return best_result + # def run_monte_carlo(self, n_runs=None, n_knots=6, method="least_squares", random_seed=None): + # if random_seed is not None: + # np.random.seed(random_seed) + # """ + # Run Monte Carlo DEM fits to estimate uncertainties and store full ensemble. - def run_monte_carlo(self, n_runs=None, n_knots=6, method="least_squares", random_seed=None): - if random_seed is not None: - np.random.seed(random_seed) - """ - Run Monte Carlo DEM fits to estimate uncertainties and store full ensemble. + # Returns + # ------- + # dem_ensemble : ndarray + # Shape (n_runs, n_temperatures) array of DEM solutions. 
+ # """ + # from lmfit import minimize - Returns - ------- - dem_ensemble : ndarray - Shape (n_runs, n_temperatures) array of DEM solutions. - """ + # if n_runs is None: + # n_runs = self._monte_carlo_runs + # if n_runs <= 0: + # raise ValueError("Monte Carlo runs disabled (n_runs=0).") + + # sigma = self.intensity_errors.to_value(u.DN / u.s) + # dem_ensemble = [] + + # self._last_n_knots = n_knots #Used for print in the summary function + + # for i in range(n_runs): + # noisy_obs = self._observed_intensities + np.random.normal(0, sigma) + # #print(f"Given intensities: {noisy_obs}") + # self._observed_intensities_mc = noisy_obs # temp override + + # # params = self._build_lmfit_parameters(n_knots=n_knots) #Older Version Sept 18 + # # result = minimize(lambda p: self._residuals(p), params, method=method) #Older Version Sept 18 + # params = self._build_lmfit_parameters(n_knots=n_knots) + # # Activate noisy intensities for this run + # self._active_observed_intensities = noisy_obs + # try: + # result = minimize(self._residuals, params, method=method) + # finally: + # # Always restore (so the main dataset isn’t polluted) + # if hasattr(self, "_active_observed_intensities"): + # delattr(self, "_active_observed_intensities") + + # dem_i = self._reconstruct_dem_from_knots(result.params) + # dem_ensemble.append(dem_i) + + # dem_ensemble = np.array(dem_ensemble) + + # # Store ensemble + uncertainty + # self._dem_ensemble = dem_ensemble + # self.dem_uncertainty = np.std(dem_ensemble, axis=0) + # self.dem_median = np.median(dem_ensemble, axis=0) + + # return dem_ensemble + + def run_monte_carlo( + self, n_runs=None, n_knots=6, method="least_squares", random_seed=None + ): from lmfit import minimize - + import numpy as np + from tqdm import tqdm # add this at top of file + if n_runs is None: n_runs = self._monte_carlo_runs if n_runs <= 0: raise ValueError("Monte Carlo runs disabled (n_runs=0).") + if random_seed is not None: + np.random.seed(random_seed) + sigma = 
self.intensity_errors.to_value(u.DN / u.s) dem_ensemble = [] - self._last_n_knots = n_knots #Used for print in the summary function - - for i in range(n_runs): - noisy_obs = self._observed_intensities + np.random.normal(0, sigma) - #print(f"Given intensities: {noisy_obs}") - self._observed_intensities_mc = noisy_obs # temp override + self._last_n_knots = n_knots - # params = self._build_lmfit_parameters(n_knots=n_knots) #Older Version Sept 18 - # result = minimize(lambda p: self._residuals(p), params, method=method) #Older Version Sept 18 - params = self._build_lmfit_parameters(n_knots=n_knots) - # Activate noisy intensities for this run + # --- progress bar + for i in tqdm(range(n_runs), desc="Monte Carlo DEM fits", unit="run"): + noisy_obs = self._observed_intensities + np.random.normal(0, sigma) self._active_observed_intensities = noisy_obs try: + params = self._build_lmfit_parameters(n_knots=n_knots) result = minimize(self._residuals, params, method=method) finally: - # Always restore (so the main dataset isn’t polluted) if hasattr(self, "_active_observed_intensities"): delattr(self, "_active_observed_intensities") - dem_i = self._reconstruct_dem_from_knots(result.params) dem_ensemble.append(dem_i) dem_ensemble = np.array(dem_ensemble) - - # Store ensemble + uncertainty self._dem_ensemble = dem_ensemble self.dem_uncertainty = np.std(dem_ensemble, axis=0) self.dem_median = np.median(dem_ensemble, axis=0) return dem_ensemble - - ########################################################################################################################### - ####################################### Plotting section STARTS ########################################################### - ########################################################################################################################### - - ############ Plotting function 1 ######################################################################################### - def plot_dem_results(self, results): - 
""" - Quick plotting for users who only have the results dict from solve(). - - Parameters - ---------- - results : dict - Dictionary returned by self.solve(). Keys: - - "temperature" : log10(T) grid - - "dem" : best-fit DEM [cm^-5 K^-1] - - "dem_err" : DEM uncertainty (if Monte Carlo enabled) - - Notes - ----- - - Best-fit DEM is shown as a blue line. - - If dem_err exists, a blue shaded band (±1σ) is shown. - """ - import matplotlib.pyplot as plt - import numpy as np - - logT = results["temperature"] - dem = results["dem"] - dem_err = results.get("dem_err", None) - - fig, ax = plt.subplots(figsize=(8,6)) - - # Best-fit DEM - ax.step(logT, np.log10(dem + 1e-40), where="mid", - color="blue", linewidth=2, label="Best-fit DEM") - - # Uncertainty shading - if dem_err is not None: - upper = np.log10(dem + dem_err + 1e-40) - lower = np.log10(np.clip(dem - dem_err, 1e-40, None)) - ax.fill_between(logT, lower, upper, step="mid", - color="blue", alpha=0.2, label="±1σ") - - ax.set_xlabel("log10 T [K]") - ax.set_ylabel("log10 DEM [cm$^{-5}$ K$^{-1}$]") - ax.set_xlim(logT.min(), logT.max()) - ax.set_ylim(np.floor(np.log10(dem.min()+1e-40)), - np.ceil(np.log10(dem.max()+1e-40))) - ax.set_title("DEM Solution") - ax.legend() - ax.grid(alpha=0.3) - - plt.tight_layout() - plt.show() - - ############ Plotting function 2 ######################################################################################### - def plot_dem_uncertainty(self): - """ - Plot DEM with Monte Carlo uncertainty band. - - Requires - -------- - self.dem : ndarray - Best-fit DEM curve. - self.dem_uncertainty : ndarray - Uncertainty from Monte Carlo runs. - self.logT : ndarray - Temperature grid. - """ - import matplotlib.pyplot as plt - import numpy as np - - if not hasattr(self, "dem"): - raise AttributeError("No DEM found. Run fit_dem() or solve() first.") - if not hasattr(self, "dem_uncertainty"): - raise AttributeError("No DEM uncertainty found. 
Run run_monte_carlo() first.") - - logT = self.logT - dem = self.dem - dem_err = self.dem_uncertainty - - fig, ax = plt.subplots(figsize=(8,6)) - - # Best-fit DEM - ax.step(logT, np.log10(dem + 1e-40), where="mid", - color="blue", linewidth=2, label="Best-fit DEM") - - # ±1σ shaded region - upper = np.log10(dem + dem_err + 1e-40) - lower = np.log10(np.clip(dem - dem_err, 1e-40, None)) - ax.fill_between(logT, lower, upper, step="mid", - color="blue", alpha=0.2, label="±1σ") - - ax.set_xlabel("log10 T [K]") - ax.set_ylabel("log10 DEM [cm$^{-5}$ K$^{-1}$]") - ax.set_xlim(logT.min(), logT.max()) - ax.set_ylim(np.floor(np.log10(dem.min()+1e-40)), - np.ceil(np.log10((dem+dem_err).max()+1e-40))) - ax.set_title("DEM with Monte Carlo Uncertainty") - ax.legend() - ax.grid(alpha=0.3) - - plt.tight_layout() - plt.show() - - ############ Plotting function 3 ######################################################################################### - def plot_idl_style(self): - """ - Faithful mirror of IDL's xrt_dem_iterative2.pro plotting style. - - - Black dotted lines → Monte Carlo DEMs (if available) - - Green line → Best-fit DEM - - No shading, no extras - - Requires - -------- - self.dem : ndarray - self.logT : ndarray - """ - import matplotlib.pyplot as plt - import numpy as np - - if not hasattr(self, "dem"): - raise AttributeError("No DEM found. 
Run fit_dem() or solve() first.") - - logT = self.logT - dem = self.dem - - fig, ax = plt.subplots(figsize=(8,6)) - - # Monte Carlo ensemble (if available) - if hasattr(self, "_dem_ensemble"): - mc_dems = np.array(self._dem_ensemble) - for i in range(mc_dems.shape[0]): - ax.step(logT, np.log10(mc_dems[i] + 1e-40), - where="mid", linestyle=":", color="black", alpha=0.3, linewidth=0.6) - - # Best-fit DEM - ax.step(logT, np.log10(dem + 1e-40), where="mid", - color="green", linewidth=2, label="Best-fit DEM") - - ax.set_xlabel("log10 T [K]") - ax.set_ylabel("log10 DEM [cm$^{-5}$ K$^{-1}$]") - ax.set_xlim(logT.min(), logT.max()) - ax.set_ylim(np.floor(np.log10(dem.min()+1e-40)), - np.ceil(np.log10(dem.max()+1e-40))) - ax.set_title("DEM (IDL Style)") - ax.legend() - ax.grid(alpha=0.3) - - plt.tight_layout() - plt.show() - - - ############ Plotting function 4 ######################################################################################### - def plot_fit_residuals(self): - import matplotlib.pyplot as plt - """ - Plot observed vs fitted intensities and residuals. 
- """ - obs = self._observed_intensities - fit = self.fitted_intensities - sigma = self.intensity_errors.to_value(u.DN / u.s) - - filters = self.filter_names - indices = np.arange(len(obs)) - - # Scatter: observed vs fitted - plt.figure(figsize=(7,5)) - plt.errorbar(indices, obs, yerr=sigma, fmt="o", label="Observed", color="black") - plt.plot(indices, fit, "s", label="Fitted", color="red") - plt.xticks(indices, filters, rotation=45) - plt.ylabel("Intensity [DN/s/pix]") - plt.title("Observed vs Fitted Intensities") - plt.legend() - plt.tight_layout() - plt.show() - - # Residuals - residuals = (obs - fit) / sigma - plt.figure(figsize=(7,4)) - plt.axhline(0, color="gray", linestyle="--") - plt.plot(indices, residuals, "o", color="blue") - plt.xticks(indices, filters, rotation=45) - plt.ylabel("(Obs - Fit) / σ") - plt.title("Residuals per Filter") - plt.tight_layout() - plt.show() - - - ############ Plotting function 5 ######################################################################################### - - def plot_dem_with_median_bins(self): - ####******* IDL MIRROR METHODS *******#### - """ - Reproduce IDL-style DEM plot with: - - Best-fit DEM (green) - - Monte Carlo DEMs as dotted step lines (gray/black) - - Median DEM across ensemble (blue) - - Closest DEM to the median (orange) - - Requires: - self._dem_ensemble from run_monte_carlo() - self.dem from fit_dem() - """ - import matplotlib.pyplot as plt - if not hasattr(self, "_dem_ensemble"): - raise AttributeError("Monte Carlo ensemble not available. Run run_monte_carlo() first.") - if not hasattr(self, "dem"): - raise AttributeError("Best-fit DEM not available. 
Run fit_dem() first.") - - logT = self.logT - mc_dems = np.array(self._dem_ensemble) # shape (N_runs, N_T) - best_fit = self.dem # (N_T,) - - # --- Median DEM at each temperature bin - med = np.median(mc_dems, axis=0) - - # --- Closest DEM (min L2 distance to median) - diffs = np.linalg.norm(mc_dems - med, axis=1) - closest_idx = np.argmin(diffs) - closest_dem = mc_dems[closest_idx] - - # --- Plot - fig, ax = plt.subplots(figsize=(9, 6)) - - # MC DEMs: dotted black - for i in range(mc_dems.shape[0]): - ax.step(logT, np.log10(mc_dems[i] + 1e-40), - where="mid", linestyle=":", color="black", alpha=0.3, linewidth=0.6) - - # Best-fit DEM (green) - ax.step(logT, np.log10(best_fit + 1e-40), where="mid", - color="green", linewidth=2, label="Obs DEM") - - # Median DEM (blue) - ax.step(logT, np.log10(med + 1e-40), where="mid", - color="blue", linewidth=1.8, label="Median in bins") - - # Closest-to-median DEM (orange) - ax.step(logT, np.log10(closest_dem + 1e-40), where="mid", - color="orange", linewidth=1.8, label="Closest DEM to median") - - # Style - ax.set_xlim(self.min_T, self.max_T) - ax.set_ylim(0, 30) - ax.set_xlabel("Log T (K)") - #ax.set_ylim(np.floor(np.min(np.log10(mc_dems+1e-40))),np.ceil(np.max(np.log10(mc_dems+1e-40)))) - ax.set_ylabel("Log DEM [cm$^{-5}$ K$^{-1}$]") - ax.legend() - ax.grid(True, alpha=0.3) - ax.set_title("DEM with Monte Carlo Spread, Median, and Closest Fit (IDL Style)") - - plt.tight_layout() - plt.show() - - ############ Plotting function 5 ######################################################################################### - - def plot_iteration_stats(self): - """ - Plot χ² convergence across solver iterations. - - Requires - -------- - self._iteration_chi2 : list - Logged χ² values from fit_dem(). - """ - import matplotlib.pyplot as plt - import numpy as np - - if not hasattr(self, "_iteration_chi2") or len(self._iteration_chi2) == 0: - raise AttributeError("No iteration stats found. 
Run fit_dem() or solve() first.") - - chi2_vals = np.array(self._iteration_chi2) - - fig, ax = plt.subplots(figsize=(8, 5)) - ax.plot(range(len(chi2_vals)), chi2_vals, lw=1.5) - ax.set_xlabel("Iteration") - ax.set_ylabel("Chi²") - ax.set_title("Chi² Convergence") - ax.grid(alpha=0.3) - - # Log-scale option if dynamic range is huge - if chi2_vals.max() / max(chi2_vals.min(), 1e-10) > 1e4: - ax.set_yscale("log") - - plt.tight_layout() - plt.show() - - ########################################################################################################################### - ####################################### Plotting section ENDS ########################################################### - ########################################################################################################################### - - - - def solve(self, n_knots: int = 6, method: str = "least_squares", run_mc: bool = True): + def solve( + self, n_knots: int = 6, method: str = "least_squares", run_mc: bool = True + ): """ Run the full DEM solver, IDL-style. @@ -1446,21 +1200,33 @@ def solve(self, n_knots: int = 6, method: str = "least_squares", run_mc: bool = # 4. Monte Carlo (optional) if run_mc and self.monte_carlo_runs > 0: - self.run_monte_carlo(n_runs=self.monte_carlo_runs, - n_knots=n_knots, - method=method) + self.run_monte_carlo( + n_runs=self.monte_carlo_runs, n_knots=n_knots, method=method + ) - # 5. Bundle results + # # 5. 
Bundle results + # return { + # "temperature": self.logT, + # "dem": self.dem, + # "dem_err": getattr(self, "dem_uncertainty", None), + # "ifit": self.fitted_intensities, + # "chi2": getattr(self, "chi2", None), + # "redchi2": getattr(self, "redchi2", None), + # "solver": self, + # } + return self + + def to_dict(self): + """Return solver outputs as a dictionary.""" return { "temperature": self.logT, - "dem": self.dem, + "dem": getattr(self, "dem", None), "dem_err": getattr(self, "dem_uncertainty", None), - "ifit": self.fitted_intensities, + "ifit": getattr(self, "fitted_intensities", None), "chi2": getattr(self, "chi2", None), "redchi2": getattr(self, "redchi2", None), } - def summary(self): """ Print a comprehensive summary of the DEM solver setup, @@ -1487,7 +1253,9 @@ def summary(self): print(" [IDL reference: xrt_dem_iterative2.pro]") # Temperature grid - print(f" Temperature grid: logT {self.min_T:.2f}–{self.max_T:.2f}, step {self.dT}") + print( + f" Temperature grid: logT {self.min_T:.2f}–{self.max_T:.2f}, step {self.dT}" + ) print(f" Temp bins: {len(self.logT)}") print(f" dlogT: {self.dlogT:.3f}, dlnT: {self.dlnT:.3f}") @@ -1496,16 +1264,19 @@ def summary(self): print(f" Monte Carlo runs: {self.monte_carlo_runs or 'None'}") print(f" Max Iterations: {self.max_iterations}") print(f" Knots (n_knots): {getattr(self, '_last_n_knots', 'default=6')}") - + if hasattr(self, "chi2"): - dof = len(self._observed_intensities) - len(getattr(self, "_init_knot_params", [])) + dof = len(self._observed_intensities) - len( + getattr(self, "_init_knot_params", []) + ) print(f" χ²: {self.chi2:.4e} (dof={dof})") - # Responses print(f" Response unit: {self._response_unit}") if hasattr(self, "_response_matrix"): - print(f" Response matrix: {self._response_matrix.shape} (filters × bins)") + print( + f" Response matrix: {self._response_matrix.shape} (filters × bins)" + ) else: print(" Response matrix: Not yet built") @@ -1531,7 +1302,9 @@ def summary(self): print(" DEM median (log10 
cm^-5 K^-1):") print(f" First 5 bins: {np.log10(med[:5]+1e-40)}") print(" DEM 1σ spread (first bin):") - print(f" {np.log10(spread[0,0]+1e-40):.2f} – {np.log10(spread[1,0]+1e-40):.2f}") + print( + f" {np.log10(spread[0,0]+1e-40):.2f} – {np.log10(spread[1,0]+1e-40):.2f}" + ) print(" Reproducibility: Run with random_seed for identical results") if hasattr(self, "chi2"): @@ -1545,12 +1318,28 @@ def summary(self): print(f" Final Iter χ²: {self._iter_stats['chisq'][-1]:.4e}") # Plotting guidance - # print("\n Plotting Options:") - # if hasattr(self, "dem"): - # print(" • plot_dem_results(results) → Quick plot from solve() dictionary") - # print(" • plot_dem_uncertainty() → Best-fit DEM + shaded ±1σ (if MC available)") - # print(" • plot_idl_style() → IDL-style view (best-fit + MC curves)") - # print(" • plot_dem_with_median_bins() → Median + closest DEM (IDL style extension)") - # print(" • plot_fit_residuals() → Observed vs fitted intensities") + print("\n Plotting Options:") + if hasattr(self, "dem"): + print(" • plot_dem_results(results) → Quick plot from solve() dictionary") + print( + " • plot_dem_uncertainty() → Best-fit DEM + shaded ±1σ (if MC available)" + ) + print( + " • plot_idl_style() → IDL-style view (best-fit + MC curves)" + ) + print( + " • plot_dem_with_median_bins() → Median + closest DEM (IDL style extension)" + ) + print(" • plot_fit_residuals() → Observed vs fitted intensities") + print(" • plot_iteration_stats() ") print("=" * 65) + + +# Attach plotting functions from plotting.py to the class +XRTDEMIterative.plot_dem_results = dem_plotting.plot_dem_results +XRTDEMIterative.plot_dem_uncertainty = dem_plotting.plot_dem_uncertainty +XRTDEMIterative.plot_idl_style = dem_plotting.plot_idl_style +XRTDEMIterative.plot_fit_residuals = dem_plotting.plot_fit_residuals +XRTDEMIterative.plot_dem_with_median_bins = dem_plotting.plot_dem_with_median_bins +XRTDEMIterative.plot_iteration_stats = dem_plotting.plot_iteration_stats From 
0b3d9496090942194e10a7a437a2ab371cb4dcda Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Mon, 22 Sep 2025 15:57:17 -0400 Subject: [PATCH 076/121] Removed code that is not going to be used --- xrtpy/xrt_dem_iterative/xrt_dem_statistics.py | 75 +------------------ 1 file changed, 3 insertions(+), 72 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/xrt_dem_statistics.py b/xrtpy/xrt_dem_iterative/xrt_dem_statistics.py index daeebe183..3be63328b 100644 --- a/xrtpy/xrt_dem_iterative/xrt_dem_statistics.py +++ b/xrtpy/xrt_dem_iterative/xrt_dem_statistics.py @@ -1,77 +1,8 @@ __all__ = [ - "ComputeDEMStatistics", + "MonteCarlo", ] -import numpy as np -class ComputeDEMStatistics: - """ - Diagnostic class for computing residual statistics from a fitted DEM solution. - - This class provides utilities to: - Compute chi-squared and reduced chi-squared between observed and modeled intensities. - Print residual diagnostics per filter (similar to IDL's xrt_iter_demstat.pro). - - Methods - ------- - compute_chi_squared() - Compute total and reduced chi-squared for DEM fit. - - print_residuals() - Print modeled vs. observed intensities and residuals (normalized by error). - """ - - def __init__(self, dem_solver): - self.dem_solver = dem_solver - - def compute_chi_squared(self): - """ - Compute chi-squared and reduced chi-squared between observed and modeled intensities. - - Returns - ------- - chi2 : float - Total chi-squared value. - chi2_red : float or None - Reduced chi-squared (chi2 / dof), or None if dof <= 0. 
- """ - if not hasattr(self.dem_solver, "fitted_dem"): - raise RuntimeError("Must run fit_dem() before computing chi-squared.") - - I_model = self.dem_solver.response_matrix @ self.dem_solver.fitted_dem - I_obs = self.dem_solver._observed_intensities - - abs_error = np.maximum( - self.dem_solver.min_error, - self.dem_solver.relative_error * I_obs - ) - - chi2 = np.sum(((I_model - I_obs) / abs_error) ** 2) - dof = len(I_obs) - len(self.dem_solver.fitted_dem) - chi2_red = chi2 / dof if dof > 0 else None - - return chi2, chi2_red - - def print_residuals(self): - """ - Print residuals and modeled vs. observed intensities (IDL-style diagnostics). - """ - I_model = self.dem_solver.response_matrix @ self.dem_solver.fitted_dem - I_obs = self.dem_solver._observed_intensities - abs_error = np.maximum( - self.dem_solver.min_error, - self.dem_solver.relative_error * I_obs - ) - - residuals = (I_model - I_obs) / abs_error - - print("\n[DEM RESIDUALS PER FILTER]") - print("---------------------------") - for i, name in enumerate(self.dem_solver.filter_names): - print( - f"{name:<20} Obs: {I_obs[i]:.2f} Model: {I_model[i]:.2f} " - f"Error: {abs_error[i]:.2f} Residual: {residuals[i]:+.2f}" - ) - print(f"\nMean residual: {np.mean(residuals):+.2f}") - print(f"Std residual: {np.std(residuals):.2f}") +class MonteCarlo: + print('Will be updating soon.') From 62d1698761a971fca400a5b38985c24fd372efff Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Mon, 22 Sep 2025 15:59:31 -0400 Subject: [PATCH 077/121] Removed and cleared out code - will update soon --- .../monte_carlo_iteration.py | 86 +------------------ xrtpy/xrt_dem_iterative/xrt_dem_statistics.py | 9 +- 2 files changed, 7 insertions(+), 88 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py b/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py index 63995e53c..ba361181a 100644 --- a/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py +++ b/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py @@ -2,91 +2,7 @@ 
"MonteCarloIteration", ] -import numpy as np - class MonteCarloIteration: + print( 'MonteCarloIteration info coming soon.. ') - def __init__(self, dem_solver): - """ - Parameters - ---------- - dem_solver : XRTDEMIterative - A fitted DEM object with observed intensities, errors, and temperature grid. - """ - self.dem_solver = dem_solver - - if not hasattr(dem_solver, "logT"): - raise RuntimeError("DEM solver must have a defined temperature grid.") - if not hasattr(dem_solver, "intensity_errors"): - raise RuntimeError("DEM solver must define intensity errors.") - - self.n_bins = len(dem_solver.logT) - self.n_filters = len(dem_solver.observed_intensities) - - def generate_mc_realizations(self, n_realizations=100, seed=None,reject_negative=True): - """ - Generate randomized intensity realizations for Monte Carlo uncertainty estimation. - - Parameters - ---------- - n_realizations : int - Number of Monte Carlo runs to generate. - seed : int or None - Random seed for reproducibility. - - Sets - ---- - self.mc_intensity_sets : np.ndarray - Shape (n_realizations, n_filters), randomized intensities. - """ - if seed is not None: - np.random.seed(seed) - - # Compute error bars - abs_error = np.maximum( - self.min_error, self.relative_error * self._observed_intensities - ) - - # Draw random perturbations for each intensity - self.mc_intensity_sets = np.random.normal( - loc=self._observed_intensities, - scale=abs_error, - size=(n_realizations, len(self._observed_intensities)), - ) - - def run_mc_simulation(self, n_realizations=100, seed=None): - """ - Run Monte Carlo simulations to estimate DEM uncertainties. - - Parameters - ---------- - n_realizations : int - Number of Monte Carlo realizations to run. - seed : int or None - Optional seed for reproducibility. - - Sets - ---- - self.mc_dems : np.ndarray - Shape (n_temps, n_realizations). Each column is a DEM realization. 
- """ - if seed is not None: - np.random.seed(seed) - - # Use user-provided or fallback error model - if self._intensity_errors is not None: - errors = np.array( - self._intensity_errors, dtype=float - ) # Covering given user error in pyton array - # errors = self._intensity_errors - else: - errors = np.maximum( - self.min_error, self.relative_error * self._observed_intensities - ) - - self.mc_intensity_sets = np.random.normal( - loc=self._observed_intensities[:, None], # shape (5, 1) - scale=errors[:, None], # shape (5, 1) - size=(len(self._observed_intensities), n_realizations), # shape (5, 20) - ) diff --git a/xrtpy/xrt_dem_iterative/xrt_dem_statistics.py b/xrtpy/xrt_dem_iterative/xrt_dem_statistics.py index 3be63328b..ef73b7def 100644 --- a/xrtpy/xrt_dem_iterative/xrt_dem_statistics.py +++ b/xrtpy/xrt_dem_iterative/xrt_dem_statistics.py @@ -1,8 +1,11 @@ __all__ = [ - "MonteCarlo", + "SplineSmoothing", + "ErrorPropagation", ] +class SplineSmoothing: + print('Will be updating soon... SplineSmoothing') -class MonteCarlo: - print('Will be updating soon.') +class ErrorPropagation: + print('Will be updating soon... SplineSmoothing') \ No newline at end of file From 95ba2904dc684c9f24bd89e12b8c1b5a9d985b74 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Wed, 5 Nov 2025 17:44:37 -0500 Subject: [PATCH 078/121] Appling black --- xrtpy/xrt_dem_iterative/monte_carlo_iteration.py | 3 +-- xrtpy/xrt_dem_iterative/xrt_dem_statistics.py | 5 +++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py b/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py index ba361181a..fd73422ce 100644 --- a/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py +++ b/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py @@ -4,5 +4,4 @@ class MonteCarloIteration: - print( 'MonteCarloIteration info coming soon.. ') - + print("MonteCarloIteration info coming soon.. 
") diff --git a/xrtpy/xrt_dem_iterative/xrt_dem_statistics.py b/xrtpy/xrt_dem_iterative/xrt_dem_statistics.py index ef73b7def..86cc7968f 100644 --- a/xrtpy/xrt_dem_iterative/xrt_dem_statistics.py +++ b/xrtpy/xrt_dem_iterative/xrt_dem_statistics.py @@ -5,7 +5,8 @@ class SplineSmoothing: - print('Will be updating soon... SplineSmoothing') + print("Will be updating soon... SplineSmoothing") + class ErrorPropagation: - print('Will be updating soon... SplineSmoothing') \ No newline at end of file + print("Will be updating soon... SplineSmoothing") From 254b2e60fcaccd82f2c626a62bc462e5bdcb5bfb Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Mon, 17 Nov 2025 13:35:27 -0500 Subject: [PATCH 079/121] Testingccorrect physical units --- xrtpy/xrt_dem_iterative/dem_solver.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 182e33250..1235fd1b0 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -475,7 +475,11 @@ def _interpolate_responses_to_grid(self): logT_orig = np.log10(T_orig.to_value(u.K)) # response_vals = R_orig.to_value((u.cm**5 * u.DN) / (u.pix * u.s)) # response_vals = R_orig.to_value(u.DN / u.s / u.pix / (u.cm**5)) - response_vals = R_orig.to_value((u.DN / u.s / u.pix) * u.cm**5) + #response_vals = R_orig.to_value((u.DN / u.s / u.pix) * u.cm**5) Comment on Nov14 + #response_vals = R_orig.to_value(u.DN / u.s / u.pix / u.cm**5) + response_vals = R_orig.value # already in correct physical units for XRTpy + + interp_func = interp1d( logT_orig, From 0ec1638547d58450744cc2bed21be2cb1839a37d Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Mon, 17 Nov 2025 14:55:18 -0500 Subject: [PATCH 080/121] Corrected use of relative_error and added notes to correct/check later --- xrtpy/xrt_dem_iterative/dem_solver.py | 33 ++++++++++++++------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git 
a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 1235fd1b0..a6c46c94c 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -43,8 +43,6 @@ class XRTDEMIterative: Step size in log10 temperature space (default: 0.1). min_error : float Minimum absolute intensity error (default: 2.0 DN/s/pix). - relative_error : float - Relative error for model-based uncertainty estimate (default: 0.03). monte_carlo_runs : int, optional Number of Monte Carlo runs to perform (default: 0, disabled). Each run perturbs `observed_intensities` using `intensity_errors` @@ -61,7 +59,6 @@ def __init__( max_T=8.0, dT=0.1, min_error=2.0, - relative_error=0.03, monte_carlo_runs=0, max_iterations=2000, solv_factor=1e21, @@ -76,7 +73,7 @@ def __init__( response temperature ranges for **all** filters provided. - If `intensity_errors` is not provided, a model-based error estimate is used: - max(relative_error * observed_intensity, min_error), as in the IDL original. + max(0.03 * observed_intensity, min_error), as in the IDL original. - Default XRT filter names include: {'Al-mesh', 'Al-poly', 'C-poly', 'Ti-poly', 'Be-thin', 'Be-med', 'Al-med', 'Al-thick', 'Be-thick', @@ -179,7 +176,6 @@ def __init__( # Store error model parameters self._min_error = float(min_error) - self._relative_error = float(relative_error) try: self._solv_factor = float(solv_factor) @@ -284,6 +280,7 @@ def observed_intensities( ------- `~astropy.units.Quantity` Intensities in DN/s/pix for each filter channel. + Where "pix" means a one-arcsecond, full -resolution XRT pixel. """ return self._observed_intensities * (u.DN / u.s) @@ -339,9 +336,10 @@ def min_error(self): @property def relative_error(self): """ - Relative error used to scale intensity if error is not provided. + Relative error used to scale intensity if an error is not provided. + Default is 0.03 (3%). 
""" - return self._relative_error + return 0.03 @property def intensity_errors(self) -> u.Quantity: @@ -349,7 +347,7 @@ def intensity_errors(self) -> u.Quantity: Returns the intensity uncertainties, either user-provided or model-based. If not provided, errors are estimated using: - max(relative_error * observed_intensity, min_error) + max(0.03 * observed_intensity, min_error) For details, see: https://hesperia.gsfc.nasa.gov/ssw/hinode/xrt/idl/util/xrt_dem_iterative2.pro @@ -365,15 +363,15 @@ def intensity_errors(self) -> u.Quantity: if self._using_estimated_errors: warnings.warn( "No intensity_errors provided. Using default model: " - f"max(relative_error * observed_intensity, min_error)\n" - f"=> relative_error = {self.relative_error}, min_error = {self.min_error} DN/s\n" + f"max(relative-error * observed_intensity, min_error)\n" + f"=> relative_error = {self.relative_error} =, min_error = {self.min_error} DN/s\n" "See: https://hesperia.gsfc.nasa.gov/ssw/hinode/xrt/idl/util/xrt_dem_iterative2.pro", UserWarning, ) self._using_estimated_errors = True # suppress future warnings estimated = np.maximum( - self.relative_error * self._observed_intensities, + self.relative_error * self._observed_intensities, self.min_error, ) return estimated * (u.DN / u.s) @@ -430,6 +428,8 @@ def create_logT_grid(self): n_bins = int(round((self._max_T - self._min_T) / self._dT)) + 1 # inclusive logT grid (IDL-style regular grid) + # Units = 'log K. Runs from Min_T to Max_T with bin-width = DT + # SELFNOTEJOY- Do we need to add units - current holds no units- it's wokring correctly - Should this on the Test as well? 
self.logT = np.linspace(self._min_T, self._max_T, n_bins) # linear temperature grid in Kelvin @@ -1028,8 +1028,9 @@ def fit_with_multiple_methods( params = self._build_lmfit_parameters(n_knots=n_knots) result = minimize(self._residuals, params, method=method, **kwargs) - # Compute DEM + chi² for this fit - dem = self._reconstruct_dem_from_knots(result.params) + # Compute DEM + chi square for this fit + # SELFNOTEJOY - output currently does not have units. unts=cm^5 * K^-1 Make this a test + dem = self._reconstruct_dem_from_knots(result.params) #SELFNOTEJOY - here is the stamp to defining the DEM - triple check dem_mid = 0.5 * (dem[:-1] + dem[1:]) R_mid = 0.5 * (self._response_matrix[:, :-1] + self._response_matrix[:, 1:]) T_mid = 0.5 * (self.T[:-1] + self.T[1:]).to_value(u.K) @@ -1039,7 +1040,7 @@ def fit_with_multiple_methods( residuals = (self._observed_intensities - I_fit) / sigma chi2 = np.sum(residuals**2) - print(f" χ² = {chi2:.3e}") + print(f"x square = {chi2:.3e}") results[method] = (result, chi2) @@ -1048,7 +1049,7 @@ def fit_with_multiple_methods( best_result = result best_method = method - print(f"\n>>> Best method: {best_method} with χ² = {best_chi2:.3e}") + print(f"\n>>> Best method: {best_method} with x square = {best_chi2:.3e}") # Store outputs from the best fit best_dem = self._reconstruct_dem_from_knots(best_result.params) @@ -1252,7 +1253,7 @@ def summary(self): else: print( f" Error model used: Auto-estimated " - f"(obs * {self.relative_error}, min={self.min_error} DN/s)" + f"(obs * 0.03, min={self.min_error} DN/s)" ) print(" [IDL reference: xrt_dem_iterative2.pro]") From 04bdbfdb94886249c3a2ebd0849f5889efa744dc Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Mon, 17 Nov 2025 18:12:16 -0500 Subject: [PATCH 081/121] Updated function names to better understand/reference in code --- xrtpy/xrt_dem_iterative/dem_solver.py | 162 +++++++++++++------------- 1 file changed, 83 insertions(+), 79 deletions(-) diff --git 
a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index a6c46c94c..bffa37b99 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -35,14 +35,12 @@ class XRTDEMIterative: for one or more filters. See: https://xrtpy.readthedocs.io/en/latest/getting_started.html intensity_errors : array-like, optional Intensity uncertainties. If None, will use a model-based estimate. - min_T : float + minimum_bound_temperature : float Minimum log10 temperature (default: 5.5). - max_T : float + maximum_bound_temperature: float Maximum log10 temperature (default: 8.0). - dT : float + logarithmic_temperature_step_size : float Step size in log10 temperature space (default: 0.1). - min_error : float - Minimum absolute intensity error (default: 2.0 DN/s/pix). monte_carlo_runs : int, optional Number of Monte Carlo runs to perform (default: 0, disabled). Each run perturbs `observed_intensities` using `intensity_errors` @@ -55,13 +53,12 @@ def __init__( observed_intensities, temperature_responses, intensity_errors=None, - min_T=5.5, - max_T=8.0, - dT=0.1, - min_error=2.0, - monte_carlo_runs=0, + minimum_bound_temperature=5.5, + maximum_bound_temperature=8.0, + logarithmic_temperature_step_size=0.1, + monte_carlo_runs=0, #Need to fix this. If set, perform that many Monte Carlo runs. If not set, only do base solution. max_iterations=2000, - solv_factor=1e21, + normalization_factor=1e21, ): """ Notes @@ -69,11 +66,11 @@ def __init__( - All input lists (`observed_channel`, `observed_intensities`, and `temperature_responses`) must be the same length. Each entry should correspond to one filter. - - The temperature grid range (`min_T`, `max_T`) must lie entirely within the + - The temperature grid range (`minimum_bound_temperature`, `maximum_bound_temperature`) must lie entirely within the response temperature ranges for **all** filters provided. 
- If `intensity_errors` is not provided, a model-based error estimate is used: - max(0.03 * observed_intensity, min_error), as in the IDL original. + max(0.03 * observed_intensity, 2 (DN/s/pix)), as in the IDL original. - Default XRT filter names include: {'Al-mesh', 'Al-poly', 'C-poly', 'Ti-poly', 'Be-thin', 'Be-med', 'Al-med', 'Al-thick', 'Be-thick', @@ -103,13 +100,13 @@ def __init__( self._intensity_errors = None # Store temperature grid parameters - self._dT = float(dT) - self._min_T = float(min_T) - self._max_T = float(max_T) - if not (self._min_T < self._max_T): - raise ValueError("min_T must be < max_T.") + self._logarithmic_temperature_step_size = float(logarithmic_temperature_step_size) + self._minimum_bound_temperature = float(minimum_bound_temperature) + self._maximum_bound_temperature = float(maximum_bound_temperature) + if not (self._minimum_bound_temperature < self._maximum_bound_temperature): + raise ValueError("minimum_bound_temperature must be < maximum_bound_temperature.") - n_pts = int(np.floor((self._max_T - self._min_T) / dT + 1e-9)) + 1 + n_pts = int(np.floor((self._maximum_bound_temperature - self._minimum_bound_temperature) / logarithmic_temperature_step_size + 1e-9)) + 1 if n_pts < 4: raise ValueError("Temperature grid must have at least 4 points.") @@ -139,9 +136,9 @@ def __init__( self._max_iterations = int(max_iterations) - # Check dT is positive - if self._dT <= 0: - raise ValueError("dT must be a positive scalar.") + # Check logarithmic_temperature_step_size is positive + if self._logarithmic_temperature_step_size <= 0: + raise ValueError("logarithmic_temperature_step_size must be a positive scalar.") # Store temperature response objects self.responses = temperature_responses @@ -152,9 +149,9 @@ def __init__( # Validate that the temperature grid falls within the responses for r in self.responses: logT_grid = np.log10(r.temperature.to_value(u.K)) - if not (self._min_T >= logT_grid.min() and self._max_T <= logT_grid.max()): + if not 
(self._minimum_bound_temperature >= logT_grid.min() and self._maximum_bound_temperature <= logT_grid.max()): raise ValueError( - f"The specified temperature range [{min_T}, {max_T}] is outside the bounds of one or more filter response grids.\n" + f"The specified temperature range [{minimum_bound_temperature}, {maximum_bound_temperature}] is outside the bounds of one or more filter response grids.\n" "Please ensure the temperature range fits within all responses.\n" "Hint: Default response range is logT = 5.5 to 8.0. You can view each response's logT range via: [r.temperature for r in responses]" ) @@ -172,17 +169,14 @@ def __init__( f" Filter channels: {len(self.observed_channel)}\n" ) - self.logT = np.arange(self._min_T, self._max_T + self._dT / 2, self._dT) - - # Store error model parameters - self._min_error = float(min_error) + self.logT = np.arange(self._minimum_bound_temperature, self._maximum_bound_temperature + self._logarithmic_temperature_step_size / 2, self._logarithmic_temperature_step_size) try: - self._solv_factor = float(solv_factor) - if self._solv_factor <= 0: - raise ValueError("solv_factor must be a positive number.") + self._normalization_factor = float(normalization_factor) + if self._normalization_factor <= 0: + raise ValueError("normalization_factor must be a positive number.") except Exception as e: - raise ValueError(f"Invalid solv_factor: {e}") + raise ValueError(f"Invalid normalization_factor: {e}") self._using_estimated_errors = ( False # track whether default error model has been used @@ -222,11 +216,11 @@ def validate_inputs(self) -> None: ) # 5) temperature grid sanity - if not (self._min_T < self._max_T): - raise ValueError("min_T must be < max_T.") - if self._dT <= 0: - raise ValueError("dT must be a positive scalar.") - n_pts = int(np.floor((self._max_T - self._min_T) / self._dT + 1e-9)) + 1 + if not (self._minimum_bound_temperature < self._maximum_bound_temperature): + raise ValueError("minimum_bound_temperature must be < 
maximum_bound_temperature.") + if self._logarithmic_temperature_step_size <= 0: + raise ValueError("logarithmic_temperature_step_size must be a positive scalar.") + n_pts = int(np.floor((self._maximum_bound_temperature - self._minimum_bound_temperature) / self._logarithmic_temperature_step_size + 1e-9)) + 1 if n_pts < 4: raise ValueError("Temperature grid must have at least 4 points.") @@ -234,9 +228,9 @@ def validate_inputs(self) -> None: for r in self.responses: # logT_grid = np√.log10(r.temperature.value) logT_grid = np.log10(r.temperature.to_value(u.K)) - if not (self._min_T >= logT_grid.min() and self._max_T <= logT_grid.max()): + if not (self._minimum_bound_temperature >= logT_grid.min() and self._maximum_bound_temperature <= logT_grid.max()): raise ValueError( - f"The specified temperature range [{self._min_T}, {self._max_T}] " + f"The specified temperature range [{self._minimum_bound_temperature}, {self._maximum_bound_temperature}] " "is outside the bounds of one or more filter response grids." ) @@ -257,7 +251,7 @@ def validate_inputs(self) -> None: def __repr__(self): return ( f"" + f"logT={self._minimum_bound_temperature:.2f}–{self._maximum_bound_temperature:.2f}, logarithmic_temperature_step_size={self._logarithmic_temperature_step_size:.3f})>" ) # @property #Removed if not used @@ -306,32 +300,32 @@ def response_values(self): return [r.response for r in self.responses] @property - def min_T(self): + def minimum_bound_temperature(self): """ Lower bound of log10 temperature grid. """ - return self._min_T + return self._minimum_bound_temperature @property - def max_T(self): + def maximum_bound_temperature(self): """ Upper bound of log10 temperature grid. """ - return self._max_T + return self._maximum_bound_temperature @property - def dT(self): + def logarithmic_temperature_step_size(self): """ Bin width of log10 temperature grid. 
""" - return self._dT + return self._logarithmic_temperature_step_size @property - def min_error(self): + def min_observational_error(self): """ - Minimum error applied to DN/s when intensity error is not provided. + Default - Minimum absolute observational intensity error applied to DN/s/pix when intensity error is not provided. """ - return self._min_error + return 2 * (u.DN/u.s) @property def relative_error(self): @@ -347,7 +341,7 @@ def intensity_errors(self) -> u.Quantity: Returns the intensity uncertainties, either user-provided or model-based. If not provided, errors are estimated using: - max(0.03 * observed_intensity, min_error) + max(0.03 * observed_intensity, 2 DN/s/pix) For details, see: https://hesperia.gsfc.nasa.gov/ssw/hinode/xrt/idl/util/xrt_dem_iterative2.pro @@ -363,18 +357,28 @@ def intensity_errors(self) -> u.Quantity: if self._using_estimated_errors: warnings.warn( "No intensity_errors provided. Using default model: " - f"max(relative-error * observed_intensity, min_error)\n" - f"=> relative_error = {self.relative_error} =, min_error = {self.min_error} DN/s\n" + f"max(relative-error * observed_intensity, min_observational_error)\n" + f"=> relative_error = {self.relative_error} =, min_observational_error = {self.min_observational_error.value} DN/s\n" "See: https://hesperia.gsfc.nasa.gov/ssw/hinode/xrt/idl/util/xrt_dem_iterative2.pro", UserWarning, ) self._using_estimated_errors = True # suppress future warnings + #NOTETOJOYWe can remove if no issues later + # #No units - added in the return + # estimated = np.maximum( + # self.relative_error * self._observed_intensities , + # self.min_observational_error.value, + # ) + # return estimated * (u.DN / u.s) + + #Fixed in units estimated = np.maximum( - self.relative_error * self._observed_intensities, - self.min_error, + (self.relative_error * self._observed_intensities)*(u.DN/u.s), + self.min_observational_error, ) - return estimated * (u.DN / u.s) + return estimated + @property def 
monte_carlo_runs(self) -> int: @@ -387,12 +391,12 @@ def monte_carlo_runs(self) -> int: return self._monte_carlo_runs @property - def solv_factor(self): + def normalization_factor(self): """ Scaling factor used during DEM optimization to stabilize the spline fit. - Corresponds to `solv_factor` in IDL (typically 1e21). + Corresponds to `normalization_factor` in IDL (typically 1e21). """ - return self._solv_factor + return self._normalization_factor @property def max_iterations(self): @@ -415,7 +419,7 @@ def create_logT_grid(self): - Two forms of the temperature grid are stored: * self.logT : log10(T) values (dimensionless) * self.T : linear temperatures (Kelvin, astropy.units.Quantity) - - The grid is inclusive of both `min_T` and `max_T`, with step size `dT`. + - The grid is inclusive of both `minimum_bound_temperature` and `maximum_bound_temperature`, with step size `logarithmic_temperature_step_size`. Additional attributes created: - self.dlogT : float @@ -425,18 +429,18 @@ def create_logT_grid(self): F = int. DEM(T) * R(T) * T d(ln T) """ # number of bins including endpoints - n_bins = int(round((self._max_T - self._min_T) / self._dT)) + 1 + n_bins = int(round((self._maximum_bound_temperature - self._minimum_bound_temperature) / self._logarithmic_temperature_step_size)) + 1 # inclusive logT grid (IDL-style regular grid) - # Units = 'log K. Runs from Min_T to Max_T with bin-width = DT + # Units = 'log K. Runs from minimum_bound_temperature to Max_T with bin-width = DT # SELFNOTEJOY- Do we need to add units - current holds no units- it's wokring correctly - Should this on the Test as well? 
- self.logT = np.linspace(self._min_T, self._max_T, n_bins) + self.logT = np.linspace(self._minimum_bound_temperature, self._maximum_bound_temperature, n_bins) # linear temperature grid in Kelvin self.T = (10.0**self.logT) * u.K self.dlogT = float( - self._dT + self._logarithmic_temperature_step_size ) # scalar spacing (dimensionless and natural-log equivalent) self.dlnT = ( np.log(10.0) * self.dlogT @@ -589,7 +593,7 @@ def _estimate_initial_dem(self, cutoff: float = 1.0 / np.e) -> np.ndarray: # 1. Peak location max_idx = np.argmax(R_vals) peak_val = R_vals[max_idx] - t_peak = np.round(logT_orig[max_idx] / self._dT) * self._dT + t_peak = np.round(logT_orig[max_idx] / self._logarithmic_temperature_step_size) * self._logarithmic_temperature_step_size # 2. Good window (where R > cutoff * peak) good = np.where(R_vals > peak_val * cutoff)[0] @@ -664,7 +668,7 @@ def _build_lmfit_parameters(self, n_knots: int = 6): Returns ------- params : lmfit.Parameters - Parameters object containing log10(DEM/solv_factor) values at knot points. + Parameters object containing log10(DEM/normalization_factor) values at knot points. Each parameter is named "knot_i" where i = 0..n_knots-1. Notes @@ -673,8 +677,8 @@ def _build_lmfit_parameters(self, n_knots: int = 6): at peak response temperatures and spreading them across the grid. - Here, we select evenly spaced knots across the solver's logT range. - The stored value at each knot is: - log10(DEM / solv_factor) - where `solv_factor` is typically 1e17. + log10(DEM / normalization_factor) + where `normalization_factor` is typically 1e17. - Bounds can be applied to prevent extreme DEM excursions if desired. 
""" if not hasattr(self, "_initial_log_dem"): @@ -685,7 +689,7 @@ def _build_lmfit_parameters(self, n_knots: int = 6): from lmfit import Parameters # Choose evenly spaced knot positions across logT range - knot_positions = np.linspace(self._min_T, self._max_T, n_knots) + knot_positions = np.linspace(self._minimum_bound_temperature, self._maximum_bound_temperature, n_knots) self._knot_positions = knot_positions # store for later reconstruction # Interpolate initial DEM estimate at these knot positions @@ -698,8 +702,8 @@ def _build_lmfit_parameters(self, n_knots: int = 6): ) init_log_dem_at_knots = interp_func(knot_positions) - # Convert to log10(DEM/solv_factor) - init_scaled = init_log_dem_at_knots - np.log10(self._solv_factor) + # Convert to log10(DEM/normalization_factor) + init_scaled = init_log_dem_at_knots - np.log10(self._normalization_factor) # Build lmfit Parameters params = Parameters() @@ -722,7 +726,7 @@ def _reconstruct_dem_from_knots(self, params) -> np.ndarray: Parameters ---------- params : lmfit.Parameters - Knot parameters where each value is log10(DEM / solv_factor). + Knot parameters where each value is log10(DEM / normalization_factor). Returns ------- @@ -733,9 +737,9 @@ def _reconstruct_dem_from_knots(self, params) -> np.ndarray: ----- - Knot positions are stored in `self._knot_positions` when `_build_lmfit_parameters()` is called. - - The stored parameter values are log10(DEM/solv_factor). + - The stored parameter values are log10(DEM/normalization_factor). - Conversion back to DEM: - DEM = solv_factor * 10^(interp(log10 DEM/solv_factor)) + DEM = normalization_factor * 10^(interp(log10 DEM/normalization_factor)) - Interpolation is linear in log space, as in IDL's `xrt_dem_iterative2.pro`. """ if not hasattr(self, "_knot_positions"): @@ -743,7 +747,7 @@ def _reconstruct_dem_from_knots(self, params) -> np.ndarray: "Knot positions not found. Run _build_lmfit_parameters() first." 
) - # Extract knot values from parameters (log10(DEM/solv_factor)) + # Extract knot values from parameters (log10(DEM/normalization_factor)) knot_vals = np.array( [params[f"knot_{i}"].value for i in range(len(self._knot_positions))] ) @@ -759,7 +763,7 @@ def _reconstruct_dem_from_knots(self, params) -> np.ndarray: log_dem_scaled = interp_func(self.logT) # Convert back to DEM [cm^-5 K^-1] - dem_grid = self._solv_factor * (10.0**log_dem_scaled) + dem_grid = self._normalization_factor * (10.0**log_dem_scaled) return dem_grid @@ -772,7 +776,7 @@ def _reconstruct_dem_from_knots(self, params) -> np.ndarray: # Parameters # ---------- # params : lmfit.Parameters - # Knot parameters, each storing log10(DEM / solv_factor). + # Knot parameters, each storing log10(DEM / normalization_factor). # Returns # ------- @@ -1253,19 +1257,19 @@ def summary(self): else: print( f" Error model used: Auto-estimated " - f"(obs * 0.03, min={self.min_error} DN/s)" + f"(obs * 0.03, min={self.min_observational_error.value} DN/s)" ) print(" [IDL reference: xrt_dem_iterative2.pro]") # Temperature grid print( - f" Temperature grid: logT {self.min_T:.2f}–{self.max_T:.2f}, step {self.dT}" + f" Temperature grid: logT {self.minimum_bound_temperature:.2f}–{self.maximum_bound_temperature:.2f}, step {self.logarithmic_temperature_step_size}" ) print(f" Temp bins: {len(self.logT)}") print(f" dlogT: {self.dlogT:.3f}, dlnT: {self.dlnT:.3f}") # Solver setup - print(f" Solver factor: {self.solv_factor:.1e}") + print(f" Solver factor: {self.normalization_factor:.1e}") print(f" Monte Carlo runs: {self.monte_carlo_runs or 'None'}") print(f" Max Iterations: {self.max_iterations}") print(f" Knots (n_knots): {getattr(self, '_last_n_knots', 'default=6')}") From e64d476b851b3ae5f75a4b3bb6b2ec98dce63eba Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Tue, 18 Nov 2025 17:41:42 -0500 Subject: [PATCH 082/121] Debugging-updates made --- xrtpy/xrt_dem_iterative/dem_solver.py | 208 ++++++++++++++++++++------ 1 file 
changed, 166 insertions(+), 42 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index bffa37b99..aa15e01f3 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -54,9 +54,9 @@ def __init__( temperature_responses, intensity_errors=None, minimum_bound_temperature=5.5, - maximum_bound_temperature=8.0, + maximum_bound_temperature=8.0, logarithmic_temperature_step_size=0.1, - monte_carlo_runs=0, #Need to fix this. If set, perform that many Monte Carlo runs. If not set, only do base solution. + monte_carlo_runs=0, max_iterations=2000, normalization_factor=1e21, ): @@ -100,13 +100,26 @@ def __init__( self._intensity_errors = None # Store temperature grid parameters - self._logarithmic_temperature_step_size = float(logarithmic_temperature_step_size) + self._logarithmic_temperature_step_size = float( + logarithmic_temperature_step_size + ) self._minimum_bound_temperature = float(minimum_bound_temperature) self._maximum_bound_temperature = float(maximum_bound_temperature) if not (self._minimum_bound_temperature < self._maximum_bound_temperature): - raise ValueError("minimum_bound_temperature must be < maximum_bound_temperature.") + raise ValueError( + "minimum_bound_temperature must be < maximum_bound_temperature." 
+ ) - n_pts = int(np.floor((self._maximum_bound_temperature - self._minimum_bound_temperature) / logarithmic_temperature_step_size + 1e-9)) + 1 + n_pts = ( + int( + np.floor( + (self._maximum_bound_temperature - self._minimum_bound_temperature) + / logarithmic_temperature_step_size + + 1e-9 + ) + ) + + 1 + ) if n_pts < 4: raise ValueError("Temperature grid must have at least 4 points.") @@ -138,7 +151,9 @@ def __init__( # Check logarithmic_temperature_step_size is positive if self._logarithmic_temperature_step_size <= 0: - raise ValueError("logarithmic_temperature_step_size must be a positive scalar.") + raise ValueError( + "logarithmic_temperature_step_size must be a positive scalar." + ) # Store temperature response objects self.responses = temperature_responses @@ -149,7 +164,10 @@ def __init__( # Validate that the temperature grid falls within the responses for r in self.responses: logT_grid = np.log10(r.temperature.to_value(u.K)) - if not (self._minimum_bound_temperature >= logT_grid.min() and self._maximum_bound_temperature <= logT_grid.max()): + if not ( + self._minimum_bound_temperature >= logT_grid.min() + and self._maximum_bound_temperature <= logT_grid.max() + ): raise ValueError( f"The specified temperature range [{minimum_bound_temperature}, {maximum_bound_temperature}] is outside the bounds of one or more filter response grids.\n" "Please ensure the temperature range fits within all responses.\n" @@ -169,7 +187,12 @@ def __init__( f" Filter channels: {len(self.observed_channel)}\n" ) - self.logT = np.arange(self._minimum_bound_temperature, self._maximum_bound_temperature + self._logarithmic_temperature_step_size / 2, self._logarithmic_temperature_step_size) + self.logT = np.arange( + self._minimum_bound_temperature, + self._maximum_bound_temperature + + self._logarithmic_temperature_step_size / 2, + self._logarithmic_temperature_step_size, + ) try: self._normalization_factor = float(normalization_factor) @@ -217,10 +240,23 @@ def validate_inputs(self) 
-> None: # 5) temperature grid sanity if not (self._minimum_bound_temperature < self._maximum_bound_temperature): - raise ValueError("minimum_bound_temperature must be < maximum_bound_temperature.") + raise ValueError( + "minimum_bound_temperature must be < maximum_bound_temperature." + ) if self._logarithmic_temperature_step_size <= 0: - raise ValueError("logarithmic_temperature_step_size must be a positive scalar.") - n_pts = int(np.floor((self._maximum_bound_temperature - self._minimum_bound_temperature) / self._logarithmic_temperature_step_size + 1e-9)) + 1 + raise ValueError( + "logarithmic_temperature_step_size must be a positive scalar." + ) + n_pts = ( + int( + np.floor( + (self._maximum_bound_temperature - self._minimum_bound_temperature) + / self._logarithmic_temperature_step_size + + 1e-9 + ) + ) + + 1 + ) if n_pts < 4: raise ValueError("Temperature grid must have at least 4 points.") @@ -228,7 +264,10 @@ def validate_inputs(self) -> None: for r in self.responses: # logT_grid = np√.log10(r.temperature.value) logT_grid = np.log10(r.temperature.to_value(u.K)) - if not (self._minimum_bound_temperature >= logT_grid.min() and self._maximum_bound_temperature <= logT_grid.max()): + if not ( + self._minimum_bound_temperature >= logT_grid.min() + and self._maximum_bound_temperature <= logT_grid.max() + ): raise ValueError( f"The specified temperature range [{self._minimum_bound_temperature}, {self._maximum_bound_temperature}] " "is outside the bounds of one or more filter response grids." @@ -325,7 +364,7 @@ def min_observational_error(self): """ Default - Minimum absolute observational intensity error applied to DN/s/pix when intensity error is not provided. 
""" - return 2 * (u.DN/u.s) + return 2 * (u.DN / u.s) @property def relative_error(self): @@ -364,21 +403,20 @@ def intensity_errors(self) -> u.Quantity: ) self._using_estimated_errors = True # suppress future warnings - #NOTETOJOYWe can remove if no issues later - # #No units - added in the return + # NOTETOJOYWe can remove if no issues later + # #No units - added in the return # estimated = np.maximum( - # self.relative_error * self._observed_intensities , + # self.relative_error * self._observed_intensities , # self.min_observational_error.value, # ) # return estimated * (u.DN / u.s) - - #Fixed in units + + # Fixed in units estimated = np.maximum( - (self.relative_error * self._observed_intensities)*(u.DN/u.s), + (self.relative_error * self._observed_intensities) * (u.DN / u.s), self.min_observational_error, ) return estimated - @property def monte_carlo_runs(self) -> int: @@ -394,7 +432,7 @@ def monte_carlo_runs(self) -> int: def normalization_factor(self): """ Scaling factor used during DEM optimization to stabilize the spline fit. - Corresponds to `normalization_factor` in IDL (typically 1e21). + Corresponds to `normalization_factor` in IDL (default 1e21). """ return self._normalization_factor @@ -406,6 +444,8 @@ def max_iterations(self): """ return self._max_iterations + return None + def create_logT_grid(self): """ Construct the regular log10 temperature grid for DEM calculations. @@ -428,13 +468,26 @@ def create_logT_grid(self): Step size in natural log(T). Useful for IDL-style integrals of the form: F = int. 
DEM(T) * R(T) * T d(ln T) """ - # number of bins including endpoints - n_bins = int(round((self._maximum_bound_temperature - self._minimum_bound_temperature) / self._logarithmic_temperature_step_size)) + 1 + # number of bins including endpoints - if default values are used - end value is 26 + self.n_bins = ( + int( + round( + (self._maximum_bound_temperature - self._minimum_bound_temperature) + / self._logarithmic_temperature_step_size + ) + ) + + 1 + ) + # This matches the IDL temperature grid exactly. self.logT & self.T. # inclusive logT grid (IDL-style regular grid) # Units = 'log K. Runs from minimum_bound_temperature to Max_T with bin-width = DT - # SELFNOTEJOY- Do we need to add units - current holds no units- it's wokring correctly - Should this on the Test as well? - self.logT = np.linspace(self._minimum_bound_temperature, self._maximum_bound_temperature, n_bins) + # SELFNOTEJOY- Do we need to add units - current holds no units- it's wokring correctly - Should this on the Test as well?- I don't think it- it's noted in IDL but used with units + self.logT = np.linspace( + self._minimum_bound_temperature, + self._maximum_bound_temperature, + self.n_bins, + ) # linear temperature grid in Kelvin self.T = (10.0**self.logT) * u.K @@ -442,12 +495,14 @@ def create_logT_grid(self): self.dlogT = float( self._logarithmic_temperature_step_size ) # scalar spacing (dimensionless and natural-log equivalent) + self.dlnT = ( np.log(10.0) * self.dlogT ) # for IDL-style intergral DEM(T) * R(T) * T dlnT - IDL “regular logT grid” def _interpolate_responses_to_grid(self): """ + IDL method of Interpolate emissivity. Interpolate all filter responses onto the common logT grid and build the response matrix. 
@@ -475,15 +530,17 @@ def _interpolate_responses_to_grid(self): ) rows = [] - for T_orig, R_orig in zip(self.response_temperatures, self.response_values): + for T_orig, R_orig in zip( + self.response_temperatures, self.response_values + ): # Make sure that R_orig.value is indeed in DN/s/pix per cm^5 logT_orig = np.log10(T_orig.to_value(u.K)) # response_vals = R_orig.to_value((u.cm**5 * u.DN) / (u.pix * u.s)) # response_vals = R_orig.to_value(u.DN / u.s / u.pix / (u.cm**5)) - #response_vals = R_orig.to_value((u.DN / u.s / u.pix) * u.cm**5) Comment on Nov14 - #response_vals = R_orig.to_value(u.DN / u.s / u.pix / u.cm**5) - response_vals = R_orig.value # already in correct physical units for XRTpy - - + # response_vals = R_orig.to_value((u.DN / u.s / u.pix) * u.cm**5) Comment on Nov14 + # response_vals = R_orig.to_value(u.DN / u.s / u.pix / u.cm**5) + response_vals = ( + R_orig.value + ) # already in correct physical units for XRTpy #NOTEFORJOY- TRIPLE check this interp_func = interp1d( logT_orig, @@ -593,7 +650,10 @@ def _estimate_initial_dem(self, cutoff: float = 1.0 / np.e) -> np.ndarray: # 1. Peak location max_idx = np.argmax(R_vals) peak_val = R_vals[max_idx] - t_peak = np.round(logT_orig[max_idx] / self._logarithmic_temperature_step_size) * self._logarithmic_temperature_step_size + t_peak = ( + np.round(logT_orig[max_idx] / self._logarithmic_temperature_step_size) + * self._logarithmic_temperature_step_size + ) # 2. 
Good window (where R > cutoff * peak) good = np.where(R_vals > peak_val * cutoff)[0] @@ -689,7 +749,9 @@ def _build_lmfit_parameters(self, n_knots: int = 6): from lmfit import Parameters # Choose evenly spaced knot positions across logT range - knot_positions = np.linspace(self._minimum_bound_temperature, self._maximum_bound_temperature, n_knots) + knot_positions = np.linspace( + self._minimum_bound_temperature, self._maximum_bound_temperature, n_knots + ) self._knot_positions = knot_positions # store for later reconstruction # Interpolate initial DEM estimate at these knot positions @@ -763,10 +825,27 @@ def _reconstruct_dem_from_knots(self, params) -> np.ndarray: log_dem_scaled = interp_func(self.logT) # Convert back to DEM [cm^-5 K^-1] - dem_grid = self._normalization_factor * (10.0**log_dem_scaled) + dem_grid = self._normalization_factor * ( + 10.0**log_dem_scaled + ) ## dem_grid now back in physical units return dem_grid + ################################################################################################################################ + ################################################################################################################################ + #################################### structure with all fields the DEM solver expects ########################################## + # 1 Temperature - self.logT , self.T + # 2 Response - note - interpolated onto your logT grid. - self.interpolated_responses, self._response_matrix + # 3 # of bins - n_bins + # 4 i_obs – self._observed_intensites - measured DN/s/pixel scaled by solv_factor + # self.observed_intensities + # 5 uncertainty on the intensity - Also scaled by solv_factor. - self.intensity_errors self.normalization_factor + # 6 units? 
+ + ################################################################################################################################ + ################################################################################################################################ + ################################################################################################################################ + # self._iteration_chi2 = [] # def _residuals(self, params) -> np.ndarray: @@ -844,14 +923,40 @@ def _residuals(self, params) -> np.ndarray: # 3. Modeled intensities I_calc = np.sum(R_mid * dem_mid * T_mid * self.dlnT, axis=1) - # 4. Residuals: normalize by observational errors - y_obs = getattr( + # # 4. Residuals: normalize by observational errors + # y_obs = getattr( + # self, "_active_observed_intensities", self._observed_intensities + # ) + # sigma = self.intensity_errors.to_value(u.DN / u.s) + # residuals = (y_obs - I_calc) / sigma + + # # 5. Track χ² per iteration + # chi2_val = np.sum(residuals**2) + # if not hasattr(self, "_iteration_chi2"): + # self._iteration_chi2 = [] + # self._iteration_chi2.append(chi2_val) + + # return residuals + + # 4. Use either base or MC-perturbed observed intensities (physical units) + y_obs_phys = getattr( self, "_active_observed_intensities", self._observed_intensities ) - sigma = self.intensity_errors.to_value(u.DN / u.s) - residuals = (y_obs - I_calc) / sigma + sigma_phys = self.intensity_errors.to_value(u.DN / u.s) + + # 5. Apply normalization_factor in an IDL-like way: + # scale data, model, and errors by the same factor. + # (This keeps residuals numerically identical, but keeps + # internal numbers closer to order unity if desired.) + nf = self._normalization_factor + + y_scaled = y_obs_phys / nf + I_calc_scaled = I_calc / nf + sigma_scaled = sigma_phys / nf - # 5. Track χ² per iteration + residuals = (y_scaled - I_calc_scaled) / sigma_scaled + + # 6. 
Track χ² per iteration (in scaled space — same χ² as unscaled) chi2_val = np.sum(residuals**2) if not hasattr(self, "_iteration_chi2"): self._iteration_chi2 = [] @@ -989,7 +1094,7 @@ def _callback(params, iter, resid, *args, **kwargs): sigma = self.intensity_errors.to_value(u.DN / u.s) residuals = (self._observed_intensities - I_fit) / sigma - # Chi² metrics + # Chi^2 metrics self.chi2 = np.sum(residuals**2) dof = len(self._observed_intensities) - len(result.params) self.redchi2 = self.chi2 / max(dof, 1) @@ -1033,8 +1138,10 @@ def fit_with_multiple_methods( result = minimize(self._residuals, params, method=method, **kwargs) # Compute DEM + chi square for this fit - # SELFNOTEJOY - output currently does not have units. unts=cm^5 * K^-1 Make this a test - dem = self._reconstruct_dem_from_knots(result.params) #SELFNOTEJOY - here is the stamp to defining the DEM - triple check + # SELFNOTEJOY - output currently does not have units. unts=cm^5 * K^-1 Make this a test + dem = self._reconstruct_dem_from_knots( + result.params + ) # SELFNOTEJOY - here is the stamp to defining the DEM - triple check dem_mid = 0.5 * (dem[:-1] + dem[1:]) R_mid = 0.5 * (self._response_matrix[:, :-1] + self._response_matrix[:, 1:]) T_mid = 0.5 * (self.T[:-1] + self.T[1:]).to_value(u.K) @@ -1352,3 +1459,20 @@ def summary(self): XRTDEMIterative.plot_fit_residuals = dem_plotting.plot_fit_residuals XRTDEMIterative.plot_dem_with_median_bins = dem_plotting.plot_dem_with_median_bins XRTDEMIterative.plot_iteration_stats = dem_plotting.plot_iteration_stats + + +# NOTEFROMJOYTOJOY +# Missin outputs +# 1 +# I'm missing BASE_OBS- It is a 2D array of intensity values for all DEM runs (base + MC) +# the set of observed intensities used in each DEM solution, including: +# Column 0 > the original observed intensities (your real data) +# Columns 1..MC_ITER > each Monte-Carlo–perturbed intensity vector + +# 2 +# MOD_OBS - Model intensities predicted by the DEM for each run. 
+ +# 3 +# CHISQ- [1+MC_iter] +# chi-squre for each DEM solution +# Computed via mpdemfunct residuals squared and summed From f8ad163edae4bc7030ee26f412656bbf15f64e10 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Tue, 18 Nov 2025 18:11:56 -0500 Subject: [PATCH 083/121] Adding code incase - observed intensities are all zero. --- xrtpy/xrt_dem_iterative/dem_solver.py | 32 ++++++++++++++++++++++++--- 1 file changed, 29 insertions(+), 3 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index aa15e01f3..333ad77d5 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -10,7 +10,6 @@ import numpy as np from lmfit import Parameters, minimize from scipy.interpolate import interp1d - from xrtpy.util.filters import validate_and_format_filters from xrtpy.xrt_dem_iterative import dem_plotting @@ -284,7 +283,14 @@ def validate_inputs(self) -> None: ): raise ValueError("`intensity_errors` must be finite and >= 0.") - # success ⇒ no return value + # 8 warning + if np.all(self._observed_intensities == 0): + warnings.warn( + "\n\n All observed intensities are zero. DEM solution will yield zero. " + "Object created, but solving will return DEM=0. \n\n" + ) + + # success -> no return value return None def __repr__(self): @@ -1232,8 +1238,8 @@ def fit_with_multiple_methods( def run_monte_carlo( self, n_runs=None, n_knots=6, method="least_squares", random_seed=None ): - from lmfit import minimize import numpy as np + from lmfit import minimize from tqdm import tqdm # add this at top of file if n_runs is None: @@ -1304,6 +1310,26 @@ def solve( - "chi2" : χ² - "redchi2" : reduced χ² """ + # ARLY IDL-STYLE NOSOLVE CHECk + # IDL behavior: if all observed intensities are zero (or non-positive), + # the DEM is trivially zero. Skip solving and return immediately. + if np.all(self._observed_intensities <= 0): + warnings.warn( + "All observed intensities are zero or non-positive. 
" + "DEM cannot be solved. Returning zero DEM and zero fitted intensities " + "(IDL nosolve behavior)." + ) + + # Ensure grid exists (IDL also returns logT_out even for nosolve) + if not hasattr(self, "logT"): + self.create_logT_grid() + + self.dem = np.zeros_like(self.logT) + self.fitted_intensities = np.zeros_like(self._observed_intensities) + self.chi2 = 0.0 + self.redchi2 = 0.0 + return self + # 1. Ensure grid & responses self.create_logT_grid() self._interpolate_responses_to_grid() From ddd90bf3c8b2a0f5880f43344ddf501d71c6c87e Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Tue, 18 Nov 2025 18:59:04 -0500 Subject: [PATCH 084/121] Fixing placement of code --- xrtpy/xrt_dem_iterative/dem_solver.py | 46 +++++++++++++++------------ 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 333ad77d5..797136866 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -306,7 +306,7 @@ def __repr__(self): # """ # return self._name - ####################################################################################################################################### + ########################################################## @property def observed_intensities( self, @@ -589,6 +589,26 @@ def response_matrix(self): ) return self._response_matrix + ############################ Everything line of code ABOVE is PREP for the DEM ############################################# + + ################################################################################################################################ + ################################################################################################################################ + #################################### structure with all fields the DEM solver expects ########################################## + # 1 Temperature - self.logT , self.T + # 2 Response - note - interpolated onto 
your logT grid. - self.interpolated_responses, self._response_matrix + # 3 # of bins - n_bins + # 4 i_obs – self._observed_intensites - measured DN/s/pixel scaled by solv_factor + # self.observed_intensities + # 5 uncertainty on the intensity - Also scaled by solv_factor. - self.intensity_errors self.normalization_factor + # 6 units? + + ################################################################################################################################ + ################################################################################################################################ + ################################################################################################################################ + + # **************************************************************************************************************************** + ############################ Everything line of code BELOW is FOR the DEM ################################################## + def _estimate_initial_dem(self, cutoff: float = 1.0 / np.e) -> np.ndarray: """ Estimate an initial DEM curve from observed intensities and responses. @@ -837,21 +857,6 @@ def _reconstruct_dem_from_knots(self, params) -> np.ndarray: return dem_grid - ################################################################################################################################ - ################################################################################################################################ - #################################### structure with all fields the DEM solver expects ########################################## - # 1 Temperature - self.logT , self.T - # 2 Response - note - interpolated onto your logT grid. - self.interpolated_responses, self._response_matrix - # 3 # of bins - n_bins - # 4 i_obs – self._observed_intensites - measured DN/s/pixel scaled by solv_factor - # self.observed_intensities - # 5 uncertainty on the intensity - Also scaled by solv_factor. 
- self.intensity_errors self.normalization_factor - # 6 units? - - ################################################################################################################################ - ################################################################################################################################ - ################################################################################################################################ - # self._iteration_chi2 = [] # def _residuals(self, params) -> np.ndarray: @@ -1310,14 +1315,13 @@ def solve( - "chi2" : χ² - "redchi2" : reduced χ² """ - # ARLY IDL-STYLE NOSOLVE CHECk + # IDL-STYLE NOSOLVE CHECk # IDL behavior: if all observed intensities are zero (or non-positive), # the DEM is trivially zero. Skip solving and return immediately. - if np.all(self._observed_intensities <= 0): + if np.all(self._observed_intensities <= 0): # == 0 warnings.warn( - "All observed intensities are zero or non-positive. " - "DEM cannot be solved. Returning zero DEM and zero fitted intensities " - "(IDL nosolve behavior)." + "\n\n All observed intensities are zero or non-positive. " + "DEM cannot be solved. Returning zero DEM and zero fitted intensities. 
\n\n" ) # Ensure grid exists (IDL also returns logT_out even for nosolve) From 9be53d87973f72e7f0e2588e69cde4f4bd02e7f6 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Wed, 19 Nov 2025 14:45:45 -0500 Subject: [PATCH 085/121] Prepare the scaled observed intensities and uncertainties-new function --- xrtpy/xrt_dem_iterative/dem_solver.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 797136866..f7d3cc3a7 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -589,6 +589,28 @@ def response_matrix(self): ) return self._response_matrix + def _prepare_scaled_observations(self): + """ + Prepare the scaled observed intensities and uncertainties + exactly as done in the IDL routine xrt_dem_iterative2.pro. + + IDL equivalent: + input1.i_obs = input1.i_obs / solv_factor + input1.i_err = input1.i_err / solv_factor + """ + # Extract values as plain floats (DN/s/pix) + intensities_scaled_raw = self.observed_intensities.value + sigma_intensity_errors_raw = self.intensity_errors.to_value(u.DN / u.s) + + # Apply normalization + self.intensities_scaled = intensities_scaled_raw / self.normalization_factor + self.sigma_scaled_intensity_errors = ( + sigma_intensity_errors_raw / self.normalization_factor + ) + + # Store for solver + self._scaled_prepared = True + ############################ Everything line of code ABOVE is PREP for the DEM ############################################# ################################################################################################################################ From a08fbf75cbeeb969221419fdc768345af4d80e68 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Wed, 19 Nov 2025 14:49:22 -0500 Subject: [PATCH 086/121] Just a note to add test for scaling before being sent to DEM function --- .../test/test_dem_input_validation.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git 
a/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py b/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py index a8ca5e422..925158043 100644 --- a/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py +++ b/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py @@ -117,3 +117,14 @@ def test_validate_inputs_mismatched_errors(): dem = XRTDEMIterative(filters, i_obs, resp, intensity_errors=i_err) with pytest.raises(ValueError, match="intensity_errors must match"): dem.validate_inputs() + + + +#Test to add later +#both should be True +# np.allclose(x.intensities_scaled, +# x.observed_intensities.value / x.normalization_factor) + +# np.allclose(x.sigma_scaled_intensity_errors, +# x.intensity_errors.to_value(u.DN/u.s) / x.normalization_factor) + From 77b3bc6b86b525938e80da6f64b196e003040152 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Wed, 19 Nov 2025 15:22:30 -0500 Subject: [PATCH 087/121] Removed self.logT in the init and kept a sinlge one in a created functioncreate_logT_grid which simply creates the temperature grid which will be used in the whole code/dem. --- xrtpy/xrt_dem_iterative/dem_solver.py | 39 ++++++++++++++++++--------- 1 file changed, 27 insertions(+), 12 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index f7d3cc3a7..22a1d30c6 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -186,12 +186,13 @@ def __init__( f" Filter channels: {len(self.observed_channel)}\n" ) - self.logT = np.arange( - self._minimum_bound_temperature, - self._maximum_bound_temperature - + self._logarithmic_temperature_step_size / 2, - self._logarithmic_temperature_step_size, - ) + # I am commenting this out because it is redundant since I am defining it below again. 
I wanna be consistent in using the same logT below.- Remove after testing + # self.logT = np.arange( + # self._minimum_bound_temperature, + # self._maximum_bound_temperature + # + self._logarithmic_temperature_step_size / 2, + # self._logarithmic_temperature_step_size, + # ) try: self._normalization_factor = float(normalization_factor) @@ -399,7 +400,17 @@ def intensity_errors(self) -> u.Quantity: if self._intensity_errors is not None: return self._intensity_errors * (u.DN / u.s) - if self._using_estimated_errors: + # if self._using_estimated_errors: + # warnings.warn( + # "No intensity_errors provided. Using default model: " + # f"max(relative-error * observed_intensity, min_observational_error)\n" + # f"=> relative_error = {self.relative_error} =, min_observational_error = {self.min_observational_error.value} DN/s\n" + # "See: https://hesperia.gsfc.nasa.gov/ssw/hinode/xrt/idl/util/xrt_dem_iterative2.pro", + # UserWarning, + # ) + # self._using_estimated_errors = True # suppress future warnings + + if not self._using_estimated_errors: warnings.warn( "No intensity_errors provided. Using default model: " f"max(relative-error * observed_intensity, min_observational_error)\n" @@ -407,7 +418,7 @@ def intensity_errors(self) -> u.Quantity: "See: https://hesperia.gsfc.nasa.gov/ssw/hinode/xrt/idl/util/xrt_dem_iterative2.pro", UserWarning, ) - self._using_estimated_errors = True # suppress future warnings + self._using_estimated_errors = True # NOTETOJOYWe can remove if no issues later # #No units - added in the return @@ -450,8 +461,6 @@ def max_iterations(self): """ return self._max_iterations - return None - def create_logT_grid(self): """ Construct the regular log10 temperature grid for DEM calculations. @@ -489,6 +498,8 @@ def create_logT_grid(self): # inclusive logT grid (IDL-style regular grid) # Units = 'log K. 
Runs from minimum_bound_temperature to Max_T with bin-width = DT # SELFNOTEJOY- Do we need to add units - current holds no units- it's wokring correctly - Should this on the Test as well?- I don't think it- it's noted in IDL but used with units + + # np.linspace over np.arange - simple reproduces that reliably:endpoint included, exact number of bins, and no accumulating floating-point drift - Best match to IDL self.logT = np.linspace( self._minimum_bound_temperature, self._maximum_bound_temperature, @@ -599,8 +610,12 @@ def _prepare_scaled_observations(self): input1.i_err = input1.i_err / solv_factor """ # Extract values as plain floats (DN/s/pix) - intensities_scaled_raw = self.observed_intensities.value - sigma_intensity_errors_raw = self.intensity_errors.to_value(u.DN / u.s) + intensities_scaled_raw = ( + self.observed_intensities.value + ) # Might just remove this line and up in the normalization + sigma_intensity_errors_raw = self.intensity_errors.to_value( + u.DN / u.s + ) # Might just remove this line and up in the normalization # Apply normalization self.intensities_scaled = intensities_scaled_raw / self.normalization_factor From a12669ded62ff2a4dee7cee3b37e0653dc3b39f3 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Wed, 19 Nov 2025 20:43:19 -0500 Subject: [PATCH 088/121] Clean up - _estimate_initial_dem and mirror IDL where I forgot to -Use flat dem for inital guess --- xrtpy/xrt_dem_iterative/dem_solver.py | 283 +++++++++++++++++++------- 1 file changed, 210 insertions(+), 73 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 22a1d30c6..d3b9981fa 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -645,43 +645,183 @@ def _prepare_scaled_observations(self): # **************************************************************************************************************************** ############################ Everything line of code BELOW is FOR the 
DEM ################################################## + # **************************************************************************************************************************** + + ################################################################################################################################ + ################################################################################################################################ + #############************************** Start of INITIAL ROUGH DEM ESTIMATE **************************########################## + ################## An estimated EM shape based on simple intensity-over-response peaks, smoothed across T. ##################### + + # def _estimate_initial_dem(self, cutoff: float = 1.0 / np.e) -> np.ndarray: + # """ + # Estimate an initial DEM curve from observed intensities and responses. + + # This follows the algorithm in IDL's `xrt_dem_iterative2.pro`, which uses + # response-peak inversion to generate a crude log10 DEM estimate per channel, + # then interpolates these estimates onto the solver's regular temperature grid. + + # Parameters + # ---------- + # cutoff : float, optional + # Fraction of the peak response to use for defining the "good" window + # around each filter's peak. Default is 1/e ≈ 0.3679. + + # Returns + # ------- + # est_log_dem_on_grid : ndarray + # Array of shape (n_temperatures,) giving the initial DEM estimate + # on `self.logT`. Values are log10(DEM) in [cm^-5 K^-1]. + # This can be used to seed the solver. + + # Notes + # ----- + # - Units: + # * Observed intensities: [DN s^-1 pix^-1] + # * Response: [DN s^-1 pix^-1 cm^5] + # * DEM(T): [cm^-5 K^-1] + # - For each filter: + # 1. Locate the peak of its response. + # 2. Define a window where response > cutoff * peak. + # 3. Compute the denominator integral: sum( T * R * dlnT ). + # 4. Estimate DEM_peak = I_obs / denom. + # 5. Store log10(DEM_peak) at the peak logT. + # - Duplicate peak logTs are merged by averaging. 
+ # - If fewer than 2 valid points are found, falls back to a flat guess + # (log10 DEM = 22 everywhere). + # """ + # if not hasattr(self, "logT"): + # raise AttributeError( + # "Temperature grid missing. Call create_logT_grid() first." + # ) + # if not hasattr(self, "_response_matrix"): + # raise AttributeError( + # "Response matrix missing. Call _interpolate_responses_to_grid() first." + # ) + + # # Storage for peak locations and DEM estimates + # t_peaks = [] + # log_dem_estimates = [] + + # # Loop over each filter + # for i, (T_orig, R_orig, I_obs) in enumerate( + # zip( + # self.response_temperatures, + # self.response_values, + # self._observed_intensities, + # ) + # ): + # logT_orig = np.log10(T_orig.to_value(u.K)) + # R_vals = R_orig.to_value((u.DN / u.s / u.pix) * u.cm**5) + + # if I_obs <= 0 or np.all(R_vals <= 0): + # continue # skip unusable channel + + # # 1. Peak location + # max_idx = np.argmax(R_vals) + # peak_val = R_vals[max_idx] + # t_peak = ( + # np.round(logT_orig[max_idx] / self._logarithmic_temperature_step_size) + # * self._logarithmic_temperature_step_size + # ) + + # # 2. Good window (where R > cutoff * peak) + # good = np.where(R_vals > peak_val * cutoff)[0] + # if len(good) < 2: + # continue + + # # 3. Compute denominator integral: sum(T 8*R* dlnT) + # T_good = 10.0 ** logT_orig[good] + # R_good = R_vals[good] + # dlogT_native = np.diff(logT_orig).mean() + # dlnT_native = np.log(10.0) * dlogT_native + # denom = np.sum(T_good * R_good * dlnT_native) + + # if denom <= 0: + # continue + + # # 4. DEM estimate at peak + # dem_peak = I_obs / denom # [cm^-5 K^-1] + # if dem_peak <= 0 or not np.isfinite(dem_peak): + # continue + + # log_dem_est = np.log10(dem_peak) + # t_peaks.append(t_peak) + # log_dem_estimates.append(log_dem_est) + + # # 5. 
Handle duplicates: average log10 DEM at same t_peak + # if len(t_peaks) == 0: + # # Fallback: flat guess (IDL style) + # est_log_dem_on_grid = np.ones_like(self.logT) * 22.0 + # self._initial_log_dem = est_log_dem_on_grid + # return est_log_dem_on_grid + + # uniq_t = {} + # for t, dem_val in zip(t_peaks, log_dem_estimates): + # if t in uniq_t: + # uniq_t[t].append(dem_val) + # else: + # uniq_t[t] = [dem_val] + # t_peaks_uniq = np.array(sorted(uniq_t.keys())) + # log_dem_uniq = np.array([np.mean(uniq_t[t]) for t in t_peaks_uniq]) + + # if len(t_peaks_uniq) < 2: + # # Not enough points > flat guess + # est_log_dem_on_grid = np.ones_like(self.logT) * 22.0 + # self._initial_log_dem = est_log_dem_on_grid + # return est_log_dem_on_grid + + # # 6. Interpolate sparse estimates onto the solver's grid + # interp_func = interp1d( + # t_peaks_uniq, + # log_dem_uniq, + # kind="linear", + # bounds_error=False, + # fill_value="extrapolate", + # ) + + # self._raw_estimated_dem_peaks = (t_peaks_uniq, log_dem_uniq) + # # est_log_dem_on_grid = interp_func(self.logT) + # # # Store for later use + # # self._initial_log_dem = est_log_dem_on_grid + + # #Use flat dem for inital guess + # est_log_dem_on_grid = np.ones_like(self.logT) * 1.0 + # self._initial_log_dem = est_log_dem_on_grid + + # return est_log_dem_on_grid def _estimate_initial_dem(self, cutoff: float = 1.0 / np.e) -> np.ndarray: """ - Estimate an initial DEM curve from observed intensities and responses. + Construct an initial DEM guess, mirroring IDL's xrt_dem_iter_estim. + + This method follows the *structure* of the IDL routine: + - Identify channels with non-zero observed intensity. + - For each such channel, find the peak of its emissivity/response. + - Integrate the response around the peak to estimate a DEM value. + - Combine/compact duplicate peak temperatures. 
+ + HOWEVER, to exactly match the behavior of IDL's xrt_dem_iter_estim + as used by xrt_dem_iter_nowidget, the final initial guess returned + to the solver is a *flat* log10(DEM) curve: - This follows the algorithm in IDL's `xrt_dem_iterative2.pro`, which uses - response-peak inversion to generate a crude log10 DEM estimate per channel, - then interpolates these estimates onto the solver's regular temperature grid. + log10(DEM(T)) = 1.0 for all T on the solver grid. + + The detailed peak-based DEM estimates are kept only for optional + diagnostics; they do not affect the initial DEM passed into the + spline/least-squares solver (this is exactly what the IDL code does). Parameters ---------- cutoff : float, optional - Fraction of the peak response to use for defining the "good" window - around each filter's peak. Default is 1/e ≈ 0.3679. + Fraction of the peak response used to define the "good" window + around each filter's peak. Default is 1/e (≈ 0.3679), as in IDL. Returns ------- est_log_dem_on_grid : ndarray - Array of shape (n_temperatures,) giving the initial DEM estimate - on `self.logT`. Values are log10(DEM) in [cm^-5 K^-1]. - This can be used to seed the solver. - - Notes - ----- - - Units: - * Observed intensities: [DN s^-1 pix^-1] - * Response: [DN s^-1 pix^-1 cm^5] - * DEM(T): [cm^-5 K^-1] - - For each filter: - 1. Locate the peak of its response. - 2. Define a window where response > cutoff * peak. - 3. Compute the denominator integral: sum( T * R * dlnT ). - 4. Estimate DEM_peak = I_obs / denom. - 5. Store log10(DEM_peak) at the peak logT. - - Duplicate peak logTs are merged by averaging. - - If fewer than 2 valid points are found, falls back to a flat guess - (log10 DEM = 22 everywhere). + Array of shape (n_temperatures,) giving the initial guess for + log10(DEM) on `self.logT`. For strict IDL-compatibility, this + is identically 1.0 everywhere. 
""" if not hasattr(self, "logT"): raise AttributeError( @@ -692,48 +832,55 @@ def _estimate_initial_dem(self, cutoff: float = 1.0 / np.e) -> np.ndarray: "Response matrix missing. Call _interpolate_responses_to_grid() first." ) - # Storage for peak locations and DEM estimates + # Optional: store the peak-based estimates for diagnostics only. + # These are NOT used to set the initial DEM (IDL overwrites them + # with a flat DEM before handing off to the solver). t_peaks = [] log_dem_estimates = [] - # Loop over each filter - for i, (T_orig, R_orig, I_obs) in enumerate( - zip( - self.response_temperatures, - self.response_values, - self._observed_intensities, - ) + # Loop over each filter/channel with non-zero intensity + for T_orig, R_orig, I_obs in zip( + self.response_temperatures, + self.response_values, + self._observed_intensities, ): logT_orig = np.log10(T_orig.to_value(u.K)) + # Make sure the response is in DN s^-1 pix^-1 per EM (cm^-5) R_vals = R_orig.to_value((u.DN / u.s / u.pix) * u.cm**5) if I_obs <= 0 or np.all(R_vals <= 0): continue # skip unusable channel - # 1. Peak location + # 1. Peak location (logT) max_idx = np.argmax(R_vals) peak_val = R_vals[max_idx] - t_peak = ( - np.round(logT_orig[max_idx] / self._logarithmic_temperature_step_size) - * self._logarithmic_temperature_step_size - ) + t_peak_raw = logT_orig[max_idx] + + # Round to nearest grid step in logT, similar to round_off(..., 0.1) + step = self._logarithmic_temperature_step_size + t_peak = np.round(t_peak_raw / step) * step # 2. Good window (where R > cutoff * peak) good = np.where(R_vals > peak_val * cutoff)[0] - if len(good) < 2: + if good.size < 1: continue # 3. 
Compute denominator integral: sum(T * R * dlnT) - T_good = 10.0 ** logT_orig[good] + T_good = 10.0 ** logT_orig[good] # [K] R_good = R_vals[good] - dlogT_native = np.diff(logT_orig).mean() + # Native spacing in log10(T) + if logT_orig.size > 1: + dlogT_native = np.mean(np.diff(logT_orig)) + else: + # Degenerate case; fall back to solver grid spacing + dlogT_native = step dlnT_native = np.log(10.0) * dlogT_native denom = np.sum(T_good * R_good * dlnT_native) if denom <= 0: continue - # 4. DEM estimate at peak + # 4. DEM estimate at the peak (for diagnostics only) dem_peak = I_obs / denom # [cm^-5 K^-1] if dem_peak <= 0 or not np.isfinite(dem_peak): continue @@ -742,43 +889,33 @@ def _estimate_initial_dem(self, cutoff: float = 1.0 / np.e) -> np.ndarray: t_peaks.append(t_peak) log_dem_estimates.append(log_dem_est) - # 5. Handle duplicates: average log10 DEM at same t_peak - if len(t_peaks) == 0: - # Fallback: flat guess (IDL style) - est_log_dem_on_grid = np.ones_like(self.logT) * 22.0 - self._initial_log_dem = est_log_dem_on_grid - return est_log_dem_on_grid - - uniq_t = {} - for t, dem_val in zip(t_peaks, log_dem_estimates): - if t in uniq_t: - uniq_t[t].append(dem_val) - else: - uniq_t[t] = [dem_val] - t_peaks_uniq = np.array(sorted(uniq_t.keys())) - log_dem_uniq = np.array([np.mean(uniq_t[t]) for t in t_peaks_uniq]) - - if len(t_peaks_uniq) < 2: - # Not enough points > flat guess - est_log_dem_on_grid = np.ones_like(self.logT) * 22.0 - self._initial_log_dem = est_log_dem_on_grid - return est_log_dem_on_grid + # Compact duplicate peak temperatures by averaging (diagnostic only) + if t_peaks: + uniq = {} + for t, val in zip(t_peaks, log_dem_estimates): + uniq.setdefault(t, []).append(val) + t_peaks_uniq = np.array(sorted(uniq.keys())) + log_dem_uniq = np.array([np.mean(uniq[t]) for t in t_peaks_uniq]) + # Store raw estimated peaks for debugging/inspection if desired + self._raw_estimated_dem_peaks = (t_peaks_uniq, log_dem_uniq) + else: + 
self._raw_estimated_dem_peaks = (np.array([]), np.array([])) - # 6. Interpolate sparse estimates onto the solver's grid - interp_func = interp1d( - t_peaks_uniq, - log_dem_uniq, - kind="linear", - bounds_error=False, - fill_value="extrapolate", - ) - est_log_dem_on_grid = interp_func(self.logT) + # IDL BEHAVIOR: override with flat initial DEM + # xrt_dem_iter_estim ultimately does: + # dem = 0.0*findgen(nt) + 1.0 ; Use flat dem for initial guess + # on a regular logT grid. We mirror that here exactly: + est_log_dem_on_grid = np.ones_like(self.logT, dtype=float) * 1.0 - # Store for later use + # Store for later use by the solver self._initial_log_dem = est_log_dem_on_grid return est_log_dem_on_grid + #############************************** End of INITIAL DEM ESTIMATE **************************################################## + ################################################################################################################################ + ################################################################################################################################ + def _build_lmfit_parameters(self, n_knots: int = 6): """ Build lmfit.Parameters for the DEM spline knots. 
From 5827f2222c981a7293b30de1015c3d495908d0f4 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Thu, 20 Nov 2025 18:21:00 -0500 Subject: [PATCH 089/121] Cleaned up code and methods --- xrtpy/xrt_dem_iterative/dem_solver.py | 949 ++++++++++++++++---------- 1 file changed, 589 insertions(+), 360 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index d3b9981fa..8e312d3e1 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -10,6 +10,7 @@ import numpy as np from lmfit import Parameters, minimize from scipy.interpolate import interp1d + from xrtpy.util.filters import validate_and_format_filters from xrtpy.xrt_dem_iterative import dem_plotting @@ -548,7 +549,7 @@ def _interpolate_responses_to_grid(self): rows = [] for T_orig, R_orig in zip( - self.response_temperatures, self.response_values + self.response_temperatures, self.response_values, strict=False ): # Make sure that R_orig.value is indeed in DN/s/pix per cm^5 logT_orig = np.log10(T_orig.to_value(u.K)) # response_vals = R_orig.to_value((u.cm**5 * u.DN) / (u.pix * u.s)) @@ -647,6 +648,8 @@ def _prepare_scaled_observations(self): ############################ Everything line of code BELOW is FOR the DEM ################################################## # **************************************************************************************************************************** + # ------------------------------------------------------------------------------------------------------------------------------- + ################################################################################################################################ ################################################################################################################################ #############************************** Start of INITIAL ROUGH DEM ESTIMATE **************************########################## @@ -843,6 +846,7 @@ def 
_estimate_initial_dem(self, cutoff: float = 1.0 / np.e) -> np.ndarray: self.response_temperatures, self.response_values, self._observed_intensities, + strict=False, ): logT_orig = np.log10(T_orig.to_value(u.K)) # Make sure the response is in DN s^-1 pix^-1 per EM (cm^-5) @@ -892,7 +896,7 @@ def _estimate_initial_dem(self, cutoff: float = 1.0 / np.e) -> np.ndarray: # Compact duplicate peak temperatures by averaging (diagnostic only) if t_peaks: uniq = {} - for t, val in zip(t_peaks, log_dem_estimates): + for t, val in zip(t_peaks, log_dem_estimates, strict=False): uniq.setdefault(t, []).append(val) t_peaks_uniq = np.array(sorted(uniq.keys())) log_dem_uniq = np.array([np.mean(uniq[t]) for t in t_peaks_uniq]) @@ -905,7 +909,16 @@ def _estimate_initial_dem(self, cutoff: float = 1.0 / np.e) -> np.ndarray: # xrt_dem_iter_estim ultimately does: # dem = 0.0*findgen(nt) + 1.0 ; Use flat dem for initial guess # on a regular logT grid. We mirror that here exactly: - est_log_dem_on_grid = np.ones_like(self.logT, dtype=float) * 1.0 + + #est_log_dem_on_grid = np.ones_like(self.logT, dtype=float) * 1.0 NOV20 + + + #est_log_dem_on_grid = np.ones_like(self.logT, dtype=float) * 0.0 #NOTEFORJOY + #or + est_log_dem_on_grid = np.zeros_like(self.logT) + + #Return the intial first guessed DEM + # Store for later use by the solver self._initial_log_dem = est_log_dem_on_grid @@ -916,323 +929,592 @@ def _estimate_initial_dem(self, cutoff: float = 1.0 / np.e) -> np.ndarray: ################################################################################################################################ ################################################################################################################################ - def _build_lmfit_parameters(self, n_knots: int = 6): + # ------------------------------------------------------------------------------------------------------------------------------- + + 
################################################################################################################################ + ################################################################################################################################ + #############************************** Start of **************************########################## + ################## ##################### + + def _prepare_spline_system(self): + """ + Pythonic, IDL version of mp_prep. + Prepares:s + - self.n_spl (number of spline knots) + - self.spline_logT (knot positions) + - self.spline_log_dem (initial spline logDEM values) + - self.pm_matrix (R(T) * T * dlnT) + - self.weights (all ones) + - self.abundances (all ones) """ - Build lmfit.Parameters for the DEM spline knots. - Parameters - ---------- - n_knots : int, optional - Number of spline knots across the logT grid. Default = 6. + # Number of channels + n_line = len(self._observed_intensities) - Returns - ------- - params : lmfit.Parameters - Parameters object containing log10(DEM/normalization_factor) values at knot points. - Each parameter is named "knot_i" where i = 0..n_knots-1. + # IDL: n_spl = min(n_line - 1, 7) - Make this a keyword in the class so use can tune it? + self.n_spl = min(max(n_line - 1, 1), 7) - Notes - ----- - - IDL's `xrt_dem_iterative2.pro` seeds its fit by taking DEM estimates - at peak response temperatures and spreading them across the grid. - - Here, we select evenly spaced knots across the solver's logT range. - - The stored value at each knot is: - log10(DEM / normalization_factor) - where `normalization_factor` is typically 1e17. - - Bounds can be applied to prevent extreme DEM excursions if desired. - """ - if not hasattr(self, "_initial_log_dem"): - raise AttributeError( - "Initial DEM not available. Run _estimate_initial_dem() first." 
- ) + # Weights and abundances (IDL sets all =1) + # Later, should I a use_line mask (IDL ignores lines with i_obs=0), but you can add that when you need it. + self.weights = np.ones(n_line, dtype=float) + self.abundances = np.ones(n_line, dtype=float) - from lmfit import Parameters + # pm_matrix = R(T) * T * dlnT (IDL line: emis * 10^t * alog(10^dt)) + # units - DN/s/pix/cm^5 * K * dLnT * DEM == DN/s/PIX + T_linear = self.T.to_value(u.K) + self.pm_matrix = (self._response_matrix * T_linear * self.dlnT).astype(float) - # Choose evenly spaced knot positions across logT range - knot_positions = np.linspace( - self._minimum_bound_temperature, self._maximum_bound_temperature, n_knots - ) - self._knot_positions = knot_positions # store for later reconstruction + # Knot positions are evenly spaced in logT (IDL spl_t) + self.spline_logT = np.linspace(self.logT.min(), self.logT.max(), self.n_spl) - # Interpolate initial DEM estimate at these knot positions - interp_func = interp1d( + # Initial spline DEM values: sample from initial logDEM grid + # (IDL spline(est_t, est_dem, spl_t)) + interp_init = interp1d( self.logT, - self._initial_log_dem, - kind="linear", + self._initial_log_dem, # IDL is flat logDEM = 1.0 + kind="linear", #IDL uses a cubic spline later NOTEFORJOY NOV24 bounds_error=False, fill_value="extrapolate", ) - init_log_dem_at_knots = interp_func(knot_positions) + + self.spline_log_dem = interp_init(self.spline_logT) + + def _build_lmfit_parameters(self): + """ + Build lmfit.Parameters object representing log10(DEM) at the spline knots. + IDL limits each spline DEM parameter to [-20, 0]. 
+ """ - # Convert to log10(DEM/normalization_factor) - init_scaled = init_log_dem_at_knots - np.log10(self._normalization_factor) + if not hasattr(self, "spline_log_dem"): + raise RuntimeError("Run _prepare_spline_system() first.") - # Build lmfit Parameters params = Parameters() - for i, val in enumerate(init_scaled): + + for i in range(self.n_spl): params.add( - name=f"knot_{i}", - value=val, - min=-10, # optional bounds: avoid absurdly low - max=50, # optional bounds: avoid absurdly high + f"knot_{i}", + value=float(self.spline_log_dem[i]), + min = -20.0, + max = 0.0, vary=True, ) - self._init_knot_params = params return params - def _reconstruct_dem_from_knots(self, params) -> np.ndarray: + + def _reconstruct_dem_from_knots(self, params): + """ + Construct DEM(T) on self.logT using spline of log10(DEM) at knot positions. """ - Reconstruct the DEM curve on the solver's logT grid from spline knot parameters. + from scipy.interpolate import CubicSpline - Parameters - ---------- - params : lmfit.Parameters - Knot parameters where each value is log10(DEM / normalization_factor). + knot_vals = np.array([params[f"knot_{i}"].value for i in range(self.n_spl)]) - Returns - ------- - dem_grid : ndarray - DEM values on `self.logT` grid in linear space [cm^-5 K^-1]. + # interp_spline = interp1d( + # self.spline_logT, + # knot_vals, + # kind="linear", #IDL uses cubic spline interpolation NOTEFORJOY NOV20 + # bounds_error=False, + # fill_value="extrapolate", + # ) - Notes - ----- - - Knot positions are stored in `self._knot_positions` when - `_build_lmfit_parameters()` is called. - - The stored parameter values are log10(DEM/normalization_factor). - - Conversion back to DEM: - DEM = normalization_factor * 10^(interp(log10 DEM/normalization_factor)) - - Interpolation is linear in log space, as in IDL's `xrt_dem_iterative2.pro`. 
+ # log_dem = interp_spline(self.logT) # log10(DEM) + # dem = 10.0**log_dem # DEM in linear cm^-5 K^-1 + + #Or used the code above but switch from linear to kind="cubic" + cs = CubicSpline(self.spline_logT, knot_vals, bc_type="natural") + log_dem = cs(self.logT) + dem = 10.0 ** log_dem + return dem + + + def _residuals(self, params): + """ + IDL equivalent of mpdemfunct. + Computes residuals: + ((DEM ## pm) - i_obs_scaled) / i_err_scaled """ - if not hasattr(self, "_knot_positions"): - raise AttributeError( - "Knot positions not found. Run _build_lmfit_parameters() first." - ) - # Extract knot values from parameters (log10(DEM/normalization_factor)) - knot_vals = np.array( - [params[f"knot_{i}"].value for i in range(len(self._knot_positions))] - ) + # 1. DEM(T) + dem = self._reconstruct_dem_from_knots(params) - # Interpolate across solver grid in log space - interp_func = interp1d( - self._knot_positions, - knot_vals, - kind="linear", - bounds_error=False, - fill_value="extrapolate", - ) - log_dem_scaled = interp_func(self.logT) + # 2. Modeled intensities (IDL: i_mod = (dem ## pm) * abunds) + i_mod = (self.pm_matrix @ dem) * self.abundances + + # 3. Observed (scaled) + y_scaled = self.intensities_scaled # i_obs / solv_factor + sigma_scaled = self.sigma_scaled_intensity_errors + + # 4. Residuals = (i_mod - y_obs) * weights / sigma + residuals = (i_mod - y_scaled) * self.weights / sigma_scaled + + # Store χ² if desired + chi2_val = np.sum(residuals**2) + if not hasattr(self, "_iteration_chi2"): + self._iteration_chi2 = [] + self._iteration_chi2.append(chi2_val) - # Convert back to DEM [cm^-5 K^-1] - dem_grid = self._normalization_factor * ( - 10.0**log_dem_scaled - ) ## dem_grid now back in physical units + return residuals - return dem_grid + # def _build_lmfit_parameters(self, n_knots: int = 6): + # """ + # Build lmfit.Parameters for the DEM spline knots. 
- # self._iteration_chi2 = [] + # Parameters + # ---------- + # n_knots : int, optional + # Number of spline knots across the logT grid. Default = 6. - # def _residuals(self, params) -> np.ndarray: + # Returns + # ------- + # params : lmfit.Parameters + # Parameters object containing log10(DEM/normalization_factor) values at knot points. + # Each parameter is named "knot_i" where i = 0..n_knots-1. + + # Notes + # ----- + # - IDL's `xrt_dem_iterative2.pro` seeds its fit by taking DEM estimates + # at peak response temperatures and spreading them across the grid. + # - Here, we select evenly spaced knots across the solver's logT range. + # - The stored value at each knot is: + # log10(DEM / normalization_factor) + # where `normalization_factor` is typically 1e17. + # - Bounds can be applied to prevent extreme DEM excursions if desired. # """ - # Residuals function for DEM fitting. + # if not hasattr(self, "_initial_log_dem"): + # raise AttributeError( + # "Initial DEM not available. Run _estimate_initial_dem() first." 
+ # ) + + # # Choose evenly spaced knot positions across logT range + # knot_positions = np.linspace( + # self._minimum_bound_temperature, self._maximum_bound_temperature, n_knots + # ) + # self._knot_positions = knot_positions # store for later reconstruction + + # # Interpolate initial DEM estimate at these knot positions + # interp_func = interp1d( + # self.logT, + # self._initial_log_dem, + # kind="linear", + # bounds_error=False, + # fill_value="extrapolate", + # ) + # init_log_dem_at_knots = interp_func(knot_positions) + + # # Convert to log10(DEM/normalization_factor) + # init_scaled = init_log_dem_at_knots - np.log10(self._normalization_factor) + + # # Build lmfit Parameters + # params = Parameters() + # for i, val in enumerate(init_scaled): + # params.add( + # name=f"knot_{i}", + # value=val, + # min=-10, # optional bounds: avoid absurdly low + # max=50, # optional bounds: avoid absurdly high + # vary=True, + # ) + + # self._init_knot_params = params + # return params + + # def _reconstruct_dem_from_knots(self, params) -> np.ndarray: + # """ + # Reconstruct the DEM curve on the solver's logT grid from spline knot parameters. # Parameters # ---------- # params : lmfit.Parameters - # Knot parameters, each storing log10(DEM / normalization_factor). + # Knot parameters where each value is log10(DEM / normalization_factor). # Returns # ------- - # residuals : ndarray - # Vector of normalized residuals for each observed channel: - # (I_obs - I_calc) / sigma - # Shape = (n_filters,) + # dem_grid : ndarray + # DEM values on `self.logT` grid in linear space [cm^-5 K^-1]. # Notes # ----- - # - This is the core of the DEM solver. It reconstructs the DEM curve - # from spline knot parameters, computes modeled intensities by - # integrating DEM * Response over temperature, and returns the - # residuals relative to observations. 
- # - Integration is done using midpoint trapezoid approximation: - # I_calc[i] = sum_j DEM_mid[j] * R_mid[i,j] * T_mid[j] * dlnT + # - Knot positions are stored in `self._knot_positions` when + # `_build_lmfit_parameters()` is called. + # - The stored parameter values are log10(DEM/normalization_factor). + # - Conversion back to DEM: + # DEM = normalization_factor * 10^(interp(log10 DEM/normalization_factor)) + # - Interpolation is linear in log space, as in IDL's `xrt_dem_iterative2.pro`. + # """ + # if not hasattr(self, "_knot_positions"): + # raise AttributeError( + # "Knot positions not found. Run _build_lmfit_parameters() first." + # ) + + # # Extract knot values from parameters (log10(DEM/normalization_factor)) + # knot_vals = np.array( + # [params[f"knot_{i}"].value for i in range(len(self._knot_positions))] + # ) + + # # Interpolate across solver grid in log space + # interp_func = interp1d( + # self._knot_positions, + # knot_vals, + # kind="linear", + # bounds_error=False, + # fill_value="extrapolate", + # ) + # log_dem_scaled = interp_func(self.logT) + + # # Convert back to DEM [cm^-5 K^-1] + # dem_grid = self._normalization_factor * ( + # 10.0**log_dem_scaled + # ) ## dem_grid now back in physical units + + # return dem_grid + + # # self._iteration_chi2 = [] + + # # def _residuals(self, params) -> np.ndarray: + # # """ + # # Residuals function for DEM fitting. + + # # Parameters + # # ---------- + # # params : lmfit.Parameters + # # Knot parameters, each storing log10(DEM / normalization_factor). + + # # Returns + # # ------- + # # residuals : ndarray + # # Vector of normalized residuals for each observed channel: + # # (I_obs - I_calc) / sigma + # # Shape = (n_filters,) + + # # Notes + # # ----- + # # - This is the core of the DEM solver. It reconstructs the DEM curve + # # from spline knot parameters, computes modeled intensities by + # # integrating DEM * Response over temperature, and returns the + # # residuals relative to observations. 
+ # # - Integration is done using midpoint trapezoid approximation: + # # I_calc[i] = sum_j DEM_mid[j] * R_mid[i,j] * T_mid[j] * dlnT + # # """ + # # # 1. Reconstruct DEM on the grid + # # dem_grid = self._reconstruct_dem_from_knots(params) # [cm^-5 K^-1] + + # # # 2. Prepare midpoint arrays + # # dem_mid = 0.5 * (dem_grid[:-1] + dem_grid[1:]) + # # R_mid = 0.5 * (self._response_matrix[:, :-1] + self._response_matrix[:, 1:]) + # # T_mid = 0.5 * (self.T[:-1] + self.T[1:]).to_value(u.K) + + # # # 3. Compute modeled intensities + # # # Shape: (n_filters,) + # # I_calc = np.sum(R_mid * dem_mid * T_mid * self.dlnT, axis=1) + + # # # 4. Residuals: normalize by observational errors + # # # sigma = self.intensity_errors.to_value(u.DN / u.s) # ensure numeric + # # # residuals = (self._observed_intensities - I_calc) / sigma + # # # Use MC-perturbed intensities if present; otherwise the originals + + # # y_obs = getattr(self, "_active_observed_intensities", self._observed_intensities) + # # sigma = self.intensity_errors.to_value(u.DN / u.s) # numeric + # # residuals = (y_obs - I_calc) / sigma + + # # residuals = (y_obs - I_calc) / sigma + # # chi2_val = np.sum(residuals**2) + + # # # Log χ² per iteration + # # if not hasattr(self, "_iteration_chi2"): + # # self._iteration_chi2 = [] + # # self._iteration_chi2.append(chi2_val) + + # # return residuals + # def _residuals(self, params) -> np.ndarray: + # """ + # Residuals function for DEM fitting. + + # Returns + # ------- + # residuals : ndarray + # (I_obs - I_calc) / sigma, one per filter. # """ # # 1. Reconstruct DEM on the grid # dem_grid = self._reconstruct_dem_from_knots(params) # [cm^-5 K^-1] - # # 2. Prepare midpoint arrays + # # 2. Midpoint integration setup # dem_mid = 0.5 * (dem_grid[:-1] + dem_grid[1:]) # R_mid = 0.5 * (self._response_matrix[:, :-1] + self._response_matrix[:, 1:]) # T_mid = 0.5 * (self.T[:-1] + self.T[1:]).to_value(u.K) - # # 3. Compute modeled intensities - # # Shape: (n_filters,) + # # 3. 
Modeled intensities # I_calc = np.sum(R_mid * dem_mid * T_mid * self.dlnT, axis=1) - # # 4. Residuals: normalize by observational errors - # # sigma = self.intensity_errors.to_value(u.DN / u.s) # ensure numeric - # # residuals = (self._observed_intensities - I_calc) / sigma - # # Use MC-perturbed intensities if present; otherwise the originals + # # # 4. Residuals: normalize by observational errors + # # y_obs = getattr( + # # self, "_active_observed_intensities", self._observed_intensities + # # ) + # # sigma = self.intensity_errors.to_value(u.DN / u.s) + # # residuals = (y_obs - I_calc) / sigma - # y_obs = getattr(self, "_active_observed_intensities", self._observed_intensities) - # sigma = self.intensity_errors.to_value(u.DN / u.s) # numeric - # residuals = (y_obs - I_calc) / sigma + # # # 5. Track χ² per iteration + # # chi2_val = np.sum(residuals**2) + # # if not hasattr(self, "_iteration_chi2"): + # # self._iteration_chi2 = [] + # # self._iteration_chi2.append(chi2_val) - # residuals = (y_obs - I_calc) / sigma - # chi2_val = np.sum(residuals**2) + # # return residuals - # # Log χ² per iteration + # # 4. Use either base or MC-perturbed observed intensities (physical units) + # y_obs_phys = getattr( + # self, "_active_observed_intensities", self._observed_intensities + # ) + # sigma_phys = self.intensity_errors.to_value(u.DN / u.s) + + # # 5. Apply normalization_factor in an IDL-like way: + # # scale data, model, and errors by the same factor. + # # (This keeps residuals numerically identical, but keeps + # # internal numbers closer to order unity if desired.) + # nf = self._normalization_factor + + # y_scaled = y_obs_phys / nf + # I_calc_scaled = I_calc / nf + # sigma_scaled = sigma_phys / nf + + # residuals = (y_scaled - I_calc_scaled) / sigma_scaled + + # # 6. 
Track χ² per iteration (in scaled space — same χ² as unscaled) + # chi2_val = np.sum(residuals**2) # if not hasattr(self, "_iteration_chi2"): # self._iteration_chi2 = [] # self._iteration_chi2.append(chi2_val) # return residuals - def _residuals(self, params) -> np.ndarray: + + # # def fit_dem(self, n_knots: int = 6, method: str = "least_squares", **kwargs): + # # """ + # # Fit the DEM using lmfit to minimize residuals. + + # # Parameters + # # ---------- + # # n_knots : int, optional + # # Number of spline knots across the logT grid. Default = 6. + # # method : str, optional + # # Minimization method passed to `lmfit.minimize`. + # # Common choices: "least_squares", "leastsq", "nelder". + # # Default = "least_squares". + # # **kwargs : dict + # # Additional keyword arguments forwarded to `lmfit.minimize`. + + # # Returns + # # ------- + # # result : lmfit.MinimizerResult + # # The lmfit result object containing fit information. + + # # Side Effects + # # ------------ + # # On successful fit, stores: + # # - self.dem : ndarray + # # Best-fit DEM(T) on self.logT [cm^-5 K^-1]. + # # - self.fitted_intensities : ndarray + # # Modeled intensities [DN/s/pix] for each filter. + # # - self.chi2 : float + # # Chi-squared (sum of squared residuals). + # # - self.redchi2 : float + # # Reduced chi-squared, normalized by (Nobs - Nparams). + + # # Notes + # # ----- + # # This method automatically builds the logT grid, interpolates + # # responses, and estimates an initial DEM if not already done. + # # """ + + # # # --- Auto-prepare prerequisites --- + # # if not hasattr(self, "logT") or not hasattr(self, "T"): + # # self.create_logT_grid() + + # # if not hasattr(self, "_response_matrix"): + # # self._interpolate_responses_to_grid() + + # # if not hasattr(self, "_initial_log_dem"): + # # self._estimate_initial_dem() + + # # self._last_n_knots = n_knots #Used for print in the summary function + + # # # 1. 
Build initial knot parameters + # # params = self._build_lmfit_parameters(n_knots=n_knots) + + # # # 2. Run minimization + # # result = minimize(self._residuals, params, method=method, **kwargs) + + # # # 3. On success, reconstruct DEM and fitted intensities + # # best_dem = self._reconstruct_dem_from_knots(result.params) + # # self.dem = best_dem # [cm^-5 K^-1] + + # # # Compute fitted intensities using midpoint integration + # # dem_mid = 0.5 * (best_dem[:-1] + best_dem[1:]) + # # R_mid = 0.5 * (self._response_matrix[:, :-1] + self._response_matrix[:, 1:]) + # # T_mid = 0.5 * (self.T[:-1] + self.T[1:]).to_value(u.K) + # # I_fit = np.sum(R_mid * dem_mid * T_mid * self.dlnT, axis=1) + + # # self.fitted_intensities = I_fit # [DN/s/pix] + # # sigma = self.intensity_errors.to_value(u.DN / u.s) + # # residuals = (self._observed_intensities - I_fit) / sigma + + # # # Chi-squared metrics + # # self.chi2 = np.sum(residuals**2) + # # dof = len(self._observed_intensities) - len(result.params) + # # self.redchi2 = self.chi2 / max(dof, 1) + + # # return result + + def _solve_single_dem(self, observed_intensities_vals: np.ndarray): """ - Residuals function for DEM fitting. + Solve the DEM once for a given set of observed intensities (base or MC-perturbed). + + Parameters + ---------- + observed_intensities_vals : ndarray + 1D array of observed intensities in DN/s/pix (no units attached). Returns ------- - residuals : ndarray - (I_obs - I_calc) / sigma, one per filter. + dem_phys : ndarray + DEM(T) in physical units [cm^-5 K^-1] on self.logT. + modeled_intensities_phys : ndarray + Modeled intensities in DN/s/pix for each channel. + chisq : float + Sum of squared residuals for this run. + fit_result : lmfit.MinimizerResult + Full lmfit result object (for diagnostics). """ - # 1. Reconstruct DEM on the grid - dem_grid = self._reconstruct_dem_from_knots(params) # [cm^-5 K^-1] - - # 2. 
Midpoint integration setup - dem_mid = 0.5 * (dem_grid[:-1] + dem_grid[1:]) - R_mid = 0.5 * (self._response_matrix[:, :-1] + self._response_matrix[:, 1:]) - T_mid = 0.5 * (self.T[:-1] + self.T[1:]).to_value(u.K) + # 1. Set scaled observations for this run (IDL: i_obs = i_obs/solv_factor) + nf = self._normalization_factor + self.intensities_scaled = observed_intensities_vals / nf + # Errors are the same for all runs; scale once + sigma_phys = self.intensity_errors.to_value(u.DN / u.s) + self.sigma_scaled_intensity_errors = sigma_phys / nf + + # 2. If all intensities are zero → nosolve, DEM = 0 + if np.all(self.intensities_scaled == 0.0): + dem_scaled = np.zeros_like(self.logT, dtype=float) + dem_phys = dem_scaled * nf + modeled_intensities_phys = np.zeros_like(observed_intensities_vals) + chisq = 0.0 + fit_result = None + return dem_phys, modeled_intensities_phys, chisq, fit_result + + # 3. Initial DEM & spline system (IDL: xrt_dem_iter_estim + mp_prep) + self._estimate_initial_dem() + self._prepare_spline_system() + params = self._build_lmfit_parameters() - # 3. Modeled intensities - I_calc = np.sum(R_mid * dem_mid * T_mid * self.dlnT, axis=1) + # 4. Run the least-squares solver (IDL: xrt_dem_iter_solver + mpfit) + result = minimize(self._residuals, params, max_nfev=self._max_iterations) - # # 4. Residuals: normalize by observational errors - # y_obs = getattr( - # self, "_active_observed_intensities", self._observed_intensities - # ) - # sigma = self.intensity_errors.to_value(u.DN / u.s) - # residuals = (y_obs - I_calc) / sigma + # 5. Reconstruct DEM in *scaled* units, then convert to physical + dem_scaled = self._reconstruct_dem_from_knots(result.params) # cm^-5 K^-1 / nf + dem_phys = dem_scaled * nf # undo normalization, like IDL - # # 5. Track χ² per iteration - # chi2_val = np.sum(residuals**2) - # if not hasattr(self, "_iteration_chi2"): - # self._iteration_chi2 = [] - # self._iteration_chi2.append(chi2_val) + # 6. 
Modeled intensities (IDL: i_mod = dem ## pm * abunds) + i_mod_scaled = (self.pm_matrix @ dem_scaled) * self.abundances + modeled_intensities_phys = i_mod_scaled * nf # back to DN/s/pix - # return residuals + # 7. χ² from residuals + resid = self._residuals(result.params) + chisq = float(np.sum(resid**2)) - # 4. Use either base or MC-perturbed observed intensities (physical units) - y_obs_phys = getattr( - self, "_active_observed_intensities", self._observed_intensities - ) - sigma_phys = self.intensity_errors.to_value(u.DN / u.s) + return dem_phys, modeled_intensities_phys, chisq, result - # 5. Apply normalization_factor in an IDL-like way: - # scale data, model, and errors by the same factor. - # (This keeps residuals numerically identical, but keeps - # internal numbers closer to order unity if desired.) - nf = self._normalization_factor + def solve(self): + """ + High-level DEM solver. - y_scaled = y_obs_phys / nf - I_calc_scaled = I_calc / nf - sigma_scaled = sigma_phys / nf + Mirrors IDL's xrt_dem_iterative2: + - validates inputs + - prepares temperature grid & responses + - solves for base DEM + - optionally runs Monte Carlo iterations - residuals = (y_scaled - I_calc_scaled) / sigma_scaled + After this call, the following attributes are set: - # 6. Track χ² per iteration (in scaled space — same χ² as unscaled) - chi2_val = np.sum(residuals**2) - if not hasattr(self, "_iteration_chi2"): - self._iteration_chi2 = [] - self._iteration_chi2.append(chi2_val) + self.logT : log10(T) grid (same as self.logT from create_logT_grid) + self.dem : base DEM(T) [cm^-5 K^-1] + self.chisq : χ² for base solution + self.modeled_intensities : modeled DN/s/pix for base solution - return residuals + If monte_carlo_runs > 0: - # def fit_dem(self, n_knots: int = 6, method: str = "least_squares", **kwargs): - # """ - # Fit the DEM using lmfit to minimize residuals. + self.dem_mc : array shape (n_runs+1, n_T). 0 = base, 1..N = MC. + self.chisq_mc : array shape (n_runs+1,). 
+ self.obs_mc : array shape (n_runs+1, n_channels) of observed intensities. + self.mod_mc : array shape (n_runs+1, n_channels) of modeled intensities. + """ + # 0) Basic validation & preparation + self.validate_inputs() - # Parameters - # ---------- - # n_knots : int, optional - # Number of spline knots across the logT grid. Default = 6. - # method : str, optional - # Minimization method passed to `lmfit.minimize`. - # Common choices: "least_squares", "leastsq", "nelder". - # Default = "least_squares". - # **kwargs : dict - # Additional keyword arguments forwarded to `lmfit.minimize`. + # Temperature grid and responses + self.create_logT_grid() + self._interpolate_responses_to_grid() - # Returns - # ------- - # result : lmfit.MinimizerResult - # The lmfit result object containing fit information. - - # Side Effects - # ------------ - # On successful fit, stores: - # - self.dem : ndarray - # Best-fit DEM(T) on self.logT [cm^-5 K^-1]. - # - self.fitted_intensities : ndarray - # Modeled intensities [DN/s/pix] for each filter. - # - self.chi2 : float - # Chi-squared (sum of squared residuals). - # - self.redchi2 : float - # Reduced chi-squared, normalized by (Nobs - Nparams). + # We don't scale intensities here; scaling will be done per run + base_obs_phys = self._observed_intensities.astype(float) # DN/s (no units) - # Notes - # ----- - # This method automatically builds the logT grid, interpolates - # responses, and estimates an initial DEM if not already done. 
- # """ - # from lmfit import minimize + # 1) Base DEM solution + dem_base, mod_base, chisq_base, base_result = self._solve_single_dem( + observed_intensities_vals=base_obs_phys + ) - # # --- Auto-prepare prerequisites --- - # if not hasattr(self, "logT") or not hasattr(self, "T"): - # self.create_logT_grid() + # Store base solution + self.logT_solution = self.logT.copy() + self.dem = dem_base # [cm^-5 K^-1] + self.chisq = chisq_base + self.modeled_intensities = mod_base # DN/s/pix - # if not hasattr(self, "_response_matrix"): - # self._interpolate_responses_to_grid() + # For convenience: store base as the first "MC" entry + n_T = self.logT.size + n_ch = base_obs_phys.size + n_runs = self.monte_carlo_runs - # if not hasattr(self, "_initial_log_dem"): - # self._estimate_initial_dem() + self.dem_mc = np.zeros((n_runs + 1, n_T), dtype=float) + self.chisq_mc = np.zeros((n_runs + 1,), dtype=float) + self.obs_mc = np.zeros((n_runs + 1, n_ch), dtype=float) + self.mod_mc = np.zeros((n_runs + 1, n_ch), dtype=float) - # self._last_n_knots = n_knots #Used for print in the summary function + self.dem_mc[0, :] = dem_base + self.chisq_mc[0] = chisq_base + self.obs_mc[0, :] = base_obs_phys + self.mod_mc[0, :] = mod_base - # # 1. Build initial knot parameters - # params = self._build_lmfit_parameters(n_knots=n_knots) + # 2) Monte Carlo runs (if requested) + if n_runs > 0: + rng = np.random.default_rng() + sigma_phys = self.intensity_errors.to_value(u.DN / u.s) - # # 2. Run minimization - # result = minimize(self._residuals, params, method=method, **kwargs) + for ii in range(1, n_runs + 1): + # Perturb observed intensities (IDL: i_obs + randomn * i_err, clipped at 0) + noise = rng.normal(loc=0.0, scale=sigma_phys, size=base_obs_phys.shape) + obs_pert = base_obs_phys + noise + obs_pert = np.maximum(obs_pert, 0.0) - # # 3. 
On success, reconstruct DEM and fitted intensities - # best_dem = self._reconstruct_dem_from_knots(result.params) - # self.dem = best_dem # [cm^-5 K^-1] + dem_i, mod_i, chisq_i, _ = self._solve_single_dem(obs_pert) - # # Compute fitted intensities using midpoint integration - # dem_mid = 0.5 * (best_dem[:-1] + best_dem[1:]) - # R_mid = 0.5 * (self._response_matrix[:, :-1] + self._response_matrix[:, 1:]) - # T_mid = 0.5 * (self.T[:-1] + self.T[1:]).to_value(u.K) - # I_fit = np.sum(R_mid * dem_mid * T_mid * self.dlnT, axis=1) + self.dem_mc[ii, :] = dem_i + self.chisq_mc[ii] = chisq_i + self.obs_mc[ii, :] = obs_pert + self.mod_mc[ii, :] = mod_i - # self.fitted_intensities = I_fit # [DN/s/pix] - # sigma = self.intensity_errors.to_value(u.DN / u.s) - # residuals = (self._observed_intensities - I_fit) / sigma + # Finished + return self.dem - # # Chi-squared metrics - # self.chi2 = np.sum(residuals**2) - # dof = len(self._observed_intensities) - len(result.params) - # self.redchi2 = self.chi2 / max(dof, 1) + ################################################################################################################################ + ################################################################################################################################ + #############************************** END of **************************########################## + ################################################################################################################################# - # return result + # ---------------------------------------------------------------------------------------------------------------------------------- def fit_dem(self, n_knots: int = 6, method: str = "least_squares", **kwargs): """ Fit the DEM using lmfit to minimize residuals. Tracks chi² per iteration (like IDL's XRT_ITER_DEMSTAT). 
""" - from lmfit import minimize # --- Auto-prepare prerequisites --- if not hasattr(self, "logT") or not hasattr(self, "T"): @@ -1307,7 +1589,6 @@ def fit_with_multiple_methods( best_result : lmfit.MinimizerResult Result from the method with lowest chi². """ - from lmfit import minimize if not hasattr(self, "_initial_log_dem"): self._estimate_initial_dem() @@ -1362,63 +1643,9 @@ def fit_with_multiple_methods( return best_result - # def run_monte_carlo(self, n_runs=None, n_knots=6, method="least_squares", random_seed=None): - # if random_seed is not None: - # np.random.seed(random_seed) - # """ - # Run Monte Carlo DEM fits to estimate uncertainties and store full ensemble. - - # Returns - # ------- - # dem_ensemble : ndarray - # Shape (n_runs, n_temperatures) array of DEM solutions. - # """ - # from lmfit import minimize - - # if n_runs is None: - # n_runs = self._monte_carlo_runs - # if n_runs <= 0: - # raise ValueError("Monte Carlo runs disabled (n_runs=0).") - - # sigma = self.intensity_errors.to_value(u.DN / u.s) - # dem_ensemble = [] - - # self._last_n_knots = n_knots #Used for print in the summary function - - # for i in range(n_runs): - # noisy_obs = self._observed_intensities + np.random.normal(0, sigma) - # #print(f"Given intensities: {noisy_obs}") - # self._observed_intensities_mc = noisy_obs # temp override - - # # params = self._build_lmfit_parameters(n_knots=n_knots) #Older Version Sept 18 - # # result = minimize(lambda p: self._residuals(p), params, method=method) #Older Version Sept 18 - # params = self._build_lmfit_parameters(n_knots=n_knots) - # # Activate noisy intensities for this run - # self._active_observed_intensities = noisy_obs - # try: - # result = minimize(self._residuals, params, method=method) - # finally: - # # Always restore (so the main dataset isn’t polluted) - # if hasattr(self, "_active_observed_intensities"): - # delattr(self, "_active_observed_intensities") - - # dem_i = self._reconstruct_dem_from_knots(result.params) - # 
dem_ensemble.append(dem_i) - - # dem_ensemble = np.array(dem_ensemble) - - # # Store ensemble + uncertainty - # self._dem_ensemble = dem_ensemble - # self.dem_uncertainty = np.std(dem_ensemble, axis=0) - # self.dem_median = np.median(dem_ensemble, axis=0) - - # return dem_ensemble - def run_monte_carlo( self, n_runs=None, n_knots=6, method="least_squares", random_seed=None ): - import numpy as np - from lmfit import minimize from tqdm import tqdm # add this at top of file if n_runs is None: @@ -1455,97 +1682,99 @@ def run_monte_carlo( return dem_ensemble - def solve( - self, n_knots: int = 6, method: str = "least_squares", run_mc: bool = True - ): - """ - Run the full DEM solver, IDL-style. + # #NOTEFORJOY NOV 19 + # def solve( + # self, n_knots: int = 6, method: str = "least_squares", run_mc: bool = True + # ): + # """ + # Run the full DEM solver, IDL-style. - This orchestrates: - 1. Build temperature grid. - 2. Interpolate responses (response matrix). - 3. Estimate initial DEM. - 4. Fit DEM with lmfit. - 5. Optionally run Monte Carlo ensemble. + # This orchestrates: + # 1. Build temperature grid. + # 2. Interpolate responses (response matrix). + # 3. Estimate initial DEM. + # 4. Fit DEM with lmfit. + # 5. Optionally run Monte Carlo ensemble. - Parameters - ---------- - n_knots : int, optional - Number of spline knots across logT. Default = 6. - method : str, optional - Minimization method for `lmfit.minimize`. Default = "least_squares". - run_mc : bool, optional - Whether to run Monte Carlo simulations (using self.monte_carlo_runs). - Default = True. + # Parameters + # ---------- + # n_knots : int, optional + # Number of spline knots across logT. Default = 6. + # method : str, optional + # Minimization method for `lmfit.minimize`. Default = "least_squares". + # run_mc : bool, optional + # Whether to run Monte Carlo simulations (using self.monte_carlo_runs). + # Default = True. 
- Returns - ------- - results : dict - Dictionary of solver outputs: - - "temperature" : log10(T) grid - - "dem" : best-fit DEM [cm^-5 K^-1] - - "dem_err" : DEM uncertainty (if MC runs > 0) - - "ifit" : fitted intensities [DN/s/pix] - - "chi2" : χ² - - "redchi2" : reduced χ² - """ - # IDL-STYLE NOSOLVE CHECk - # IDL behavior: if all observed intensities are zero (or non-positive), - # the DEM is trivially zero. Skip solving and return immediately. - if np.all(self._observed_intensities <= 0): # == 0 - warnings.warn( - "\n\n All observed intensities are zero or non-positive. " - "DEM cannot be solved. Returning zero DEM and zero fitted intensities. \n\n" - ) + # Returns + # ------- + # results : dict + # Dictionary of solver outputs: + # - "temperature" : log10(T) grid + # - "dem" : best-fit DEM [cm^-5 K^-1] + # - "dem_err" : DEM uncertainty (if MC runs > 0) + # - "ifit" : fitted intensities [DN/s/pix] + # - "chi2" : χ² + # - "redchi2" : reduced χ² + # """ + # # IDL-STYLE NOSOLVE CHECk + # # IDL behavior: if all observed intensities are zero (or non-positive), + # # the DEM is trivially zero. Skip solving and return immediately. + # if np.all(self._observed_intensities <= 0): # == 0 + # warnings.warn( + # "\n\n All observed intensities are zero or non-positive. " + # "DEM cannot be solved. Returning zero DEM and zero fitted intensities. \n\n" + # ) - # Ensure grid exists (IDL also returns logT_out even for nosolve) - if not hasattr(self, "logT"): - self.create_logT_grid() + # # Ensure grid exists (IDL also returns logT_out even for nosolve) + # if not hasattr(self, "logT"): + # self.create_logT_grid() - self.dem = np.zeros_like(self.logT) - self.fitted_intensities = np.zeros_like(self._observed_intensities) - self.chi2 = 0.0 - self.redchi2 = 0.0 - return self + # self.dem = np.zeros_like(self.logT) + # self.fitted_intensities = np.zeros_like(self._observed_intensities) + # self.chi2 = 0.0 + # self.redchi2 = 0.0 + # return self - # 1. 
Ensure grid & responses - self.create_logT_grid() - self._interpolate_responses_to_grid() + # # 1. Ensure grid & responses + # self.create_logT_grid() + # self._interpolate_responses_to_grid() - # 2. Estimate initial DEM - self._estimate_initial_dem() + # # 2. Estimate initial DEM + # self._estimate_initial_dem() - # 3. Fit DEM - result = self.fit_dem(n_knots=n_knots, method=method) + # # 3. Fit DEM + # result = self.fit_dem(n_knots=n_knots, method=method) + # result = self.fit_dem() - # 4. Monte Carlo (optional) - if run_mc and self.monte_carlo_runs > 0: - self.run_monte_carlo( - n_runs=self.monte_carlo_runs, n_knots=n_knots, method=method - ) + # # 4. Monte Carlo (optional) + # if run_mc and self.monte_carlo_runs > 0: + # self.run_monte_carlo( + # n_runs=self.monte_carlo_runs, n_knots=n_knots, method=method + # ) - # # 5. Bundle results - # return { - # "temperature": self.logT, - # "dem": self.dem, - # "dem_err": getattr(self, "dem_uncertainty", None), - # "ifit": self.fitted_intensities, - # "chi2": getattr(self, "chi2", None), - # "redchi2": getattr(self, "redchi2", None), - # "solver": self, - # } - return self - - def to_dict(self): - """Return solver outputs as a dictionary.""" - return { - "temperature": self.logT, - "dem": getattr(self, "dem", None), - "dem_err": getattr(self, "dem_uncertainty", None), - "ifit": getattr(self, "fitted_intensities", None), - "chi2": getattr(self, "chi2", None), - "redchi2": getattr(self, "redchi2", None), - } + # # # 5. 
Bundle results + # # return { + # # "temperature": self.logT, + # # "dem": self.dem, + # # "dem_err": getattr(self, "dem_uncertainty", None), + # # "ifit": self.fitted_intensities, + # # "chi2": getattr(self, "chi2", None), + # # "redchi2": getattr(self, "redchi2", None), + # # "solver": self, + # # } + # return self + + # def to_dict(self): + # """Return solver outputs as a dictionary.""" + # return { + # "temperature": self.logT, + # "dem": getattr(self, "dem", None), + # "dem_err": getattr(self, "dem_uncertainty", None), + # "ifit": getattr(self, "fitted_intensities", None), + # "chi2": getattr(self, "chi2", None), + # "redchi2": getattr(self, "redchi2", None), + # } def summary(self): """ From 6bf6adef7424d66e3217f990e4970e2630103ccc Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Thu, 20 Nov 2025 18:27:15 -0500 Subject: [PATCH 090/121] Quick clean up --- xrtpy/xrt_dem_iterative/dem_solver.py | 573 +++++++------------------- 1 file changed, 152 insertions(+), 421 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 8e312d3e1..ea1416b51 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -10,7 +10,6 @@ import numpy as np from lmfit import Parameters, minimize from scipy.interpolate import interp1d - from xrtpy.util.filters import validate_and_format_filters from xrtpy.xrt_dem_iterative import dem_plotting @@ -909,16 +908,14 @@ def _estimate_initial_dem(self, cutoff: float = 1.0 / np.e) -> np.ndarray: # xrt_dem_iter_estim ultimately does: # dem = 0.0*findgen(nt) + 1.0 ; Use flat dem for initial guess # on a regular logT grid. 
We mirror that here exactly: - - #est_log_dem_on_grid = np.ones_like(self.logT, dtype=float) * 1.0 NOV20 - - - #est_log_dem_on_grid = np.ones_like(self.logT, dtype=float) * 0.0 #NOTEFORJOY - #or - est_log_dem_on_grid = np.zeros_like(self.logT) - #Return the intial first guessed DEM + # est_log_dem_on_grid = np.ones_like(self.logT, dtype=float) * 1.0 NOV20 + # est_log_dem_on_grid = np.ones_like(self.logT, dtype=float) * 0.0 #NOTEFORJOY + # or + est_log_dem_on_grid = np.zeros_like(self.logT) + + # Return the intial first guessed DEM # Store for later use by the solver self._initial_log_dem = est_log_dem_on_grid @@ -959,7 +956,7 @@ def _prepare_spline_system(self): self.weights = np.ones(n_line, dtype=float) self.abundances = np.ones(n_line, dtype=float) - # pm_matrix = R(T) * T * dlnT (IDL line: emis * 10^t * alog(10^dt)) + # pm_matrix = R(T) * T * dlnT (IDL line: emis * 10^t * alog(10^dt)) # units - DN/s/pix/cm^5 * K * dLnT * DEM == DN/s/PIX T_linear = self.T.to_value(u.K) self.pm_matrix = (self._response_matrix * T_linear * self.dlnT).astype(float) @@ -972,11 +969,11 @@ def _prepare_spline_system(self): interp_init = interp1d( self.logT, self._initial_log_dem, # IDL is flat logDEM = 1.0 - kind="linear", #IDL uses a cubic spline later NOTEFORJOY NOV24 + kind="linear", # IDL uses a cubic spline later NOTEFORJOY NOV24 bounds_error=False, fill_value="extrapolate", ) - + self.spline_log_dem = interp_init(self.spline_logT) def _build_lmfit_parameters(self): @@ -994,14 +991,13 @@ def _build_lmfit_parameters(self): params.add( f"knot_{i}", value=float(self.spline_log_dem[i]), - min = -20.0, - max = 0.0, + min=-20.0, + max=0.0, vary=True, ) return params - def _reconstruct_dem_from_knots(self, params): """ Construct DEM(T) on self.logT using spline of log10(DEM) at knot positions. 
@@ -1020,14 +1016,13 @@ def _reconstruct_dem_from_knots(self, params): # log_dem = interp_spline(self.logT) # log10(DEM) # dem = 10.0**log_dem # DEM in linear cm^-5 K^-1 - - #Or used the code above but switch from linear to kind="cubic" + + # Or used the code above but switch from linear to kind="cubic" cs = CubicSpline(self.spline_logT, knot_vals, bc_type="natural") log_dem = cs(self.logT) - dem = 10.0 ** log_dem + dem = 10.0**log_dem return dem - def _residuals(self, params): """ IDL equivalent of mpdemfunct. @@ -1056,314 +1051,6 @@ def _residuals(self, params): return residuals - # def _build_lmfit_parameters(self, n_knots: int = 6): - # """ - # Build lmfit.Parameters for the DEM spline knots. - - # Parameters - # ---------- - # n_knots : int, optional - # Number of spline knots across the logT grid. Default = 6. - - # Returns - # ------- - # params : lmfit.Parameters - # Parameters object containing log10(DEM/normalization_factor) values at knot points. - # Each parameter is named "knot_i" where i = 0..n_knots-1. - - # Notes - # ----- - # - IDL's `xrt_dem_iterative2.pro` seeds its fit by taking DEM estimates - # at peak response temperatures and spreading them across the grid. - # - Here, we select evenly spaced knots across the solver's logT range. - # - The stored value at each knot is: - # log10(DEM / normalization_factor) - # where `normalization_factor` is typically 1e17. - # - Bounds can be applied to prevent extreme DEM excursions if desired. - # """ - # if not hasattr(self, "_initial_log_dem"): - # raise AttributeError( - # "Initial DEM not available. Run _estimate_initial_dem() first." 
- # ) - - # # Choose evenly spaced knot positions across logT range - # knot_positions = np.linspace( - # self._minimum_bound_temperature, self._maximum_bound_temperature, n_knots - # ) - # self._knot_positions = knot_positions # store for later reconstruction - - # # Interpolate initial DEM estimate at these knot positions - # interp_func = interp1d( - # self.logT, - # self._initial_log_dem, - # kind="linear", - # bounds_error=False, - # fill_value="extrapolate", - # ) - # init_log_dem_at_knots = interp_func(knot_positions) - - # # Convert to log10(DEM/normalization_factor) - # init_scaled = init_log_dem_at_knots - np.log10(self._normalization_factor) - - # # Build lmfit Parameters - # params = Parameters() - # for i, val in enumerate(init_scaled): - # params.add( - # name=f"knot_{i}", - # value=val, - # min=-10, # optional bounds: avoid absurdly low - # max=50, # optional bounds: avoid absurdly high - # vary=True, - # ) - - # self._init_knot_params = params - # return params - - # def _reconstruct_dem_from_knots(self, params) -> np.ndarray: - # """ - # Reconstruct the DEM curve on the solver's logT grid from spline knot parameters. - - # Parameters - # ---------- - # params : lmfit.Parameters - # Knot parameters where each value is log10(DEM / normalization_factor). - - # Returns - # ------- - # dem_grid : ndarray - # DEM values on `self.logT` grid in linear space [cm^-5 K^-1]. - - # Notes - # ----- - # - Knot positions are stored in `self._knot_positions` when - # `_build_lmfit_parameters()` is called. - # - The stored parameter values are log10(DEM/normalization_factor). - # - Conversion back to DEM: - # DEM = normalization_factor * 10^(interp(log10 DEM/normalization_factor)) - # - Interpolation is linear in log space, as in IDL's `xrt_dem_iterative2.pro`. - # """ - # if not hasattr(self, "_knot_positions"): - # raise AttributeError( - # "Knot positions not found. Run _build_lmfit_parameters() first." 
- # ) - - # # Extract knot values from parameters (log10(DEM/normalization_factor)) - # knot_vals = np.array( - # [params[f"knot_{i}"].value for i in range(len(self._knot_positions))] - # ) - - # # Interpolate across solver grid in log space - # interp_func = interp1d( - # self._knot_positions, - # knot_vals, - # kind="linear", - # bounds_error=False, - # fill_value="extrapolate", - # ) - # log_dem_scaled = interp_func(self.logT) - - # # Convert back to DEM [cm^-5 K^-1] - # dem_grid = self._normalization_factor * ( - # 10.0**log_dem_scaled - # ) ## dem_grid now back in physical units - - # return dem_grid - - # # self._iteration_chi2 = [] - - # # def _residuals(self, params) -> np.ndarray: - # # """ - # # Residuals function for DEM fitting. - - # # Parameters - # # ---------- - # # params : lmfit.Parameters - # # Knot parameters, each storing log10(DEM / normalization_factor). - - # # Returns - # # ------- - # # residuals : ndarray - # # Vector of normalized residuals for each observed channel: - # # (I_obs - I_calc) / sigma - # # Shape = (n_filters,) - - # # Notes - # # ----- - # # - This is the core of the DEM solver. It reconstructs the DEM curve - # # from spline knot parameters, computes modeled intensities by - # # integrating DEM * Response over temperature, and returns the - # # residuals relative to observations. - # # - Integration is done using midpoint trapezoid approximation: - # # I_calc[i] = sum_j DEM_mid[j] * R_mid[i,j] * T_mid[j] * dlnT - # # """ - # # # 1. Reconstruct DEM on the grid - # # dem_grid = self._reconstruct_dem_from_knots(params) # [cm^-5 K^-1] - - # # # 2. Prepare midpoint arrays - # # dem_mid = 0.5 * (dem_grid[:-1] + dem_grid[1:]) - # # R_mid = 0.5 * (self._response_matrix[:, :-1] + self._response_matrix[:, 1:]) - # # T_mid = 0.5 * (self.T[:-1] + self.T[1:]).to_value(u.K) - - # # # 3. Compute modeled intensities - # # # Shape: (n_filters,) - # # I_calc = np.sum(R_mid * dem_mid * T_mid * self.dlnT, axis=1) - - # # # 4. 
Residuals: normalize by observational errors - # # # sigma = self.intensity_errors.to_value(u.DN / u.s) # ensure numeric - # # # residuals = (self._observed_intensities - I_calc) / sigma - # # # Use MC-perturbed intensities if present; otherwise the originals - - # # y_obs = getattr(self, "_active_observed_intensities", self._observed_intensities) - # # sigma = self.intensity_errors.to_value(u.DN / u.s) # numeric - # # residuals = (y_obs - I_calc) / sigma - - # # residuals = (y_obs - I_calc) / sigma - # # chi2_val = np.sum(residuals**2) - - # # # Log χ² per iteration - # # if not hasattr(self, "_iteration_chi2"): - # # self._iteration_chi2 = [] - # # self._iteration_chi2.append(chi2_val) - - # # return residuals - # def _residuals(self, params) -> np.ndarray: - # """ - # Residuals function for DEM fitting. - - # Returns - # ------- - # residuals : ndarray - # (I_obs - I_calc) / sigma, one per filter. - # """ - # # 1. Reconstruct DEM on the grid - # dem_grid = self._reconstruct_dem_from_knots(params) # [cm^-5 K^-1] - - # # 2. Midpoint integration setup - # dem_mid = 0.5 * (dem_grid[:-1] + dem_grid[1:]) - # R_mid = 0.5 * (self._response_matrix[:, :-1] + self._response_matrix[:, 1:]) - # T_mid = 0.5 * (self.T[:-1] + self.T[1:]).to_value(u.K) - - # # 3. Modeled intensities - # I_calc = np.sum(R_mid * dem_mid * T_mid * self.dlnT, axis=1) - - # # # 4. Residuals: normalize by observational errors - # # y_obs = getattr( - # # self, "_active_observed_intensities", self._observed_intensities - # # ) - # # sigma = self.intensity_errors.to_value(u.DN / u.s) - # # residuals = (y_obs - I_calc) / sigma - - # # # 5. Track χ² per iteration - # # chi2_val = np.sum(residuals**2) - # # if not hasattr(self, "_iteration_chi2"): - # # self._iteration_chi2 = [] - # # self._iteration_chi2.append(chi2_val) - - # # return residuals - - # # 4. 
Use either base or MC-perturbed observed intensities (physical units) - # y_obs_phys = getattr( - # self, "_active_observed_intensities", self._observed_intensities - # ) - # sigma_phys = self.intensity_errors.to_value(u.DN / u.s) - - # # 5. Apply normalization_factor in an IDL-like way: - # # scale data, model, and errors by the same factor. - # # (This keeps residuals numerically identical, but keeps - # # internal numbers closer to order unity if desired.) - # nf = self._normalization_factor - - # y_scaled = y_obs_phys / nf - # I_calc_scaled = I_calc / nf - # sigma_scaled = sigma_phys / nf - - # residuals = (y_scaled - I_calc_scaled) / sigma_scaled - - # # 6. Track χ² per iteration (in scaled space — same χ² as unscaled) - # chi2_val = np.sum(residuals**2) - # if not hasattr(self, "_iteration_chi2"): - # self._iteration_chi2 = [] - # self._iteration_chi2.append(chi2_val) - - # return residuals - - # # def fit_dem(self, n_knots: int = 6, method: str = "least_squares", **kwargs): - # # """ - # # Fit the DEM using lmfit to minimize residuals. - - # # Parameters - # # ---------- - # # n_knots : int, optional - # # Number of spline knots across the logT grid. Default = 6. - # # method : str, optional - # # Minimization method passed to `lmfit.minimize`. - # # Common choices: "least_squares", "leastsq", "nelder". - # # Default = "least_squares". - # # **kwargs : dict - # # Additional keyword arguments forwarded to `lmfit.minimize`. - - # # Returns - # # ------- - # # result : lmfit.MinimizerResult - # # The lmfit result object containing fit information. - - # # Side Effects - # # ------------ - # # On successful fit, stores: - # # - self.dem : ndarray - # # Best-fit DEM(T) on self.logT [cm^-5 K^-1]. - # # - self.fitted_intensities : ndarray - # # Modeled intensities [DN/s/pix] for each filter. - # # - self.chi2 : float - # # Chi-squared (sum of squared residuals). - # # - self.redchi2 : float - # # Reduced chi-squared, normalized by (Nobs - Nparams). 
- - # # Notes - # # ----- - # # This method automatically builds the logT grid, interpolates - # # responses, and estimates an initial DEM if not already done. - # # """ - - # # # --- Auto-prepare prerequisites --- - # # if not hasattr(self, "logT") or not hasattr(self, "T"): - # # self.create_logT_grid() - - # # if not hasattr(self, "_response_matrix"): - # # self._interpolate_responses_to_grid() - - # # if not hasattr(self, "_initial_log_dem"): - # # self._estimate_initial_dem() - - # # self._last_n_knots = n_knots #Used for print in the summary function - - # # # 1. Build initial knot parameters - # # params = self._build_lmfit_parameters(n_knots=n_knots) - - # # # 2. Run minimization - # # result = minimize(self._residuals, params, method=method, **kwargs) - - # # # 3. On success, reconstruct DEM and fitted intensities - # # best_dem = self._reconstruct_dem_from_knots(result.params) - # # self.dem = best_dem # [cm^-5 K^-1] - - # # # Compute fitted intensities using midpoint integration - # # dem_mid = 0.5 * (best_dem[:-1] + best_dem[1:]) - # # R_mid = 0.5 * (self._response_matrix[:, :-1] + self._response_matrix[:, 1:]) - # # T_mid = 0.5 * (self.T[:-1] + self.T[1:]).to_value(u.K) - # # I_fit = np.sum(R_mid * dem_mid * T_mid * self.dlnT, axis=1) - - # # self.fitted_intensities = I_fit # [DN/s/pix] - # # sigma = self.intensity_errors.to_value(u.DN / u.s) - # # residuals = (self._observed_intensities - I_fit) / sigma - - # # # Chi-squared metrics - # # self.chi2 = np.sum(residuals**2) - # # dof = len(self._observed_intensities) - len(result.params) - # # self.redchi2 = self.chi2 / max(dof, 1) - - # # return result - def _solve_single_dem(self, observed_intensities_vals: np.ndarray): """ Solve the DEM once for a given set of observed intensities (base or MC-perturbed). 
@@ -1422,6 +1109,13 @@ def _solve_single_dem(self, observed_intensities_vals: np.ndarray): return dem_phys, modeled_intensities_phys, chisq, result + # ------------------------------------------------------------------------------------------------------------------------------- + + ################################################################################################################################ + ################################################################################################################################ + #############************************** Start of DEM SOLVER **************************########################## + ################## ##################### + def solve(self): """ High-level DEM solver. @@ -1503,12 +1197,128 @@ def solve(self): # Finished return self.dem + # #NOTEFORJOY NOV 19 + # def solve( + # self, n_knots: int = 6, method: str = "least_squares", run_mc: bool = True + # ): + # """ + # Run the full DEM solver, IDL-style. + + # This orchestrates: + # 1. Build temperature grid. + # 2. Interpolate responses (response matrix). + # 3. Estimate initial DEM. + # 4. Fit DEM with lmfit. + # 5. Optionally run Monte Carlo ensemble. + + # Parameters + # ---------- + # n_knots : int, optional + # Number of spline knots across logT. Default = 6. + # method : str, optional + # Minimization method for `lmfit.minimize`. Default = "least_squares". + # run_mc : bool, optional + # Whether to run Monte Carlo simulations (using self.monte_carlo_runs). + # Default = True. + + # Returns + # ------- + # results : dict + # Dictionary of solver outputs: + # - "temperature" : log10(T) grid + # - "dem" : best-fit DEM [cm^-5 K^-1] + # - "dem_err" : DEM uncertainty (if MC runs > 0) + # - "ifit" : fitted intensities [DN/s/pix] + # - "chi2" : χ² + # - "redchi2" : reduced χ² + # """ + # # IDL-STYLE NOSOLVE CHECk + # # IDL behavior: if all observed intensities are zero (or non-positive), + # # the DEM is trivially zero. 
Skip solving and return immediately. + # if np.all(self._observed_intensities <= 0): # == 0 + # warnings.warn( + # "\n\n All observed intensities are zero or non-positive. " + # "DEM cannot be solved. Returning zero DEM and zero fitted intensities. \n\n" + # ) + + # # Ensure grid exists (IDL also returns logT_out even for nosolve) + # if not hasattr(self, "logT"): + # self.create_logT_grid() + + # self.dem = np.zeros_like(self.logT) + # self.fitted_intensities = np.zeros_like(self._observed_intensities) + # self.chi2 = 0.0 + # self.redchi2 = 0.0 + # return self + + # # 1. Ensure grid & responses + # self.create_logT_grid() + # self._interpolate_responses_to_grid() + + # # 2. Estimate initial DEM + # self._estimate_initial_dem() + + # # 3. Fit DEM + # result = self.fit_dem(n_knots=n_knots, method=method) + # result = self.fit_dem() + + # # 4. Monte Carlo (optional) + # if run_mc and self.monte_carlo_runs > 0: + # self.run_monte_carlo( + # n_runs=self.monte_carlo_runs, n_knots=n_knots, method=method + # ) + + # # # 5. 
Bundle results + # # return { + # # "temperature": self.logT, + # # "dem": self.dem, + # # "dem_err": getattr(self, "dem_uncertainty", None), + # # "ifit": self.fitted_intensities, + # # "chi2": getattr(self, "chi2", None), + # # "redchi2": getattr(self, "redchi2", None), + # # "solver": self, + # # } + # return self + + # def to_dict(self): + # """Return solver outputs as a dictionary.""" + # return { + # "temperature": self.logT, + # "dem": getattr(self, "dem", None), + # "dem_err": getattr(self, "dem_uncertainty", None), + # "ifit": getattr(self, "fitted_intensities", None), + # "chi2": getattr(self, "chi2", None), + # "redchi2": getattr(self, "redchi2", None), + # } + + ################################################################################################################################ + ################################################################################################################################ + #############************************** END of DEM SOLVER **************************########################## + ################################################################################################################################ + ################################################################################################################################ + + # ------------------------------------------------------------------------------------------------------------------------------- + + ################################################################################################################################ + ################################################################################################################################ + #############************************** END of **************************########################## + ################################################################################################################################ + 
################################################################################################################################ + + # ------------------------------------------------------------------------------------------------------------------------------- + + ################################################################################################################################ + ################################################################################################################################ + #############************************** Start of error bars / Monte Carlo **************************########################## + ################## ##################### + ################################################################################################################################ ################################################################################################################################ #############************************** END of **************************########################## - ################################################################################################################################# + ################################################################################################################################ + ################################################################################################################################ - # ---------------------------------------------------------------------------------------------------------------------------------- + # ------------------------------------------------------------------------------------------------------------------------------- def fit_dem(self, n_knots: int = 6, method: str = "least_squares", **kwargs): """ @@ -1682,99 +1492,12 @@ def run_monte_carlo( return dem_ensemble - # #NOTEFORJOY NOV 19 - # def solve( - # self, n_knots: int = 6, method: str = "least_squares", run_mc: bool = True - # ): - # """ 
- # Run the full DEM solver, IDL-style. - - # This orchestrates: - # 1. Build temperature grid. - # 2. Interpolate responses (response matrix). - # 3. Estimate initial DEM. - # 4. Fit DEM with lmfit. - # 5. Optionally run Monte Carlo ensemble. - - # Parameters - # ---------- - # n_knots : int, optional - # Number of spline knots across logT. Default = 6. - # method : str, optional - # Minimization method for `lmfit.minimize`. Default = "least_squares". - # run_mc : bool, optional - # Whether to run Monte Carlo simulations (using self.monte_carlo_runs). - # Default = True. - - # Returns - # ------- - # results : dict - # Dictionary of solver outputs: - # - "temperature" : log10(T) grid - # - "dem" : best-fit DEM [cm^-5 K^-1] - # - "dem_err" : DEM uncertainty (if MC runs > 0) - # - "ifit" : fitted intensities [DN/s/pix] - # - "chi2" : χ² - # - "redchi2" : reduced χ² - # """ - # # IDL-STYLE NOSOLVE CHECk - # # IDL behavior: if all observed intensities are zero (or non-positive), - # # the DEM is trivially zero. Skip solving and return immediately. - # if np.all(self._observed_intensities <= 0): # == 0 - # warnings.warn( - # "\n\n All observed intensities are zero or non-positive. " - # "DEM cannot be solved. Returning zero DEM and zero fitted intensities. \n\n" - # ) - - # # Ensure grid exists (IDL also returns logT_out even for nosolve) - # if not hasattr(self, "logT"): - # self.create_logT_grid() - - # self.dem = np.zeros_like(self.logT) - # self.fitted_intensities = np.zeros_like(self._observed_intensities) - # self.chi2 = 0.0 - # self.redchi2 = 0.0 - # return self - - # # 1. Ensure grid & responses - # self.create_logT_grid() - # self._interpolate_responses_to_grid() - - # # 2. Estimate initial DEM - # self._estimate_initial_dem() - - # # 3. Fit DEM - # result = self.fit_dem(n_knots=n_knots, method=method) - # result = self.fit_dem() - - # # 4. 
Monte Carlo (optional) - # if run_mc and self.monte_carlo_runs > 0: - # self.run_monte_carlo( - # n_runs=self.monte_carlo_runs, n_knots=n_knots, method=method - # ) - - # # # 5. Bundle results - # # return { - # # "temperature": self.logT, - # # "dem": self.dem, - # # "dem_err": getattr(self, "dem_uncertainty", None), - # # "ifit": self.fitted_intensities, - # # "chi2": getattr(self, "chi2", None), - # # "redchi2": getattr(self, "redchi2", None), - # # "solver": self, - # # } - # return self + # ------------------------------------------------------------------------------------------------------------------------------- - # def to_dict(self): - # """Return solver outputs as a dictionary.""" - # return { - # "temperature": self.logT, - # "dem": getattr(self, "dem", None), - # "dem_err": getattr(self, "dem_uncertainty", None), - # "ifit": getattr(self, "fitted_intensities", None), - # "chi2": getattr(self, "chi2", None), - # "redchi2": getattr(self, "redchi2", None), - # } + ################################################################################################################################ + ################################################################################################################################ + #############************************** Start of error bars / Monte Carlo **************************########################## + ################## ##################### def summary(self): """ @@ -1884,6 +1607,14 @@ def summary(self): print("=" * 65) + ################################################################################################################################ + ################################################################################################################################ + #############************************** END of **************************########################## + ################################################################################################################################ + 
################################################################################################################################ + + # ------------------------------------------------------------------------------------------------------------------------------- + # Attach plotting functions from plotting.py to the class XRTDEMIterative.plot_dem_results = dem_plotting.plot_dem_results From 22e92a636a60d58880fffcf241787519c8c2e21f Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Thu, 20 Nov 2025 20:04:23 -0500 Subject: [PATCH 091/121] Working on the MC' --- xrtpy/xrt_dem_iterative/dem_solver.py | 1090 ++++++++++++++----------- 1 file changed, 632 insertions(+), 458 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index ea1416b51..38d837a60 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -1109,520 +1109,694 @@ def _solve_single_dem(self, observed_intensities_vals: np.ndarray): return dem_phys, modeled_intensities_phys, chisq, result + ################################################################################################################################ + ################################################################################################################################ + #############************************** **************************########################## + ################################################################################################################################ + ################################################################################################################################ + # ------------------------------------------------------------------------------------------------------------------------------- ################################################################################################################################ 
################################################################################################################################ - #############************************** Start of DEM SOLVER **************************########################## + #############************************** Start of error bars / Monte Carlo **************************########################## ################## ##################### - def solve(self): + def _run_monte_carlo(self, result_params): """ - High-level DEM solver. - - Mirrors IDL's xrt_dem_iterative2: - - validates inputs - - prepares temperature grid & responses - - solves for base DEM - - optionally runs Monte Carlo iterations - - After this call, the following attributes are set: - - self.logT : log10(T) grid (same as self.logT from create_logT_grid) - self.dem : base DEM(T) [cm^-5 K^-1] - self.chisq : χ² for base solution - self.modeled_intensities : modeled DN/s/pix for base solution - - If monte_carlo_runs > 0: - - self.dem_mc : array shape (n_runs+1, n_T). 0 = base, 1..N = MC. - self.chisq_mc : array shape (n_runs+1,). - self.obs_mc : array shape (n_runs+1, n_channels) of observed intensities. - self.mod_mc : array shape (n_runs+1, n_channels) of modeled intensities. + Replicates IDL's Monte Carlo loop. 
+ Produces: + - self.mc_dem shape (n_T, N+1) + - self.mc_base_obs shape (n_obs, N+1) + - self.mc_mod_obs shape (n_obs, N+1) + - self.mc_chisq shape (N+1,) """ - # 0) Basic validation & preparation - self.validate_inputs() - # Temperature grid and responses - self.create_logT_grid() - self._interpolate_responses_to_grid() + n_obs = len(self._observed_intensities) + nT = len(self.logT) + N = self._monte_carlo_runs - # We don't scale intensities here; scaling will be done per run - base_obs_phys = self._observed_intensities.astype(float) # DN/s (no units) + # Prepare arrays + mc_dem = np.zeros((nT, N + 1)) + mc_base = np.zeros((n_obs, N + 1)) + mc_mod = np.zeros((n_obs, N + 1)) + mc_chi = np.zeros(N + 1) - # 1) Base DEM solution - dem_base, mod_base, chisq_base, base_result = self._solve_single_dem( - observed_intensities_vals=base_obs_phys - ) - - # Store base solution - self.logT_solution = self.logT.copy() - self.dem = dem_base # [cm^-5 K^-1] - self.chisq = chisq_base - self.modeled_intensities = mod_base # DN/s/pix - - # For convenience: store base as the first "MC" entry - n_T = self.logT.size - n_ch = base_obs_phys.size - n_runs = self.monte_carlo_runs + # --- Base run first (IDL puts real data in column 0) --- + dem = self.dem # already scaled back by normalization + mc_dem[:, 0] = dem + mc_base[:, 0] = self._observed_intensities # unscaled + mc_mod[:, 0] = self.modeled_intensities # unscaled + mc_chi[0] = self.current_chi2 - self.dem_mc = np.zeros((n_runs + 1, n_T), dtype=float) - self.chisq_mc = np.zeros((n_runs + 1,), dtype=float) - self.obs_mc = np.zeros((n_runs + 1, n_ch), dtype=float) - self.mod_mc = np.zeros((n_runs + 1, n_ch), dtype=float) + # --- Run the MC loops --- + rng = np.random.default_rng() # like systime(1) - self.dem_mc[0, :] = dem_base - self.chisq_mc[0] = chisq_base - self.obs_mc[0, :] = base_obs_phys - self.mod_mc[0, :] = mod_base - - # 2) Monte Carlo runs (if requested) - if n_runs > 0: - rng = np.random.default_rng() - sigma_phys = 
self.intensity_errors.to_value(u.DN / u.s) - - for ii in range(1, n_runs + 1): - # Perturb observed intensities (IDL: i_obs + randomn * i_err, clipped at 0) - noise = rng.normal(loc=0.0, scale=sigma_phys, size=base_obs_phys.shape) - obs_pert = base_obs_phys + noise - obs_pert = np.maximum(obs_pert, 0.0) - - dem_i, mod_i, chisq_i, _ = self._solve_single_dem(obs_pert) + for ii in range(1, N + 1): - self.dem_mc[ii, :] = dem_i - self.chisq_mc[ii] = chisq_i - self.obs_mc[ii, :] = obs_pert - self.mod_mc[ii, :] = mod_i - - # Finished - return self.dem - - # #NOTEFORJOY NOV 19 - # def solve( - # self, n_knots: int = 6, method: str = "least_squares", run_mc: bool = True - # ): - # """ - # Run the full DEM solver, IDL-style. - - # This orchestrates: - # 1. Build temperature grid. - # 2. Interpolate responses (response matrix). - # 3. Estimate initial DEM. - # 4. Fit DEM with lmfit. - # 5. Optionally run Monte Carlo ensemble. - - # Parameters - # ---------- - # n_knots : int, optional - # Number of spline knots across logT. Default = 6. - # method : str, optional - # Minimization method for `lmfit.minimize`. Default = "least_squares". - # run_mc : bool, optional - # Whether to run Monte Carlo simulations (using self.monte_carlo_runs). - # Default = True. - - # Returns - # ------- - # results : dict - # Dictionary of solver outputs: - # - "temperature" : log10(T) grid - # - "dem" : best-fit DEM [cm^-5 K^-1] - # - "dem_err" : DEM uncertainty (if MC runs > 0) - # - "ifit" : fitted intensities [DN/s/pix] - # - "chi2" : χ² - # - "redchi2" : reduced χ² - # """ - # # IDL-STYLE NOSOLVE CHECk - # # IDL behavior: if all observed intensities are zero (or non-positive), - # # the DEM is trivially zero. Skip solving and return immediately. - # if np.all(self._observed_intensities <= 0): # == 0 - # warnings.warn( - # "\n\n All observed intensities are zero or non-positive. " - # "DEM cannot be solved. Returning zero DEM and zero fitted intensities. 
\n\n" - # ) - - # # Ensure grid exists (IDL also returns logT_out even for nosolve) - # if not hasattr(self, "logT"): - # self.create_logT_grid() + # Step 1: Perturbed intensities (scaled) + perturbed = ( + self.intensities_scaled + + rng.normal(size=n_obs) * self.sigma_scaled_intensity_errors + ) + perturbed = np.clip(perturbed, 0, None) - # self.dem = np.zeros_like(self.logT) - # self.fitted_intensities = np.zeros_like(self._observed_intensities) - # self.chi2 = 0.0 - # self.redchi2 = 0.0 - # return self + # Store unscaled in mc_base + mc_base[:, ii] = perturbed * self.normalization_factor - # # 1. Ensure grid & responses - # self.create_logT_grid() - # self._interpolate_responses_to_grid() + # If all zero → nosolve=True + if np.all(perturbed == 0): + mc_dem[:, ii] = 0.0 + mc_mod[:, ii] = 0.0 + mc_chi[ii] = 0.0 + continue - # # 2. Estimate initial DEM - # self._estimate_initial_dem() + # Step 2: assign perturbed intensities + self.intensities_scaled = perturbed - # # 3. Fit DEM - # result = self.fit_dem(n_knots=n_knots, method=method) - # result = self.fit_dem() + # Step 3: Rebuild spline system + self._prepare_spline_system() - # # 4. Monte Carlo (optional) - # if run_mc and self.monte_carlo_runs > 0: - # self.run_monte_carlo( - # n_runs=self.monte_carlo_runs, n_knots=n_knots, method=method - # ) + # Step 4: Solve via lmfit + params = self._build_lmfit_parameters() + out = minimize(self._residuals, params, max_nfev=self.max_iterations) - # # # 5. 
Bundle results - # # return { - # # "temperature": self.logT, - # # "dem": self.dem, - # # "dem_err": getattr(self, "dem_uncertainty", None), - # # "ifit": self.fitted_intensities, - # # "chi2": getattr(self, "chi2", None), - # # "redchi2": getattr(self, "redchi2", None), - # # "solver": self, - # # } - # return self - - # def to_dict(self): - # """Return solver outputs as a dictionary.""" - # return { - # "temperature": self.logT, - # "dem": getattr(self, "dem", None), - # "dem_err": getattr(self, "dem_uncertainty", None), - # "ifit": getattr(self, "fitted_intensities", None), - # "chi2": getattr(self, "chi2", None), - # "redchi2": getattr(self, "redchi2", None), - # } + # Step 5: Reconstruct DEM + dem = self._reconstruct_dem_from_knots(out.params) + dem_scaled = dem * self.normalization_factor # unscale + mc_dem[:, ii] = dem_scaled - ################################################################################################################################ - ################################################################################################################################ - #############************************** END of DEM SOLVER **************************########################## - ################################################################################################################################ - ################################################################################################################################ + # Step 6: Compute modeled intensities + modeled = (self.pm_matrix @ dem) * self.abundances + mc_mod[:, ii] = modeled * self.normalization_factor - # ------------------------------------------------------------------------------------------------------------------------------- + # Step 7: Compute chi-square + resid = self._residuals(out.params) + mc_chi[ii] = np.sum(resid**2) - ################################################################################################################################ - 
################################################################################################################################ - #############************************** END of **************************########################## - ################################################################################################################################ - ################################################################################################################################ + # store results + self.mc_dem = mc_dem + self.mc_base_obs = mc_base + self.mc_mod_obs = mc_mod + self.mc_chisq = mc_chi # ------------------------------------------------------------------------------------------------------------------------------- ################################################################################################################################ ################################################################################################################################ - #############************************** Start of error bars / Monte Carlo **************************########################## + #############************************** Start of DEM SOLVER **************************########################## ################## ##################### - ################################################################################################################################ - ################################################################################################################################ - #############************************** END of **************************########################## - ################################################################################################################################ - ################################################################################################################################ - - # 
------------------------------------------------------------------------------------------------------------------------------- - - def fit_dem(self, n_knots: int = 6, method: str = "least_squares", **kwargs): - """ - Fit the DEM using lmfit to minimize residuals. - Tracks chi² per iteration (like IDL's XRT_ITER_DEMSTAT). - """ - - # --- Auto-prepare prerequisites --- - if not hasattr(self, "logT") or not hasattr(self, "T"): - self.create_logT_grid() - if not hasattr(self, "_response_matrix"): - self._interpolate_responses_to_grid() - if not hasattr(self, "_initial_log_dem"): - self._estimate_initial_dem() - - self._last_n_knots = n_knots # for summary() - - # Storage for iteration statistics - self._iter_stats = {"chisq": [], "iteration": []} - - def _callback(params, iter, resid, *args, **kwargs): - # Compute chi² at this iteration - chi2 = np.sum(resid**2) - self._iter_stats["chisq"].append(chi2) - self._iter_stats["iteration"].append(iter) - - # 1. Build initial knot parameters - params = self._build_lmfit_parameters(n_knots=n_knots) - - # 2. Run minimization - result = minimize( - self._residuals, - params, - method=method, - iter_cb=_callback, # <-- track stats - max_nfev=self.max_iterations, - **kwargs, - ) - - # 3. 
On success, reconstruct DEM + fitted intensities - best_dem = self._reconstruct_dem_from_knots(result.params) - self.dem = best_dem - - dem_mid = 0.5 * (best_dem[:-1] + best_dem[1:]) - R_mid = 0.5 * (self._response_matrix[:, :-1] + self._response_matrix[:, 1:]) - T_mid = 0.5 * (self.T[:-1] + self.T[1:]).to_value(u.K) - I_fit = np.sum(R_mid * dem_mid * T_mid * self.dlnT, axis=1) - - self.fitted_intensities = I_fit - sigma = self.intensity_errors.to_value(u.DN / u.s) - residuals = (self._observed_intensities - I_fit) / sigma - - # Chi^2 metrics - self.chi2 = np.sum(residuals**2) - dof = len(self._observed_intensities) - len(result.params) - self.redchi2 = self.chi2 / max(dof, 1) - self.dof = dof # save for summary - - return result - - def fit_with_multiple_methods( - self, methods=("leastsq", "least_squares", "nelder"), n_knots: int = 6, **kwargs - ): + def solve(self): """ - Try multiple lmfit minimization methods and pick the best χ². - - Parameters - ---------- - methods : tuple of str, optional - Minimization methods to test. Default = ("leastsq", "least_squares", "nelder"). - n_knots : int, optional - Number of spline knots for DEM fit. Default = 6. - **kwargs : dict - Extra arguments passed to `lmfit.minimize`. + High-level DEM solver. - Returns - ------- - best_result : lmfit.MinimizerResult - Result from the method with lowest chi². + Replicates IDL’s xrt_dem_iterative2.pro behavior: + 1. Validate inputs + 2. Prepare logT grid and interpolate responses + 3. Solve ONE base DEM using original intensities + 4. If Monte Carlo requested, perform N perturbed solves + 5. 
Store all arrays cleanly for plotting and analysis + + After calling solve(), the following attributes exist: + + Base solution: + - self.logT (temperature grid) + - self.dem (DEM(T) in cm^-5 K^-1) + - self.chisq (chi-square of base fit) + - self.modeled_intensities + + Monte Carlo products (N = monte_carlo_runs, N>=0): + - self.mc_dem shape = (N+1, n_T) + - self.mc_chisq shape = (N+1,) + - self.mc_base_obs shape = (N+1, n_filters) + - self.mc_mod_obs shape = (N+1, n_filters) + + Column 0 always holds the BASE solution (unperturbed). + Columns 1..N hold Monte Carlo solutions. """ - if not hasattr(self, "_initial_log_dem"): - self._estimate_initial_dem() - - results = {} - best_chi2 = np.inf - best_result = None - best_method = None - - for method in methods: - print(f"\n>>> Trying method: {method}") - params = self._build_lmfit_parameters(n_knots=n_knots) - result = minimize(self._residuals, params, method=method, **kwargs) - - # Compute DEM + chi square for this fit - # SELFNOTEJOY - output currently does not have units. 
unts=cm^5 * K^-1 Make this a test - dem = self._reconstruct_dem_from_knots( - result.params - ) # SELFNOTEJOY - here is the stamp to defining the DEM - triple check - dem_mid = 0.5 * (dem[:-1] + dem[1:]) - R_mid = 0.5 * (self._response_matrix[:, :-1] + self._response_matrix[:, 1:]) - T_mid = 0.5 * (self.T[:-1] + self.T[1:]).to_value(u.K) - I_fit = np.sum(R_mid * dem_mid * T_mid * self.dlnT, axis=1) - - sigma = self.intensity_errors.to_value(u.DN / u.s) - residuals = (self._observed_intensities - I_fit) / sigma - chi2 = np.sum(residuals**2) - - print(f"x square = {chi2:.3e}") - - results[method] = (result, chi2) - - if chi2 < best_chi2: - best_chi2 = chi2 - best_result = result - best_method = method - - print(f"\n>>> Best method: {best_method} with x square = {best_chi2:.3e}") - - # Store outputs from the best fit - best_dem = self._reconstruct_dem_from_knots(best_result.params) - self.dem = best_dem - dem_mid = 0.5 * (best_dem[:-1] + best_dem[1:]) - R_mid = 0.5 * (self._response_matrix[:, :-1] + self._response_matrix[:, 1:]) - T_mid = 0.5 * (self.T[:-1] + self.T[1:]).to_value(u.K) - self.fitted_intensities = np.sum(R_mid * dem_mid * T_mid * self.dlnT, axis=1) - sigma = self.intensity_errors.to_value(u.DN / u.s) - residuals = (self._observed_intensities - self.fitted_intensities) / sigma - self.chi2 = np.sum(residuals**2) - dof = len(self._observed_intensities) - len(best_result.params) - self.redchi2 = self.chi2 / max(dof, 1) - - return best_result - - def run_monte_carlo( - self, n_runs=None, n_knots=6, method="least_squares", random_seed=None - ): - from tqdm import tqdm # add this at top of file - - if n_runs is None: - n_runs = self._monte_carlo_runs - if n_runs <= 0: - raise ValueError("Monte Carlo runs disabled (n_runs=0).") - - if random_seed is not None: - np.random.seed(random_seed) - - sigma = self.intensity_errors.to_value(u.DN / u.s) - dem_ensemble = [] - - self._last_n_knots = n_knots + # 0) Validate inputs 
------------------------------------------------------- + self.validate_inputs() - # --- progress bar - for i in tqdm(range(n_runs), desc="Monte Carlo DEM fits", unit="run"): - noisy_obs = self._observed_intensities + np.random.normal(0, sigma) - self._active_observed_intensities = noisy_obs - try: - params = self._build_lmfit_parameters(n_knots=n_knots) - result = minimize(self._residuals, params, method=method) - finally: - if hasattr(self, "_active_observed_intensities"): - delattr(self, "_active_observed_intensities") + # 1) Build temperature grid and response matrix -------------------------- + self.create_logT_grid() + self._interpolate_responses_to_grid() - dem_i = self._reconstruct_dem_from_knots(result.params) - dem_ensemble.append(dem_i) + # Base observed intensities (physical DN/s/pix) + base_obs_phys = self._observed_intensities.astype(float) - dem_ensemble = np.array(dem_ensemble) - self._dem_ensemble = dem_ensemble - self.dem_uncertainty = np.std(dem_ensemble, axis=0) - self.dem_median = np.median(dem_ensemble, axis=0) + # 2) Solve base DEM ------------------------------------------------------- + dem_base, mod_base, chisq_base, base_result = self._solve_single_dem( + observed_intensities_vals=base_obs_phys + ) - return dem_ensemble + # Store base solution + self.logT_solution = self.logT.copy() + self.dem = dem_base + self.chisq = chisq_base + self.modeled_intensities = mod_base - # ------------------------------------------------------------------------------------------------------------------------------- + # 3) Allocate Monte Carlo arrays ------------------------------------------ + n_T = self.logT.size + n_ch = base_obs_phys.size + N = self.monte_carlo_runs - ################################################################################################################################ - ################################################################################################################################ - 
#############************************** Start of error bars / Monte Carlo **************************########################## - ################## ##################### + self.mc_dem = np.zeros((N + 1, n_T), dtype=float) + self.mc_chisq = np.zeros((N + 1,), dtype=float) + self.mc_base_obs = np.zeros((N + 1, n_ch), dtype=float) + self.mc_mod_obs = np.zeros((N + 1, n_ch), dtype=float) - def summary(self): - """ - Print a comprehensive summary of the DEM solver setup, - including inputs, solver configuration, fit results, - Monte Carlo ensemble status, and available plotting helpers. - """ - print("\nXRTpy DEM Iterative Setup Summary\n") - print("=" * 65) + # Column 0 = base solution + self.mc_dem[0, :] = dem_base + self.mc_chisq[0] = chisq_base + self.mc_base_obs[0, :] = base_obs_phys + self.mc_mod_obs[0, :] = mod_base - # Filters & Observations - print(f" Filters: {self.filter_names}") - print(f" Observed Intensities: {self.observed_intensities}") - print(f" Number of channels: {len(self._observed_intensities)}") + # 4) Monte Carlo Loop ----------------------------------------------------- + if N > 0: + rng = np.random.default_rng() + sigma_phys = self.intensity_errors.to_value(u.DN / u.s) - # Errors - print(f" Intensity Errors: {self.intensity_errors}") - if self._intensity_errors is not None: - print(" Error model used: User-provided") - else: - print( - f" Error model used: Auto-estimated " - f"(obs * 0.03, min={self.min_observational_error.value} DN/s)" - ) - print(" [IDL reference: xrt_dem_iterative2.pro]") + for ii in range(1, N + 1): - # Temperature grid - print( - f" Temperature grid: logT {self.minimum_bound_temperature:.2f}–{self.maximum_bound_temperature:.2f}, step {self.logarithmic_temperature_step_size}" - ) - print(f" Temp bins: {len(self.logT)}") - print(f" dlogT: {self.dlogT:.3f}, dlnT: {self.dlnT:.3f}") - - # Solver setup - print(f" Solver factor: {self.normalization_factor:.1e}") - print(f" Monte Carlo runs: {self.monte_carlo_runs or 'None'}") - 
print(f" Max Iterations: {self.max_iterations}") - print(f" Knots (n_knots): {getattr(self, '_last_n_knots', 'default=6')}") - - if hasattr(self, "chi2"): - dof = len(self._observed_intensities) - len( - getattr(self, "_init_knot_params", []) - ) - print(f" χ²: {self.chi2:.4e} (dof={dof})") + if ii % max(1, N // 20) == 0: # print 5% updates + print(f" - MC run {ii}/{N}") - # Responses - print(f" Response unit: {self._response_unit}") - if hasattr(self, "_response_matrix"): - print( - f" Response matrix: {self._response_matrix.shape} (filters × bins)" - ) - else: - print(" Response matrix: Not yet built") - - # Fit results - if hasattr(self, "dem"): - print("\n Fit Results:") - print(f" DEM bins: {self.dem.shape}") - if hasattr(self, "chi2"): - print(f" Chi²: {self.chi2:.4e}") - if hasattr(self, "redchi2"): - print(f" Reduced Chi²: {self.redchi2:.4e}") - if hasattr(self, "fitted_intensities"): - print(f" Fitted Intensities: {self.fitted_intensities}") - - # Monte Carlo results - if hasattr(self, "_dem_ensemble"): - print("\n Monte Carlo Ensemble:") - n_mc = len(self._dem_ensemble) - print(f" Runs stored: {n_mc}") - dem_stack = np.array(self._dem_ensemble) - med = np.median(dem_stack, axis=0) - spread = np.percentile(dem_stack, [16, 84], axis=0) - print(" DEM median (log10 cm^-5 K^-1):") - print(f" First 5 bins: {np.log10(med[:5]+1e-40)}") - print(" DEM 1σ spread (first bin):") - print( - f" {np.log10(spread[0,0]+1e-40):.2f} – {np.log10(spread[1,0]+1e-40):.2f}" - ) - print(" Reproducibility: Run with random_seed for identical results") - - if hasattr(self, "chi2"): - print(f" Chi²: {self.chi2:.4e}") - if hasattr(self, "redchi2"): - print(f" Reduced Chi²: {self.redchi2:.4e}") - if hasattr(self, "dof"): - print(f" Degrees of Freedom: {self.dof}") - if hasattr(self, "_iter_stats") and len(self._iter_stats["chisq"]) > 0: - print(f" Iterations tracked: {len(self._iter_stats['chisq'])}") - print(f" Final Iter χ²: {self._iter_stats['chisq'][-1]:.4e}") - - # Plotting 
guidance - print("\n Plotting Options:") - if hasattr(self, "dem"): - print(" • plot_dem_results(results) → Quick plot from solve() dictionary") - print( - " • plot_dem_uncertainty() → Best-fit DEM + shaded ±1σ (if MC available)" - ) - print( - " • plot_idl_style() → IDL-style view (best-fit + MC curves)" - ) - print( - " • plot_dem_with_median_bins() → Median + closest DEM (IDL style extension)" - ) - print(" • plot_fit_residuals() → Observed vs fitted intensities") - print(" • plot_iteration_stats() ") + # Perturb intensities (IDL: i_obs + randn * i_err) + noise = rng.normal(loc=0.0, scale=sigma_phys, size=base_obs_phys.shape) + obs_pert = base_obs_phys + noise + obs_pert = np.maximum(obs_pert, 0.0) # IDL clips at zero - print("=" * 65) + # Solve DEM for perturbed intensities + dem_i, mod_i, chisq_i, _ = self._solve_single_dem(obs_pert) - ################################################################################################################################ - ################################################################################################################################ - #############************************** END of **************************########################## - ################################################################################################################################ - ################################################################################################################################ + # Store results + self.mc_dem[ii, :] = dem_i + self.mc_chisq[ii] = chisq_i + self.mc_base_obs[ii, :] = obs_pert + self.mc_mod_obs[ii, :] = mod_i - # ------------------------------------------------------------------------------------------------------------------------------- + # 5) Return DEM for convenience ------------------------------------------ + return self.dem # Attach plotting functions from plotting.py to the class XRTDEMIterative.plot_dem_results = dem_plotting.plot_dem_results 
-XRTDEMIterative.plot_dem_uncertainty = dem_plotting.plot_dem_uncertainty +# XRTDEMIterative.plot_dem_uncertainty = dem_plotting.plot_dem_uncertainty XRTDEMIterative.plot_idl_style = dem_plotting.plot_idl_style XRTDEMIterative.plot_fit_residuals = dem_plotting.plot_fit_residuals XRTDEMIterative.plot_dem_with_median_bins = dem_plotting.plot_dem_with_median_bins XRTDEMIterative.plot_iteration_stats = dem_plotting.plot_iteration_stats +XRTDEMIterative.plot_dem = dem_plotting.plot_dem +XRTDEMIterative.plot_dem_uncertainty = dem_plotting.plot_dem_uncertainty +XRTDEMIterative.plot_mc_fan = dem_plotting.plot_mc_fan +XRTDEMIterative.plot_mc_hist_at_temperature = dem_plotting.plot_mc_hist_at_temperature +XRTDEMIterative.plot_mc_chisq = dem_plotting.plot_mc_chisq +XRTDEMIterative.plot_modeled_vs_observed = dem_plotting.plot_modeled_vs_observed + +XRTDEMIterative.plot_dem_with_mc_vertical_bars = ( + dem_plotting.plot_dem_with_mc_vertical_bars +) +XRTDEMIterative.plot_dem_mc_vertical_bars = dem_plotting.plot_dem_mc_vertical_bars + + +# def solve(self): +# """ +# High-level DEM solver. + +# Mirrors IDL's xrt_dem_iterative2: +# - validates inputs +# - prepares temperature grid & responses +# - solves for base DEM +# - optionally runs Monte Carlo iterations + +# After this call, the following attributes are set: + +# self.logT : log10(T) grid (same as self.logT from create_logT_grid) +# self.dem : base DEM(T) [cm^-5 K^-1] +# self.chisq : χ² for base solution +# self.modeled_intensities : modeled DN/s/pix for base solution + +# If monte_carlo_runs > 0: + +# self.dem_mc : array shape (n_runs+1, n_T). 0 = base, 1..N = MC. +# self.chisq_mc : array shape (n_runs+1,). +# self.obs_mc : array shape (n_runs+1, n_channels) of observed intensities. +# self.mod_mc : array shape (n_runs+1, n_channels) of modeled intensities. 
+# """ +# # 0) Basic validation & preparation +# self.validate_inputs() + +# # Temperature grid and responses +# self.create_logT_grid() +# self._interpolate_responses_to_grid() + +# # We don't scale intensities here; scaling will be done per run +# base_obs_phys = self._observed_intensities.astype(float) # DN/s (no units) + +# # 1) Base DEM solution +# dem_base, mod_base, chisq_base, base_result = self._solve_single_dem( +# observed_intensities_vals=base_obs_phys +# ) + +# # Store base solution +# self.logT_solution = self.logT.copy() +# self.dem = dem_base # [cm^-5 K^-1] +# self.chisq = chisq_base +# self.modeled_intensities = mod_base # DN/s/pix + +# # For convenience: store base as the first "MC" entry +# n_T = self.logT.size +# n_ch = base_obs_phys.size +# n_runs = self.monte_carlo_runs + +# self.dem_mc = np.zeros((n_runs + 1, n_T), dtype=float) +# self.chisq_mc = np.zeros((n_runs + 1,), dtype=float) +# self.obs_mc = np.zeros((n_runs + 1, n_ch), dtype=float) +# self.mod_mc = np.zeros((n_runs + 1, n_ch), dtype=float) + +# self.dem_mc[0, :] = dem_base +# self.chisq_mc[0] = chisq_base +# self.obs_mc[0, :] = base_obs_phys +# self.mod_mc[0, :] = mod_base + +# # 2) Monte Carlo runs (if requested) +# if n_runs > 0: +# rng = np.random.default_rng() +# sigma_phys = self.intensity_errors.to_value(u.DN / u.s) + +# for ii in range(1, n_runs + 1): +# # Perturb observed intensities (IDL: i_obs + randomn * i_err, clipped at 0) +# noise = rng.normal(loc=0.0, scale=sigma_phys, size=base_obs_phys.shape) +# obs_pert = base_obs_phys + noise +# obs_pert = np.maximum(obs_pert, 0.0) + +# dem_i, mod_i, chisq_i, _ = self._solve_single_dem(obs_pert) + +# self.dem_mc[ii, :] = dem_i +# self.chisq_mc[ii] = chisq_i +# self.obs_mc[ii, :] = obs_pert +# self.mod_mc[ii, :] = mod_i + +# # Finished +# return self.dem + +# #NOTEFORJOY NOV 19 +# def solve( +# self, n_knots: int = 6, method: str = "least_squares", run_mc: bool = True +# ): +# """ +# Run the full DEM solver, IDL-style. 
+ +# This orchestrates: +# 1. Build temperature grid. +# 2. Interpolate responses (response matrix). +# 3. Estimate initial DEM. +# 4. Fit DEM with lmfit. +# 5. Optionally run Monte Carlo ensemble. + +# Parameters +# ---------- +# n_knots : int, optional +# Number of spline knots across logT. Default = 6. +# method : str, optional +# Minimization method for `lmfit.minimize`. Default = "least_squares". +# run_mc : bool, optional +# Whether to run Monte Carlo simulations (using self.monte_carlo_runs). +# Default = True. + +# Returns +# ------- +# results : dict +# Dictionary of solver outputs: +# - "temperature" : log10(T) grid +# - "dem" : best-fit DEM [cm^-5 K^-1] +# - "dem_err" : DEM uncertainty (if MC runs > 0) +# - "ifit" : fitted intensities [DN/s/pix] +# - "chi2" : χ² +# - "redchi2" : reduced χ² +# """ +# # IDL-STYLE NOSOLVE CHECk +# # IDL behavior: if all observed intensities are zero (or non-positive), +# # the DEM is trivially zero. Skip solving and return immediately. +# if np.all(self._observed_intensities <= 0): # == 0 +# warnings.warn( +# "\n\n All observed intensities are zero or non-positive. " +# "DEM cannot be solved. Returning zero DEM and zero fitted intensities. \n\n" +# ) + +# # Ensure grid exists (IDL also returns logT_out even for nosolve) +# if not hasattr(self, "logT"): +# self.create_logT_grid() + +# self.dem = np.zeros_like(self.logT) +# self.fitted_intensities = np.zeros_like(self._observed_intensities) +# self.chi2 = 0.0 +# self.redchi2 = 0.0 +# return self + +# # 1. Ensure grid & responses +# self.create_logT_grid() +# self._interpolate_responses_to_grid() + +# # 2. Estimate initial DEM +# self._estimate_initial_dem() + +# # 3. Fit DEM +# result = self.fit_dem(n_knots=n_knots, method=method) +# result = self.fit_dem() + +# # 4. Monte Carlo (optional) +# if run_mc and self.monte_carlo_runs > 0: +# self.run_monte_carlo( +# n_runs=self.monte_carlo_runs, n_knots=n_knots, method=method +# ) + +# # # 5. 
Bundle results +# # return { +# # "temperature": self.logT, +# # "dem": self.dem, +# # "dem_err": getattr(self, "dem_uncertainty", None), +# # "ifit": self.fitted_intensities, +# # "chi2": getattr(self, "chi2", None), +# # "redchi2": getattr(self, "redchi2", None), +# # "solver": self, +# # } +# return self + +# def to_dict(self): +# """Return solver outputs as a dictionary.""" +# return { +# "temperature": self.logT, +# "dem": getattr(self, "dem", None), +# "dem_err": getattr(self, "dem_uncertainty", None), +# "ifit": getattr(self, "fitted_intensities", None), +# "chi2": getattr(self, "chi2", None), +# "redchi2": getattr(self, "redchi2", None), +# } + +################################################################################################################################ +################################################################################################################################ +#############************************** END of DEM SOLVER **************************########################## +################################################################################################################################ +################################################################################################################################ + + +# def fit_dem(self, n_knots: int = 6, method: str = "least_squares", **kwargs): +# """ +# Fit the DEM using lmfit to minimize residuals. +# Tracks chi² per iteration (like IDL's XRT_ITER_DEMSTAT). 
+# """ + +# # --- Auto-prepare prerequisites --- +# if not hasattr(self, "logT") or not hasattr(self, "T"): +# self.create_logT_grid() +# if not hasattr(self, "_response_matrix"): +# self._interpolate_responses_to_grid() +# if not hasattr(self, "_initial_log_dem"): +# self._estimate_initial_dem() + +# self._last_n_knots = n_knots # for summary() + +# # Storage for iteration statistics +# self._iter_stats = {"chisq": [], "iteration": []} + +# def _callback(params, iter, resid, *args, **kwargs): +# # Compute chi² at this iteration +# chi2 = np.sum(resid**2) +# self._iter_stats["chisq"].append(chi2) +# self._iter_stats["iteration"].append(iter) + +# # 1. Build initial knot parameters +# params = self._build_lmfit_parameters(n_knots=n_knots) + +# # 2. Run minimization +# result = minimize( +# self._residuals, +# params, +# method=method, +# iter_cb=_callback, # <-- track stats +# max_nfev=self.max_iterations, +# **kwargs, +# ) + +# # 3. On success, reconstruct DEM + fitted intensities +# best_dem = self._reconstruct_dem_from_knots(result.params) +# self.dem = best_dem + +# dem_mid = 0.5 * (best_dem[:-1] + best_dem[1:]) +# R_mid = 0.5 * (self._response_matrix[:, :-1] + self._response_matrix[:, 1:]) +# T_mid = 0.5 * (self.T[:-1] + self.T[1:]).to_value(u.K) +# I_fit = np.sum(R_mid * dem_mid * T_mid * self.dlnT, axis=1) + +# self.fitted_intensities = I_fit +# sigma = self.intensity_errors.to_value(u.DN / u.s) +# residuals = (self._observed_intensities - I_fit) / sigma + +# # Chi^2 metrics +# self.chi2 = np.sum(residuals**2) +# dof = len(self._observed_intensities) - len(result.params) +# self.redchi2 = self.chi2 / max(dof, 1) +# self.dof = dof # save for summary + +# return result + +# def fit_with_multiple_methods( +# self, methods=("leastsq", "least_squares", "nelder"), n_knots: int = 6, **kwargs +# ): +# """ +# Try multiple lmfit minimization methods and pick the best χ². + +# Parameters +# ---------- +# methods : tuple of str, optional +# Minimization methods to test. 
Default = ("leastsq", "least_squares", "nelder"). +# n_knots : int, optional +# Number of spline knots for DEM fit. Default = 6. +# **kwargs : dict +# Extra arguments passed to `lmfit.minimize`. + +# Returns +# ------- +# best_result : lmfit.MinimizerResult +# Result from the method with lowest chi². +# """ + +# if not hasattr(self, "_initial_log_dem"): +# self._estimate_initial_dem() + +# results = {} +# best_chi2 = np.inf +# best_result = None +# best_method = None + +# for method in methods: +# print(f"\n>>> Trying method: {method}") +# params = self._build_lmfit_parameters(n_knots=n_knots) +# result = minimize(self._residuals, params, method=method, **kwargs) + +# # Compute DEM + chi square for this fit +# # SELFNOTEJOY - output currently does not have units. unts=cm^5 * K^-1 Make this a test +# dem = self._reconstruct_dem_from_knots( +# result.params +# ) # SELFNOTEJOY - here is the stamp to defining the DEM - triple check +# dem_mid = 0.5 * (dem[:-1] + dem[1:]) +# R_mid = 0.5 * (self._response_matrix[:, :-1] + self._response_matrix[:, 1:]) +# T_mid = 0.5 * (self.T[:-1] + self.T[1:]).to_value(u.K) +# I_fit = np.sum(R_mid * dem_mid * T_mid * self.dlnT, axis=1) + +# sigma = self.intensity_errors.to_value(u.DN / u.s) +# residuals = (self._observed_intensities - I_fit) / sigma +# chi2 = np.sum(residuals**2) + +# print(f"x square = {chi2:.3e}") + +# results[method] = (result, chi2) + +# if chi2 < best_chi2: +# best_chi2 = chi2 +# best_result = result +# best_method = method + +# print(f"\n>>> Best method: {best_method} with x square = {best_chi2:.3e}") + +# # Store outputs from the best fit +# best_dem = self._reconstruct_dem_from_knots(best_result.params) +# self.dem = best_dem +# dem_mid = 0.5 * (best_dem[:-1] + best_dem[1:]) +# R_mid = 0.5 * (self._response_matrix[:, :-1] + self._response_matrix[:, 1:]) +# T_mid = 0.5 * (self.T[:-1] + self.T[1:]).to_value(u.K) +# self.fitted_intensities = np.sum(R_mid * dem_mid * T_mid * self.dlnT, axis=1) +# sigma = 
self.intensity_errors.to_value(u.DN / u.s) +# residuals = (self._observed_intensities - self.fitted_intensities) / sigma +# self.chi2 = np.sum(residuals**2) +# dof = len(self._observed_intensities) - len(best_result.params) +# self.redchi2 = self.chi2 / max(dof, 1) + +# return best_result + +# def run_monte_carlo( +# self, n_runs=None, n_knots=6, method="least_squares", random_seed=None +# ): +# from tqdm import tqdm # add this at top of file + +# if n_runs is None: +# n_runs = self._monte_carlo_runs +# if n_runs <= 0: +# raise ValueError("Monte Carlo runs disabled (n_runs=0).") + +# if random_seed is not None: +# np.random.seed(random_seed) + +# sigma = self.intensity_errors.to_value(u.DN / u.s) +# dem_ensemble = [] + +# self._last_n_knots = n_knots + +# # --- progress bar +# for i in tqdm(range(n_runs), desc="Monte Carlo DEM fits", unit="run"): +# noisy_obs = self._observed_intensities + np.random.normal(0, sigma) +# self._active_observed_intensities = noisy_obs +# try: +# params = self._build_lmfit_parameters(n_knots=n_knots) +# result = minimize(self._residuals, params, method=method) +# finally: +# if hasattr(self, "_active_observed_intensities"): +# delattr(self, "_active_observed_intensities") + +# dem_i = self._reconstruct_dem_from_knots(result.params) +# dem_ensemble.append(dem_i) + +# dem_ensemble = np.array(dem_ensemble) +# self._dem_ensemble = dem_ensemble +# self.dem_uncertainty = np.std(dem_ensemble, axis=0) +# self.dem_median = np.median(dem_ensemble, axis=0) + +# return dem_ensemble + +# # ------------------------------------------------------------------------------------------------------------------------------- + +# ################################################################################################################################ +# ################################################################################################################################ +# #############************************** Start of error Summary 
**************************########################## +# ################## ##################### + +# def summary(self): +# """ +# Print a comprehensive summary of the DEM solver setup, +# including inputs, solver configuration, fit results, +# Monte Carlo ensemble status, and available plotting helpers. +# """ +# print("\nXRTpy DEM Iterative Setup Summary\n") +# print("=" * 65) + +# # Filters & Observations +# print(f" Filters: {self.filter_names}") +# print(f" Observed Intensities: {self.observed_intensities}") +# print(f" Number of channels: {len(self._observed_intensities)}") + +# # Errors +# print(f" Intensity Errors: {self.intensity_errors}") +# if self._intensity_errors is not None: +# print(" Error model used: User-provided") +# else: +# print( +# f" Error model used: Auto-estimated " +# f"(obs * 0.03, min={self.min_observational_error.value} DN/s)" +# ) +# print(" [IDL reference: xrt_dem_iterative2.pro]") + +# # Temperature grid +# print( +# f" Temperature grid: logT {self.minimum_bound_temperature:.2f}–{self.maximum_bound_temperature:.2f}, step {self.logarithmic_temperature_step_size}" +# ) +# print(f" Temp bins: {len(self.logT)}") +# print(f" dlogT: {self.dlogT:.3f}, dlnT: {self.dlnT:.3f}") + +# # Solver setup +# print(f" Solver factor: {self.normalization_factor:.1e}") +# print(f" Monte Carlo runs: {self.monte_carlo_runs or 'None'}") +# print(f" Max Iterations: {self.max_iterations}") +# print(f" Knots (n_knots): {getattr(self, '_last_n_knots', 'default=6')}") + +# if hasattr(self, "chi2"): +# dof = len(self._observed_intensities) - len( +# getattr(self, "_init_knot_params", []) +# ) +# print(f" χ²: {self.chi2:.4e} (dof={dof})") + +# # Responses +# print(f" Response unit: {self._response_unit}") +# if hasattr(self, "_response_matrix"): +# print( +# f" Response matrix: {self._response_matrix.shape} (filters × bins)" +# ) +# else: +# print(" Response matrix: Not yet built") + +# # Fit results +# if hasattr(self, "dem"): +# print("\n Fit Results:") +# 
print(f" DEM bins: {self.dem.shape}") +# if hasattr(self, "chi2"): +# print(f" Chi²: {self.chi2:.4e}") +# if hasattr(self, "redchi2"): +# print(f" Reduced Chi²: {self.redchi2:.4e}") +# if hasattr(self, "fitted_intensities"): +# print(f" Fitted Intensities: {self.fitted_intensities}") + +# # Monte Carlo results +# if hasattr(self, "_dem_ensemble"): +# print("\n Monte Carlo Ensemble:") +# n_mc = len(self._dem_ensemble) +# print(f" Runs stored: {n_mc}") +# dem_stack = np.array(self._dem_ensemble) +# med = np.median(dem_stack, axis=0) +# spread = np.percentile(dem_stack, [16, 84], axis=0) +# print(" DEM median (log10 cm^-5 K^-1):") +# print(f" First 5 bins: {np.log10(med[:5]+1e-40)}") +# print(" DEM 1σ spread (first bin):") +# print( +# f" {np.log10(spread[0,0]+1e-40):.2f} – {np.log10(spread[1,0]+1e-40):.2f}" +# ) +# print(" Reproducibility: Run with random_seed for identical results") + +# if hasattr(self, "chi2"): +# print(f" Chi²: {self.chi2:.4e}") +# if hasattr(self, "redchi2"): +# print(f" Reduced Chi²: {self.redchi2:.4e}") +# if hasattr(self, "dof"): +# print(f" Degrees of Freedom: {self.dof}") +# if hasattr(self, "_iter_stats") and len(self._iter_stats["chisq"]) > 0: +# print(f" Iterations tracked: {len(self._iter_stats['chisq'])}") +# print(f" Final Iter χ²: {self._iter_stats['chisq'][-1]:.4e}") + +# # Plotting guidance +# print("\n Plotting Options:") +# if hasattr(self, "dem"): +# print(" • plot_dem_results(results) → Quick plot from solve() dictionary") +# print( +# " • plot_dem_uncertainty() → Best-fit DEM + shaded ±1σ (if MC available)" +# ) +# print( +# " • plot_idl_style() → IDL-style view (best-fit + MC curves)" +# ) +# print( +# " • plot_dem_with_median_bins() → Median + closest DEM (IDL style extension)" +# ) +# print(" • plot_fit_residuals() → Observed vs fitted intensities") +# print(" • plot_iteration_stats() ") + +# print("=" * 65) + 
+################################################################################################################################ +################################################################################################################################ +#############************************** END of **************************########################## +################################################################################################################################ +################################################################################################################################ + +# ------------------------------------------------------------------------------------------------------------------------------- # NOTEFROMJOYTOJOY From c426a25197efe2fa25a251449c69dc2c4b251f3a Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Fri, 21 Nov 2025 13:23:13 -0500 Subject: [PATCH 092/121] Removing code that will not be used --- xrtpy/xrt_dem_iterative/dem_solver.py | 414 -------------------------- 1 file changed, 414 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 38d837a60..25f0114c7 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -649,149 +649,9 @@ def _prepare_scaled_observations(self): # ------------------------------------------------------------------------------------------------------------------------------- - ################################################################################################################################ - ################################################################################################################################ #############************************** Start of INITIAL ROUGH DEM ESTIMATE **************************########################## ################## An estimated EM shape based on simple intensity-over-response peaks, smoothed across T. 
##################### - # def _estimate_initial_dem(self, cutoff: float = 1.0 / np.e) -> np.ndarray: - # """ - # Estimate an initial DEM curve from observed intensities and responses. - - # This follows the algorithm in IDL's `xrt_dem_iterative2.pro`, which uses - # response-peak inversion to generate a crude log10 DEM estimate per channel, - # then interpolates these estimates onto the solver's regular temperature grid. - - # Parameters - # ---------- - # cutoff : float, optional - # Fraction of the peak response to use for defining the "good" window - # around each filter's peak. Default is 1/e ≈ 0.3679. - - # Returns - # ------- - # est_log_dem_on_grid : ndarray - # Array of shape (n_temperatures,) giving the initial DEM estimate - # on `self.logT`. Values are log10(DEM) in [cm^-5 K^-1]. - # This can be used to seed the solver. - - # Notes - # ----- - # - Units: - # * Observed intensities: [DN s^-1 pix^-1] - # * Response: [DN s^-1 pix^-1 cm^5] - # * DEM(T): [cm^-5 K^-1] - # - For each filter: - # 1. Locate the peak of its response. - # 2. Define a window where response > cutoff * peak. - # 3. Compute the denominator integral: sum( T * R * dlnT ). - # 4. Estimate DEM_peak = I_obs / denom. - # 5. Store log10(DEM_peak) at the peak logT. - # - Duplicate peak logTs are merged by averaging. - # - If fewer than 2 valid points are found, falls back to a flat guess - # (log10 DEM = 22 everywhere). - # """ - # if not hasattr(self, "logT"): - # raise AttributeError( - # "Temperature grid missing. Call create_logT_grid() first." - # ) - # if not hasattr(self, "_response_matrix"): - # raise AttributeError( - # "Response matrix missing. Call _interpolate_responses_to_grid() first." 
- # ) - - # # Storage for peak locations and DEM estimates - # t_peaks = [] - # log_dem_estimates = [] - - # # Loop over each filter - # for i, (T_orig, R_orig, I_obs) in enumerate( - # zip( - # self.response_temperatures, - # self.response_values, - # self._observed_intensities, - # ) - # ): - # logT_orig = np.log10(T_orig.to_value(u.K)) - # R_vals = R_orig.to_value((u.DN / u.s / u.pix) * u.cm**5) - - # if I_obs <= 0 or np.all(R_vals <= 0): - # continue # skip unusable channel - - # # 1. Peak location - # max_idx = np.argmax(R_vals) - # peak_val = R_vals[max_idx] - # t_peak = ( - # np.round(logT_orig[max_idx] / self._logarithmic_temperature_step_size) - # * self._logarithmic_temperature_step_size - # ) - - # # 2. Good window (where R > cutoff * peak) - # good = np.where(R_vals > peak_val * cutoff)[0] - # if len(good) < 2: - # continue - - # # 3. Compute denominator integral: sum(T 8*R* dlnT) - # T_good = 10.0 ** logT_orig[good] - # R_good = R_vals[good] - # dlogT_native = np.diff(logT_orig).mean() - # dlnT_native = np.log(10.0) * dlogT_native - # denom = np.sum(T_good * R_good * dlnT_native) - - # if denom <= 0: - # continue - - # # 4. DEM estimate at peak - # dem_peak = I_obs / denom # [cm^-5 K^-1] - # if dem_peak <= 0 or not np.isfinite(dem_peak): - # continue - - # log_dem_est = np.log10(dem_peak) - # t_peaks.append(t_peak) - # log_dem_estimates.append(log_dem_est) - - # # 5. 
Handle duplicates: average log10 DEM at same t_peak - # if len(t_peaks) == 0: - # # Fallback: flat guess (IDL style) - # est_log_dem_on_grid = np.ones_like(self.logT) * 22.0 - # self._initial_log_dem = est_log_dem_on_grid - # return est_log_dem_on_grid - - # uniq_t = {} - # for t, dem_val in zip(t_peaks, log_dem_estimates): - # if t in uniq_t: - # uniq_t[t].append(dem_val) - # else: - # uniq_t[t] = [dem_val] - # t_peaks_uniq = np.array(sorted(uniq_t.keys())) - # log_dem_uniq = np.array([np.mean(uniq_t[t]) for t in t_peaks_uniq]) - - # if len(t_peaks_uniq) < 2: - # # Not enough points > flat guess - # est_log_dem_on_grid = np.ones_like(self.logT) * 22.0 - # self._initial_log_dem = est_log_dem_on_grid - # return est_log_dem_on_grid - - # # 6. Interpolate sparse estimates onto the solver's grid - # interp_func = interp1d( - # t_peaks_uniq, - # log_dem_uniq, - # kind="linear", - # bounds_error=False, - # fill_value="extrapolate", - # ) - - # self._raw_estimated_dem_peaks = (t_peaks_uniq, log_dem_uniq) - # # est_log_dem_on_grid = interp_func(self.logT) - # # # Store for later use - # # self._initial_log_dem = est_log_dem_on_grid - - # #Use flat dem for inital guess - # est_log_dem_on_grid = np.ones_like(self.logT) * 1.0 - # self._initial_log_dem = est_log_dem_on_grid - - # return est_log_dem_on_grid - def _estimate_initial_dem(self, cutoff: float = 1.0 / np.e) -> np.ndarray: """ Construct an initial DEM guess, mirroring IDL's xrt_dem_iter_estim. 
@@ -923,15 +783,10 @@ def _estimate_initial_dem(self, cutoff: float = 1.0 / np.e) -> np.ndarray: return est_log_dem_on_grid #############************************** End of INITIAL DEM ESTIMATE **************************################################## - ################################################################################################################################ - ################################################################################################################################ # ------------------------------------------------------------------------------------------------------------------------------- - ################################################################################################################################ - ################################################################################################################################ #############************************** Start of **************************########################## - ################## ##################### def _prepare_spline_system(self): """ @@ -1110,17 +965,10 @@ def _solve_single_dem(self, observed_intensities_vals: np.ndarray): return dem_phys, modeled_intensities_phys, chisq, result ################################################################################################################################ - ################################################################################################################################ - #############************************** **************************########################## - ################################################################################################################################ - ################################################################################################################################ # ------------------------------------------------------------------------------------------------------------------------------- - 
################################################################################################################################ - ################################################################################################################################ #############************************** Start of error bars / Monte Carlo **************************########################## - ################## ##################### def _run_monte_carlo(self, result_params): """ @@ -1202,10 +1050,7 @@ def _run_monte_carlo(self, result_params): # ------------------------------------------------------------------------------------------------------------------------------- - ################################################################################################################################ - ################################################################################################################################ #############************************** Start of DEM SOLVER **************************########################## - ################## ##################### def solve(self): """ @@ -1321,181 +1166,6 @@ def solve(self): XRTDEMIterative.plot_dem_mc_vertical_bars = dem_plotting.plot_dem_mc_vertical_bars -# def solve(self): -# """ -# High-level DEM solver. - -# Mirrors IDL's xrt_dem_iterative2: -# - validates inputs -# - prepares temperature grid & responses -# - solves for base DEM -# - optionally runs Monte Carlo iterations - -# After this call, the following attributes are set: - -# self.logT : log10(T) grid (same as self.logT from create_logT_grid) -# self.dem : base DEM(T) [cm^-5 K^-1] -# self.chisq : χ² for base solution -# self.modeled_intensities : modeled DN/s/pix for base solution - -# If monte_carlo_runs > 0: - -# self.dem_mc : array shape (n_runs+1, n_T). 0 = base, 1..N = MC. -# self.chisq_mc : array shape (n_runs+1,). -# self.obs_mc : array shape (n_runs+1, n_channels) of observed intensities. 
-# self.mod_mc : array shape (n_runs+1, n_channels) of modeled intensities. -# """ -# # 0) Basic validation & preparation -# self.validate_inputs() - -# # Temperature grid and responses -# self.create_logT_grid() -# self._interpolate_responses_to_grid() - -# # We don't scale intensities here; scaling will be done per run -# base_obs_phys = self._observed_intensities.astype(float) # DN/s (no units) - -# # 1) Base DEM solution -# dem_base, mod_base, chisq_base, base_result = self._solve_single_dem( -# observed_intensities_vals=base_obs_phys -# ) - -# # Store base solution -# self.logT_solution = self.logT.copy() -# self.dem = dem_base # [cm^-5 K^-1] -# self.chisq = chisq_base -# self.modeled_intensities = mod_base # DN/s/pix - -# # For convenience: store base as the first "MC" entry -# n_T = self.logT.size -# n_ch = base_obs_phys.size -# n_runs = self.monte_carlo_runs - -# self.dem_mc = np.zeros((n_runs + 1, n_T), dtype=float) -# self.chisq_mc = np.zeros((n_runs + 1,), dtype=float) -# self.obs_mc = np.zeros((n_runs + 1, n_ch), dtype=float) -# self.mod_mc = np.zeros((n_runs + 1, n_ch), dtype=float) - -# self.dem_mc[0, :] = dem_base -# self.chisq_mc[0] = chisq_base -# self.obs_mc[0, :] = base_obs_phys -# self.mod_mc[0, :] = mod_base - -# # 2) Monte Carlo runs (if requested) -# if n_runs > 0: -# rng = np.random.default_rng() -# sigma_phys = self.intensity_errors.to_value(u.DN / u.s) - -# for ii in range(1, n_runs + 1): -# # Perturb observed intensities (IDL: i_obs + randomn * i_err, clipped at 0) -# noise = rng.normal(loc=0.0, scale=sigma_phys, size=base_obs_phys.shape) -# obs_pert = base_obs_phys + noise -# obs_pert = np.maximum(obs_pert, 0.0) - -# dem_i, mod_i, chisq_i, _ = self._solve_single_dem(obs_pert) - -# self.dem_mc[ii, :] = dem_i -# self.chisq_mc[ii] = chisq_i -# self.obs_mc[ii, :] = obs_pert -# self.mod_mc[ii, :] = mod_i - -# # Finished -# return self.dem - -# #NOTEFORJOY NOV 19 -# def solve( -# self, n_knots: int = 6, method: str = "least_squares", run_mc: 
bool = True -# ): -# """ -# Run the full DEM solver, IDL-style. - -# This orchestrates: -# 1. Build temperature grid. -# 2. Interpolate responses (response matrix). -# 3. Estimate initial DEM. -# 4. Fit DEM with lmfit. -# 5. Optionally run Monte Carlo ensemble. - -# Parameters -# ---------- -# n_knots : int, optional -# Number of spline knots across logT. Default = 6. -# method : str, optional -# Minimization method for `lmfit.minimize`. Default = "least_squares". -# run_mc : bool, optional -# Whether to run Monte Carlo simulations (using self.monte_carlo_runs). -# Default = True. - -# Returns -# ------- -# results : dict -# Dictionary of solver outputs: -# - "temperature" : log10(T) grid -# - "dem" : best-fit DEM [cm^-5 K^-1] -# - "dem_err" : DEM uncertainty (if MC runs > 0) -# - "ifit" : fitted intensities [DN/s/pix] -# - "chi2" : χ² -# - "redchi2" : reduced χ² -# """ -# # IDL-STYLE NOSOLVE CHECk -# # IDL behavior: if all observed intensities are zero (or non-positive), -# # the DEM is trivially zero. Skip solving and return immediately. -# if np.all(self._observed_intensities <= 0): # == 0 -# warnings.warn( -# "\n\n All observed intensities are zero or non-positive. " -# "DEM cannot be solved. Returning zero DEM and zero fitted intensities. \n\n" -# ) - -# # Ensure grid exists (IDL also returns logT_out even for nosolve) -# if not hasattr(self, "logT"): -# self.create_logT_grid() - -# self.dem = np.zeros_like(self.logT) -# self.fitted_intensities = np.zeros_like(self._observed_intensities) -# self.chi2 = 0.0 -# self.redchi2 = 0.0 -# return self - -# # 1. Ensure grid & responses -# self.create_logT_grid() -# self._interpolate_responses_to_grid() - -# # 2. Estimate initial DEM -# self._estimate_initial_dem() - -# # 3. Fit DEM -# result = self.fit_dem(n_knots=n_knots, method=method) -# result = self.fit_dem() - -# # 4. 
Monte Carlo (optional) -# if run_mc and self.monte_carlo_runs > 0: -# self.run_monte_carlo( -# n_runs=self.monte_carlo_runs, n_knots=n_knots, method=method -# ) - -# # # 5. Bundle results -# # return { -# # "temperature": self.logT, -# # "dem": self.dem, -# # "dem_err": getattr(self, "dem_uncertainty", None), -# # "ifit": self.fitted_intensities, -# # "chi2": getattr(self, "chi2", None), -# # "redchi2": getattr(self, "redchi2", None), -# # "solver": self, -# # } -# return self - -# def to_dict(self): -# """Return solver outputs as a dictionary.""" -# return { -# "temperature": self.logT, -# "dem": getattr(self, "dem", None), -# "dem_err": getattr(self, "dem_uncertainty", None), -# "ifit": getattr(self, "fitted_intensities", None), -# "chi2": getattr(self, "chi2", None), -# "redchi2": getattr(self, "redchi2", None), -# } - ################################################################################################################################ ################################################################################################################################ #############************************** END of DEM SOLVER **************************########################## @@ -1503,65 +1173,6 @@ def solve(self): ################################################################################################################################ -# def fit_dem(self, n_knots: int = 6, method: str = "least_squares", **kwargs): -# """ -# Fit the DEM using lmfit to minimize residuals. -# Tracks chi² per iteration (like IDL's XRT_ITER_DEMSTAT). 
-# """ - -# # --- Auto-prepare prerequisites --- -# if not hasattr(self, "logT") or not hasattr(self, "T"): -# self.create_logT_grid() -# if not hasattr(self, "_response_matrix"): -# self._interpolate_responses_to_grid() -# if not hasattr(self, "_initial_log_dem"): -# self._estimate_initial_dem() - -# self._last_n_knots = n_knots # for summary() - -# # Storage for iteration statistics -# self._iter_stats = {"chisq": [], "iteration": []} - -# def _callback(params, iter, resid, *args, **kwargs): -# # Compute chi² at this iteration -# chi2 = np.sum(resid**2) -# self._iter_stats["chisq"].append(chi2) -# self._iter_stats["iteration"].append(iter) - -# # 1. Build initial knot parameters -# params = self._build_lmfit_parameters(n_knots=n_knots) - -# # 2. Run minimization -# result = minimize( -# self._residuals, -# params, -# method=method, -# iter_cb=_callback, # <-- track stats -# max_nfev=self.max_iterations, -# **kwargs, -# ) - -# # 3. On success, reconstruct DEM + fitted intensities -# best_dem = self._reconstruct_dem_from_knots(result.params) -# self.dem = best_dem - -# dem_mid = 0.5 * (best_dem[:-1] + best_dem[1:]) -# R_mid = 0.5 * (self._response_matrix[:, :-1] + self._response_matrix[:, 1:]) -# T_mid = 0.5 * (self.T[:-1] + self.T[1:]).to_value(u.K) -# I_fit = np.sum(R_mid * dem_mid * T_mid * self.dlnT, axis=1) - -# self.fitted_intensities = I_fit -# sigma = self.intensity_errors.to_value(u.DN / u.s) -# residuals = (self._observed_intensities - I_fit) / sigma - -# # Chi^2 metrics -# self.chi2 = np.sum(residuals**2) -# dof = len(self._observed_intensities) - len(result.params) -# self.redchi2 = self.chi2 / max(dof, 1) -# self.dof = dof # save for summary - -# return result - # def fit_with_multiple_methods( # self, methods=("leastsq", "least_squares", "nelder"), n_knots: int = 6, **kwargs # ): @@ -1789,28 +1400,3 @@ def solve(self): # print(" • plot_iteration_stats() ") # print("=" * 65) - 
-################################################################################################################################ -################################################################################################################################ -#############************************** END of **************************########################## -################################################################################################################################ -################################################################################################################################ - -# ------------------------------------------------------------------------------------------------------------------------------- - - -# NOTEFROMJOYTOJOY -# Missin outputs -# 1 -# I'm missing BASE_OBS- It is a 2D array of intensity values for all DEM runs (base + MC) -# the set of observed intensities used in each DEM solution, including: -# Column 0 > the original observed intensities (your real data) -# Columns 1..MC_ITER > each Monte-Carlo–perturbed intensity vector - -# 2 -# MOD_OBS - Model intensities predicted by the DEM for each run. - -# 3 -# CHISQ- [1+MC_iter] -# chi-squre for each DEM solution -# Computed via mpdemfunct residuals squared and summed From 5352869fe465707cf6119d6713b0cfd94870f08b Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Fri, 21 Nov 2025 16:13:22 -0500 Subject: [PATCH 093/121] Worked on the code method and finally got a DEM and MC to work, main issue - use *result.params*, not params0. 
I think I finally got the DEM WORKINGGGGGG --- xrtpy/xrt_dem_iterative/dem_solver.py | 578 +++++++++++++++++--------- 1 file changed, 376 insertions(+), 202 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 25f0114c7..191afd505 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -814,6 +814,17 @@ def _prepare_spline_system(self): # pm_matrix = R(T) * T * dlnT (IDL line: emis * 10^t * alog(10^dt)) # units - DN/s/pix/cm^5 * K * dLnT * DEM == DN/s/PIX T_linear = self.T.to_value(u.K) + + # self.pm_matrix = (self._response_matrix * T_linear * self.dlnT).astype(float) NOV21 + + # NOV21 + # Was missing - normalization_factor - i think #NOTETOJOYNOV21 + # emissivity_matrix is shape (n_filters, n_T) + pm_phys = self._response_matrix * T_linear * self.dlnT # physical units + + # SCALE to match scaled DEM and scaled intensities + # self.pm_matrix = pm_phys / self.normalization_factor#THE FIX + self.pm_matrix = (self._response_matrix * T_linear * self.dlnT).astype(float) # Knot positions are evenly spaced in logT (IDL spl_t) @@ -824,7 +835,7 @@ def _prepare_spline_system(self): interp_init = interp1d( self.logT, self._initial_log_dem, # IDL is flat logDEM = 1.0 - kind="linear", # IDL uses a cubic spline later NOTEFORJOY NOV24 + kind="linear", # IDL uses a cubic spline later NOTEFORJOY NOV20 bounds_error=False, fill_value="extrapolate", ) @@ -898,7 +909,7 @@ def _residuals(self, params): # 4. 
Residuals = (i_mod - y_obs) * weights / sigma residuals = (i_mod - y_scaled) * self.weights / sigma_scaled - # Store χ² if desired + # chi^2 history, mostly for debugging chi2_val = np.sum(residuals**2) if not hasattr(self, "_iteration_chi2"): self._iteration_chi2 = [] @@ -906,59 +917,97 @@ def _residuals(self, params): return residuals - def _solve_single_dem(self, observed_intensities_vals: np.ndarray): - """ - Solve the DEM once for a given set of observed intensities (base or MC-perturbed). - - Parameters - ---------- - observed_intensities_vals : ndarray - 1D array of observed intensities in DN/s/pix (no units attached). + # def _solve_single_dem(self, observed_intensities_vals: np.ndarray): + # """ + # Solve the DEM once for a given set of observed intensities (base or MC-perturbed). + + # Parameters + # ---------- + # observed_intensities_vals : ndarray + # 1D array of observed intensities in DN/s/pix (no units attached). + + # Returns + # ------- + # dem_phys : ndarray + # DEM(T) in physical units [cm^-5 K^-1] on self.logT. + # modeled_intensities_phys : ndarray + # Modeled intensities in DN/s/pix for each channel. + # chisq : float + # Sum of squared residuals for this run. + # fit_result : lmfit.MinimizerResult + # Full lmfit result object (for diagnostics). + # """ + # # 1. Set scaled observations for this run (IDL: i_obs = i_obs/solv_factor) + # nf = self._normalization_factor + # self.intensities_scaled = observed_intensities_vals / nf + # # Errors are the same for all runs; scale once + # sigma_phys = self.intensity_errors.to_value(u.DN / u.s) + # self.sigma_scaled_intensity_errors = sigma_phys / nf + + # # 2. 
If all intensities are zero → nosolve, DEM = 0 + # if np.all(self.intensities_scaled == 0.0): + # dem_scaled = np.zeros_like(self.logT, dtype=float) + # dem_phys = dem_scaled * nf + # modeled_intensities_phys = np.zeros_like(observed_intensities_vals) + # chisq = 0.0 + # fit_result = None + # return dem_phys, modeled_intensities_phys, chisq, fit_result + + # # 3. Initial DEM & spline system (IDL: xrt_dem_iter_estim + mp_prep) + # self._estimate_initial_dem() + # self._prepare_spline_system() + # params = self._build_lmfit_parameters() + + # # 4. Run the least-squares solver (IDL: xrt_dem_iter_solver + mpfit) + # result = minimize(self._residuals, params, max_nfev=self._max_iterations) + + # # 5. Reconstruct DEM in *scaled* units, then convert to physical + # dem_scaled = self._reconstruct_dem_from_knots(result.params) # cm^-5 K^-1 / nf + # dem_phys = dem_scaled * nf # undo normalization, like IDL + + # # 6. Modeled intensities (IDL: i_mod = dem ## pm * abunds) + # i_mod_scaled = (self.pm_matrix @ dem_scaled) * self.abundances + # modeled_intensities_phys = i_mod_scaled * nf # back to DN/s/pix + + # # 7. χ² from residuals + # resid = self._residuals(result.params) + # chisq = float(np.sum(resid**2)) + + # return dem_phys, modeled_intensities_phys, chisq, result - Returns - ------- - dem_phys : ndarray - DEM(T) in physical units [cm^-5 K^-1] on self.logT. - modeled_intensities_phys : ndarray - Modeled intensities in DN/s/pix for each channel. - chisq : float - Sum of squared residuals for this run. - fit_result : lmfit.MinimizerResult - Full lmfit result object (for diagnostics). - """ - # 1. Set scaled observations for this run (IDL: i_obs = i_obs/solv_factor) + def _solve_single_dem(self, observed_intensities_vals: np.ndarray): nf = self._normalization_factor + + # 1. 
scaled obs/errors self.intensities_scaled = observed_intensities_vals / nf - # Errors are the same for all runs; scale once sigma_phys = self.intensity_errors.to_value(u.DN / u.s) self.sigma_scaled_intensity_errors = sigma_phys / nf - # 2. If all intensities are zero → nosolve, DEM = 0 + # 2. trivial nosolve case if np.all(self.intensities_scaled == 0.0): - dem_scaled = np.zeros_like(self.logT, dtype=float) - dem_phys = dem_scaled * nf + dem_model = np.zeros_like(self.logT) + dem_phys = dem_model * nf modeled_intensities_phys = np.zeros_like(observed_intensities_vals) - chisq = 0.0 - fit_result = None - return dem_phys, modeled_intensities_phys, chisq, fit_result + return dem_phys, modeled_intensities_phys, 0.0, None + + # 3. initial guess (log10 DEM_model on grid) + init_log_dem = self._estimate_initial_dem() # flat ~ 1.0 in IDL + self._initial_log_dem = init_log_dem - # 3. Initial DEM & spline system (IDL: xrt_dem_iter_estim + mp_prep) - self._estimate_initial_dem() + # 4. spline system using that initial guess self._prepare_spline_system() - params = self._build_lmfit_parameters() + params0 = self._build_lmfit_parameters() # values = initial_log_dem at knots - # 4. Run the least-squares solver (IDL: xrt_dem_iter_solver + mpfit) - result = minimize(self._residuals, params, max_nfev=self._max_iterations) + # 5. run minimizer + result = minimize(self._residuals, params0, max_nfev=self._max_iterations) - # 5. Reconstruct DEM in *scaled* units, then convert to physical - dem_scaled = self._reconstruct_dem_from_knots(result.params) # cm^-5 K^-1 / nf - dem_phys = dem_scaled * nf # undo normalization, like IDL + # THIS is the critical part – use *result.params*, not params0 <<< + dem_model = self._reconstruct_dem_from_knots(result.params) # DEM_model(T) + dem_phys = dem_model * nf - # 6. 
Modeled intensities (IDL: i_mod = dem ## pm * abunds) - i_mod_scaled = (self.pm_matrix @ dem_scaled) * self.abundances - modeled_intensities_phys = i_mod_scaled * nf # back to DN/s/pix + i_mod_scaled = (self.pm_matrix @ dem_model) * self.abundances + modeled_intensities_phys = i_mod_scaled * nf - # 7. χ² from residuals resid = self._residuals(result.params) chisq = float(np.sum(resid**2)) @@ -1052,57 +1101,223 @@ def _run_monte_carlo(self, result_params): #############************************** Start of DEM SOLVER **************************########################## + # def solve(self): + # """ + # High-level DEM solver. + + # Pythonic analogue of IDL's xrt_dem_iterative2.pro: + + # 1. Validate inputs. + # 2. Build the logarithmic temperature grid and interpolate responses. + # 3. Solve ONE base DEM using the original (unperturbed) intensities. + # 4. If Monte Carlo is requested (monte_carlo_runs > 0), perform N + # perturbed solves by adding Gaussian noise to the base intensities. + # 5. Store all outputs on the instance for later analysis/plotting. + + # After calling solve(), the following attributes are defined: + + # Base solution + # ------------- + # logT : ndarray, shape (n_T,) + # Temperature grid (log10 K) on which the DEM is defined. + # dem : ndarray, shape (n_T,) + # Best-fit DEM(T) in linear units [cm^-5 K^-1]. + # This is already scaled back by `normalization_factor`. + # chisq : float + # Chi-square of the base fit (sum of squared residuals). + # modeled_intensities : ndarray, shape (n_channels,) + # Best-fit modeled intensities in physical units [DN s^-1 pix^-1]. + # _base_fit_result : lmfit.Result + # The lmfit result object for the base fit (for debugging/inspection). + + # Monte Carlo products + # -------------------- + # mc_dem : ndarray, shape (N+1, n_T) + # DEM curves for base (row 0) and each Monte Carlo run (rows 1..N). + # All in physical units [cm^-5 K^-1]. 
+ # mc_chisq : ndarray, shape (N+1,) + # Chi-square values for base (index 0) and each MC run. + # mc_base_obs : ndarray, shape (N+1, n_channels) + # Observed intensities for base + each MC run [DN s^-1 pix^-1]. + # Row 0 is the original observation; rows 1..N are perturbed. + # mc_mod_obs : ndarray, shape (N+1, n_channels) + # Modeled intensities corresponding to each DEM realization, + # in physical units [DN s^-1 pix^-1]. + + # Notes + # ----- + # * Column/row 0 always holds the BASE (unperturbed) solution. + # Rows 1..N hold the Monte Carlo solutions. + # * Monte Carlo perturbations follow the IDL scheme: + # I_obs' = I_obs + N(0, sigma), with sigma from `intensity_errors`, + # and clipped at 0 (no negative intensities). + # """ + + # # ------------------------------------------------------------------ + # # 0) Validate inputs (IDL: argument checking at top of xrt_dem_iterative2) + # # ------------------------------------------------------------------ + # self.validate_inputs() + + # # ------------------------------------------------------------------ + # # 1) Build temperature grid and response matrix (IDL: regular logT grid + interpol) + # # ------------------------------------------------------------------ + # self.create_logT_grid() + # self._interpolate_responses_to_grid() + + # # Base observed intensities in physical units [DN/s/pix] + # base_obs_phys = self._observed_intensities.astype(float) + + # # ------------------------------------------------------------------ + # # 2) Solve base DEM (no perturbations) + # # This is the analogue of the "base" call to xrt_dem_iter_nowidget. + # # ------------------------------------------------------------------ + # dem_base, mod_base, chisq_base, base_result = self._solve_single_dem( + # observed_intensities_vals=base_obs_phys + # ) + + # #first dem testing + # self.first = dem_base + + # # Store base solution on the instance + # # logT is already stored by create_logT_grid(), but keep alias if desired. 
+ # self.logT_solution = self.logT.copy() + # self.dem = dem_base # DEM(T) in [cm^-5 K^-1] + # self.chisq = chisq_base # sum of squared residuals + # self.modeled_intensities = mod_base # [DN/s/pix] + # self._base_fit_result = base_result # lmfit.Result for debugging + + # # ------------------------------------------------------------------ + # # 3) Allocate Monte Carlo arrays + # # IDL: base_obs, dem_out, chisq, mod_obs grow in the 2nd dimension. + # # Here we store as (N+1, ...) with row 0 = base solution. + # # ------------------------------------------------------------------ + # n_T = self.logT.size + # n_ch = base_obs_phys.size + # N = self.monte_carlo_runs + + # self.mc_dem = np.zeros((N + 1, n_T), dtype=float) + # self.mc_chisq = np.zeros((N + 1,), dtype=float) + # self.mc_base_obs = np.zeros((N + 1, n_ch), dtype=float) + # self.mc_mod_obs = np.zeros((N + 1, n_ch), dtype=float) + + # # Row 0 = base solution (unperturbed) + # self.mc_dem[0, :] = dem_base + # self.mc_chisq[0] = chisq_base + # self.mc_base_obs[0, :] = base_obs_phys + # self.mc_mod_obs[0, :] = mod_base + + # # ------------------------------------------------------------------ + # # 4) Monte Carlo loop (IDL: Section 3.6) + # # ------------------------------------------------------------------ + # if N > 0: + # # Use default_rng() seeded from system entropy (IDL: seed = systime(1)) + # rng = np.random.default_rng() + + # # Base intensity errors in physical units [DN/s/pix] + # sigma_phys = self.intensity_errors.to_value(u.DN / u.s) + + # for ii in range(1, N + 1): + # # Optional lightweight progress indicator (~20 updates max) + # if ii % max(1, N // 20) == 0: + # print(f" - Monte Carlo run {ii}/{N}") + + # # --- 4a) Perturb intensities: I' = I + N(0, sigma), clipped at 0 --- + # noise = rng.normal(loc=0.0, scale=sigma_phys, size=base_obs_phys.shape) + # obs_pert = base_obs_phys + noise + # obs_pert = np.maximum(obs_pert, 0.0) # IDL: > 0.0 to avoid negatives + + # # --- 4b) Solve DEM for this 
perturbed realization --- + # dem_i, mod_i, chisq_i, _ = self._solve_single_dem( + # observed_intensities_vals=obs_pert + # ) + + # # --- 4c) Store Monte Carlo results --- + # self.mc_dem[ii, :] = dem_i + # self.mc_chisq[ii] = chisq_i + # self.mc_base_obs[ii, :] = obs_pert + # self.mc_mod_obs[ii, :] = mod_i + + # # ------------------------------------------------------------------ + # # 5) Return DEM for convenience (common pattern in Python APIs) + # # ------------------------------------------------------------------ + # return self.dem + def solve(self): """ High-level DEM solver. - Replicates IDL’s xrt_dem_iterative2.pro behavior: - 1. Validate inputs - 2. Prepare logT grid and interpolate responses - 3. Solve ONE base DEM using original intensities - 4. If Monte Carlo requested, perform N perturbed solves - 5. Store all arrays cleanly for plotting and analysis - - After calling solve(), the following attributes exist: - - Base solution: - - self.logT (temperature grid) - - self.dem (DEM(T) in cm^-5 K^-1) - - self.chisq (chi-square of base fit) - - self.modeled_intensities - - Monte Carlo products (N = monte_carlo_runs, N>=0): - - self.mc_dem shape = (N+1, n_T) - - self.mc_chisq shape = (N+1,) - - self.mc_base_obs shape = (N+1, n_filters) - - self.mc_mod_obs shape = (N+1, n_filters) - - Column 0 always holds the BASE solution (unperturbed). - Columns 1..N hold Monte Carlo solutions. + Python analogue of IDL's xrt_dem_iterative2.pro: + + 1. Validate inputs. + 2. Build the logT grid and interpolate temperature responses. + 3. Solve ONE base DEM using the original (unperturbed) intensities. + 4. If Monte Carlo is requested (monte_carlo_runs > 0), perform N + perturbed solves by adding Gaussian noise to the base intensities. + 5. Store all outputs on the instance for later analysis/plotting. + + After calling solve(), the following attributes are defined: + + Base solution + ------------- + logT : ndarray (n_T,) + log10 temperature grid [K]. 
+ dem : ndarray (n_T,) + Best-fit DEM(T) in physical units [cm^-5 K^-1]. + chisq : float + Chi-square of the base fit (sum of squared residuals). + modeled_intensities : ndarray (n_channels,) + Best-fit modeled intensities in [DN s^-1 pix^-1]. + _base_fit_result : lmfit.MinimizerResult + Full lmfit result object for diagnostics. + + Monte Carlo products + -------------------- + mc_dem : ndarray (N+1, n_T) + DEM curves for base (row 0) and each Monte Carlo run (rows 1..N), + in physical units [cm^-5 K^-1]. + mc_chisq : ndarray (N+1,) + Chi-square values for base (index 0) and each MC run. + mc_base_obs : ndarray (N+1, n_channels) + Observed intensities [DN s^-1 pix^-1] for base + each MC run. + Row 0 = original observation; rows 1..N = perturbed. + mc_mod_obs : ndarray (N+1, n_channels) + Modeled intensities [DN s^-1 pix^-1] corresponding to mc_dem. """ - # 0) Validate inputs ------------------------------------------------------- + # -------------------------------------------------------------- + # 0) Validate inputs (IDL: argument checks near top) + # -------------------------------------------------------------- self.validate_inputs() - # 1) Build temperature grid and response matrix -------------------------- + # -------------------------------------------------------------- + # 1) Build logT grid and response matrix + # (IDL: regular logT grid + interpolated emissivities) + # -------------------------------------------------------------- self.create_logT_grid() self._interpolate_responses_to_grid() - # Base observed intensities (physical DN/s/pix) - base_obs_phys = self._observed_intensities.astype(float) + # Base observed intensities in physical units [DN/s/pix] + base_obs_phys = np.asarray(self._observed_intensities, dtype=float) - # 2) Solve base DEM ------------------------------------------------------- + # -------------------------------------------------------------- + # 2) Solve BASE DEM (unperturbed intensities) + # Corresponds to the first call to 
xrt_dem_iter_nowidget in IDL. + # -------------------------------------------------------------- dem_base, mod_base, chisq_base, base_result = self._solve_single_dem( observed_intensities_vals=base_obs_phys ) # Store base solution - self.logT_solution = self.logT.copy() - self.dem = dem_base - self.chisq = chisq_base - self.modeled_intensities = mod_base - - # 3) Allocate Monte Carlo arrays ------------------------------------------ + self.logT_solution = self.logT.copy() # alias + self.dem = dem_base # [cm^-5 K^-1] + self.chisq = chisq_base # chi-square + self.modeled_intensities = mod_base # [DN/s/pix] + self._base_fit_result = base_result + + # -------------------------------------------------------------- + # 3) Allocate Monte Carlo arrays + # (IDL: base_obs, dem_out, chisq, mod_obs) + # -------------------------------------------------------------- n_T = self.logT.size n_ch = base_obs_phys.size N = self.monte_carlo_runs @@ -1112,39 +1327,113 @@ def solve(self): self.mc_base_obs = np.zeros((N + 1, n_ch), dtype=float) self.mc_mod_obs = np.zeros((N + 1, n_ch), dtype=float) - # Column 0 = base solution + # Row 0 = base solution (unperturbed) self.mc_dem[0, :] = dem_base self.mc_chisq[0] = chisq_base self.mc_base_obs[0, :] = base_obs_phys self.mc_mod_obs[0, :] = mod_base - # 4) Monte Carlo Loop ----------------------------------------------------- + # -------------------------------------------------------------- + # 4) Monte Carlo loop (IDL: Section 3.6) + # -------------------------------------------------------------- if N > 0: - rng = np.random.default_rng() + rng = np.random.default_rng() # like IDL's systime(1) seeding + + # Intensity errors in physical units [DN/s/pix] sigma_phys = self.intensity_errors.to_value(u.DN / u.s) for ii in range(1, N + 1): + # Lightweight progress indicator + if ii % max(1, N // 20) == 0: + print(f" - Monte Carlo run {ii}/{N}") - if ii % max(1, N // 20) == 0: # print 5% updates - print(f" - MC run {ii}/{N}") - - # Perturb 
intensities (IDL: i_obs + randn * i_err) + # ---- 4a) Perturb intensities: I' = I + N(0, sigma), clipped at 0 noise = rng.normal(loc=0.0, scale=sigma_phys, size=base_obs_phys.shape) obs_pert = base_obs_phys + noise - obs_pert = np.maximum(obs_pert, 0.0) # IDL clips at zero + obs_pert = np.maximum(obs_pert, 0.0) # IDL: >0 to avoid negatives - # Solve DEM for perturbed intensities - dem_i, mod_i, chisq_i, _ = self._solve_single_dem(obs_pert) + # ---- 4b) Solve DEM for this perturbed realization + dem_i, mod_i, chisq_i, _ = self._solve_single_dem( + observed_intensities_vals=obs_pert + ) - # Store results + # ---- 4c) Store Monte Carlo results self.mc_dem[ii, :] = dem_i self.mc_chisq[ii] = chisq_i self.mc_base_obs[ii, :] = obs_pert self.mc_mod_obs[ii, :] = mod_i - # 5) Return DEM for convenience ------------------------------------------ + # -------------------------------------------------------------- + # 5) Return DEM for convenience + # -------------------------------------------------------------- return self.dem + def plot_dem_with_monte_carlo( + self, + mc_color="black", + base_color="red", + alpha_mc=0.15, + lw_mc=1.2, + lw_base=2.0, + figsize=(10, 6), + show_envelope=True, + ): + """ + Plot DEM with Monte Carlo ensemble using step curves. 
+ """ + + import matplotlib.pyplot as plt + + logT = self.logT + mc_dem = self.mc_dem # shape (N+1, n_T) + base_dem = self.dem # shape (n_T,) + N = mc_dem.shape[0] - 1 # number of MC runs + + plt.figure(figsize=figsize) + + # --- MC DEM curves (black) --- + for i in range(1, N + 1): + plt.step( + logT, + np.log10(mc_dem[i, :]), + where="mid", + color=mc_color, + alpha=alpha_mc, + linewidth=lw_mc, + ) + + # --- Optional uncertainty envelope (16–84 percentile) --- + if show_envelope and N > 1: + dem_low = np.percentile(mc_dem[1:, :], 16, axis=0) + dem_high = np.percentile(mc_dem[1:, :], 84, axis=0) + + plt.fill_between( + logT, + np.log10(dem_low), + np.log10(dem_high), + step="mid", + color="black", + alpha=0.2, + label="68% interval", + ) + + # --- Base DEM (red) --- + plt.step( + logT, + np.log10(base_dem), + where="mid", + color=base_color, + linewidth=lw_base, + label="Base DEM", + ) + + plt.xlabel("log₁₀(T) [K]") + plt.ylabel("log₁₀(DEM)") + plt.title(f"DEM Monte Carlo ({N} realizations)") + plt.grid(True) + plt.legend() + plt.show() + # Attach plotting functions from plotting.py to the class XRTDEMIterative.plot_dem_results = dem_plotting.plot_dem_results @@ -1173,121 +1462,6 @@ def solve(self): ################################################################################################################################ -# def fit_with_multiple_methods( -# self, methods=("leastsq", "least_squares", "nelder"), n_knots: int = 6, **kwargs -# ): -# """ -# Try multiple lmfit minimization methods and pick the best χ². - -# Parameters -# ---------- -# methods : tuple of str, optional -# Minimization methods to test. Default = ("leastsq", "least_squares", "nelder"). -# n_knots : int, optional -# Number of spline knots for DEM fit. Default = 6. -# **kwargs : dict -# Extra arguments passed to `lmfit.minimize`. - -# Returns -# ------- -# best_result : lmfit.MinimizerResult -# Result from the method with lowest chi². 
-# """ - -# if not hasattr(self, "_initial_log_dem"): -# self._estimate_initial_dem() - -# results = {} -# best_chi2 = np.inf -# best_result = None -# best_method = None - -# for method in methods: -# print(f"\n>>> Trying method: {method}") -# params = self._build_lmfit_parameters(n_knots=n_knots) -# result = minimize(self._residuals, params, method=method, **kwargs) - -# # Compute DEM + chi square for this fit -# # SELFNOTEJOY - output currently does not have units. unts=cm^5 * K^-1 Make this a test -# dem = self._reconstruct_dem_from_knots( -# result.params -# ) # SELFNOTEJOY - here is the stamp to defining the DEM - triple check -# dem_mid = 0.5 * (dem[:-1] + dem[1:]) -# R_mid = 0.5 * (self._response_matrix[:, :-1] + self._response_matrix[:, 1:]) -# T_mid = 0.5 * (self.T[:-1] + self.T[1:]).to_value(u.K) -# I_fit = np.sum(R_mid * dem_mid * T_mid * self.dlnT, axis=1) - -# sigma = self.intensity_errors.to_value(u.DN / u.s) -# residuals = (self._observed_intensities - I_fit) / sigma -# chi2 = np.sum(residuals**2) - -# print(f"x square = {chi2:.3e}") - -# results[method] = (result, chi2) - -# if chi2 < best_chi2: -# best_chi2 = chi2 -# best_result = result -# best_method = method - -# print(f"\n>>> Best method: {best_method} with x square = {best_chi2:.3e}") - -# # Store outputs from the best fit -# best_dem = self._reconstruct_dem_from_knots(best_result.params) -# self.dem = best_dem -# dem_mid = 0.5 * (best_dem[:-1] + best_dem[1:]) -# R_mid = 0.5 * (self._response_matrix[:, :-1] + self._response_matrix[:, 1:]) -# T_mid = 0.5 * (self.T[:-1] + self.T[1:]).to_value(u.K) -# self.fitted_intensities = np.sum(R_mid * dem_mid * T_mid * self.dlnT, axis=1) -# sigma = self.intensity_errors.to_value(u.DN / u.s) -# residuals = (self._observed_intensities - self.fitted_intensities) / sigma -# self.chi2 = np.sum(residuals**2) -# dof = len(self._observed_intensities) - len(best_result.params) -# self.redchi2 = self.chi2 / max(dof, 1) - -# return best_result - -# def 
run_monte_carlo( -# self, n_runs=None, n_knots=6, method="least_squares", random_seed=None -# ): -# from tqdm import tqdm # add this at top of file - -# if n_runs is None: -# n_runs = self._monte_carlo_runs -# if n_runs <= 0: -# raise ValueError("Monte Carlo runs disabled (n_runs=0).") - -# if random_seed is not None: -# np.random.seed(random_seed) - -# sigma = self.intensity_errors.to_value(u.DN / u.s) -# dem_ensemble = [] - -# self._last_n_knots = n_knots - -# # --- progress bar -# for i in tqdm(range(n_runs), desc="Monte Carlo DEM fits", unit="run"): -# noisy_obs = self._observed_intensities + np.random.normal(0, sigma) -# self._active_observed_intensities = noisy_obs -# try: -# params = self._build_lmfit_parameters(n_knots=n_knots) -# result = minimize(self._residuals, params, method=method) -# finally: -# if hasattr(self, "_active_observed_intensities"): -# delattr(self, "_active_observed_intensities") - -# dem_i = self._reconstruct_dem_from_knots(result.params) -# dem_ensemble.append(dem_i) - -# dem_ensemble = np.array(dem_ensemble) -# self._dem_ensemble = dem_ensemble -# self.dem_uncertainty = np.std(dem_ensemble, axis=0) -# self.dem_median = np.median(dem_ensemble, axis=0) - -# return dem_ensemble - -# # ------------------------------------------------------------------------------------------------------------------------------- - # ################################################################################################################################ # ################################################################################################################################ # #############************************** Start of error Summary **************************########################## From 27c23d2c106ae4d834aa7b8d3348d78e0cf8e51e Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Fri, 21 Nov 2025 19:48:03 -0500 Subject: [PATCH 094/121] Cleaning up plotting to just two. 
--- xrtpy/xrt_dem_iterative/dem_plotting.py | 389 +++++++----------------- 1 file changed, 117 insertions(+), 272 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_plotting.py b/xrtpy/xrt_dem_iterative/dem_plotting.py index 8ef58a8d2..1747c6bda 100644 --- a/xrtpy/xrt_dem_iterative/dem_plotting.py +++ b/xrtpy/xrt_dem_iterative/dem_plotting.py @@ -1,315 +1,160 @@ __all__ = [ - "plot_dem_results", - "plot_dem_uncertainty", - "plot_idl_style", - "plot_fit_residuals", - "plot_dem_with_median_bins", - "plot_iteration_stats", + "plot_dem", + "plot_dem_mc", + "plot_observed_vs_modeled", ] +import astropy.units as u import matplotlib.pyplot as plt import numpy as np -import astropy.units as u -############################################################################### -# Plotting function 1 -############################################################################### -def plot_dem_results(dem): +def plot_dem(solver): """ - Plot the fitted DEM solution (with optional Monte Carlo uncertainty). + Plot the base DEM solution (no Monte Carlo needed). Parameters ---------- - dem : XRTDEMIterative - Solver object. If not yet solved, .solve() will be called. + solver : XRTDEMIterative + A solved DEM object (xrtpy.xrt_dem_iterative.XRTDEMIterative). + If .dem does not exist yet, solve() will be called. 
""" - if not hasattr(dem, "dem"): - dem.solve() + # Ensure base DEM is available + if not hasattr(solver, "dem"): + solver.solve() - logT = dem.logT - best_fit = dem.dem - dem_err = getattr(dem, "dem_uncertainty", None) + logT = solver.logT + dem = np.asarray(solver.dem, dtype=float) - fig, ax = plt.subplots(figsize=(8, 6)) + # Avoid log10(0) + dem_safe = np.clip(dem, 1e-40, None) - ax.step( + plt.figure(figsize=(8, 5)) + plt.step( logT, - np.log10(best_fit + 1e-40), + np.log10(dem_safe), where="mid", - color="blue", - linewidth=2, - label="Best-fit DEM", + color="black", + linewidth=2.0, + label="Base DEM", ) - if dem_err is not None: - upper = np.log10(best_fit + dem_err + 1e-40) - lower = np.log10(np.clip(best_fit - dem_err, 1e-40, None)) - ax.fill_between( - logT, lower, upper, step="mid", color="blue", alpha=0.2, label="+/-1σ" - ) + plt.xlabel(r"log$_{10} T$ [K]") + plt.ylabel(r"log$_{10}$ DEM [cm$^{-5}$ K$^{-1}$]") + plt.title("Base DEM Solution") + plt.grid(True, alpha=0.3) + plt.xlim(logT.min(), logT.max()) - ax.set_xlabel("log10 T [K]") - ax.set_ylabel("log10 DEM [cm$^{-5}$ K$^{-1}$]") - ax.set_xlim(logT.min(), logT.max()) - ax.set_ylim( - np.floor(np.log10(best_fit.min() + 1e-40)), - np.ceil(np.log10(best_fit.max() + 1e-40)), - ) - ax.set_title("DEM Solution") - ax.legend() - ax.grid(alpha=0.3) + # Nice y-limits based on finite values + finite = np.isfinite(np.log10(dem_safe)) + if np.any(finite): + y = np.log10(dem_safe[finite]) + pad = 0.3 * (y.max() - y.min() + 1e-6) + plt.ylim(y.min() - pad, y.max() + pad) + plt.legend() plt.tight_layout() plt.show() -############################################################################### -# Plotting function 2 -############################################################################### -def plot_dem_uncertainty(dem): - """ - Plot DEM with Monte Carlo uncertainty band. 
+def plot_dem_mc( + solver, + mc_color="gray", + base_color="red", + alpha_mc=0.2, + lw_mc=1.0, + lw_base=2.0, + show_envelope=True, +): """ - if not hasattr(dem, "dem"): - dem.solve() - if not hasattr(dem, "dem_uncertainty"): - raise AttributeError("No DEM uncertainty found. Run with monte_carlo_runs > 0.") - - logT = dem.logT - best_fit = dem.dem - dem_err = dem.dem_uncertainty - - fig, ax = plt.subplots(figsize=(8, 6)) - - ax.step( - logT, - np.log10(best_fit + 1e-40), - where="mid", - color="blue", - linewidth=2, - label="Best-fit DEM", - ) - - upper = np.log10(best_fit + dem_err + 1e-40) - lower = np.log10(np.clip(best_fit - dem_err, 1e-40, None)) - ax.fill_between( - logT, lower, upper, step="mid", color="blue", alpha=0.2, label="+/-1σ" - ) - - ax.set_xlabel("log10 T [K]") - ax.set_ylabel("log10 DEM [cm$^{-5}$ K$^{-1}$]") - ax.set_xlim(logT.min(), logT.max()) - ax.set_ylim( - np.floor(np.log10(best_fit.min() + 1e-40)), - np.ceil(np.log10((best_fit + dem_err).max() + 1e-40)), - ) - ax.set_title("DEM with Monte Carlo Uncertainty") - ax.legend() - ax.grid(alpha=0.3) - - plt.tight_layout() - plt.show() + Plot DEM with Monte Carlo ensemble. + - Base DEM in red. + - Each Monte Carlo realization as a thin gray step curve. + - Optional 68% envelope (16–84 percentile) if MC is available. -############################################################################### -# Plotting function 3 -############################################################################### -def plot_idl_style(dem): + If no Monte Carlo results are present, this gracefully falls back + to plotting only the base DEM. """ - Faithful mirror of IDL's xrt_dem_iterative2.pro plotting style. 
- - - Black dotted lines -> Monte Carlo DEMs (if available) - - Green line -> Best-fit DEM - """ - if not hasattr(dem, "dem"): - dem.solve() - - logT = dem.logT - best_fit = dem.dem - - fig, ax = plt.subplots(figsize=(8, 6)) - - if hasattr(dem, "_dem_ensemble"): - mc_dems = np.array(dem._dem_ensemble) - for i in range(mc_dems.shape[0]): - ax.step( + # Ensure base DEM is available + if not hasattr(solver, "dem"): + solver.solve() + + logT = solver.logT + base_dem = np.asarray(solver.dem, dtype=float) + base_dem_safe = np.clip(base_dem, 1e-40, None) + + has_mc = hasattr(solver, "mc_dem") and solver.mc_dem is not None + + if has_mc: + mc_dem = np.asarray(solver.mc_dem, dtype=float) # shape (N+1, n_T) + # If someone set monte_carlo_runs=0, mc_dem may be (1, n_T) + N = max(0, mc_dem.shape[0] - 1) + else: + mc_dem = None + N = 0 + + plt.figure(figsize=(9, 6)) + + # --- Monte Carlo curves (index 1..N) --- + if has_mc and N > 0: + for i in range(1, N + 1): + dem_i = np.clip(mc_dem[i, :], 1e-40, None) + plt.step( logT, - np.log10(mc_dems[i] + 1e-40), + np.log10(dem_i), where="mid", - linestyle=":", - color="black", - alpha=0.3, - linewidth=0.6, + color=mc_color, + alpha=alpha_mc, + linewidth=lw_mc, ) - ax.step( - logT, - np.log10(best_fit + 1e-40), - where="mid", - color="green", - linewidth=2, - label="Best-fit DEM", - ) - - ax.set_xlabel("log10 T [K]") - ax.set_ylabel("log10 DEM [cm$^{-5}$ K$^{-1}$]") - ax.set_xlim(logT.min(), logT.max()) - ax.set_ylim( - np.floor(np.log10(best_fit.min() + 1e-40)), - np.ceil(np.log10(best_fit.max() + 1e-40)), - ) - ax.set_title("DEM (IDL Style)") - ax.legend() - ax.grid(alpha=0.3) - - plt.tight_layout() - plt.show() - - -############################################################################### -# Plotting function 4 -############################################################################### -def plot_fit_residuals(dem): - """ - Plot observed vs fitted intensities and residuals. 
- """ - if not hasattr(dem, "dem"): - dem.solve() - - if not hasattr(dem, "fitted_intensities"): - raise AttributeError( - "No fitted intensities found. Run fit_dem() or solve() first." - ) - - obs = dem._observed_intensities - fit = dem.fitted_intensities - sigma = dem.intensity_errors.to_value(u.DN / u.s) - - filters = dem.filter_names - indices = np.arange(len(obs)) - - plt.figure(figsize=(7, 5)) - plt.errorbar(indices, obs, yerr=sigma, fmt="o", label="Observed", color="black") - plt.plot(indices, fit, "s", label="Fitted", color="red") - plt.xticks(indices, filters, rotation=45) - plt.ylabel("Intensity [DN/s/pix]") - plt.title("Observed vs Fitted Intensities") - plt.legend() - plt.tight_layout() - plt.show() - - residuals = (obs - fit) / sigma - plt.figure(figsize=(7, 4)) - plt.axhline(0, color="gray", linestyle="--") - plt.plot(indices, residuals, "o", color="blue") - plt.xticks(indices, filters, rotation=45) - plt.ylabel("(Obs - Fit) / σ") - plt.title("Residuals per Filter") - plt.tight_layout() - plt.show() - - -############################################################################### -# Plotting function 5 -############################################################################### -def plot_dem_with_median_bins(dem): - """ - Reproduce IDL-style DEM plot with: - - Best-fit DEM (green) - - Monte Carlo DEMs (dotted black) - - Median DEM (blue) - - Closest DEM to the median (orange) - """ - if not hasattr(dem, "dem"): - dem.solve() - if not hasattr(dem, "_dem_ensemble"): - raise AttributeError( - "Monte Carlo ensemble not available. Run with monte_carlo_runs > 0." 
- ) + # Optional envelope (16–84 percentile over MC curves only) + if show_envelope and N > 1: + dem_low = np.percentile(mc_dem[1:, :], 16, axis=0) + dem_high = np.percentile(mc_dem[1:, :], 84, axis=0) - logT = dem.logT - mc_dems = np.array(dem._dem_ensemble) - best_fit = dem.dem + dem_low = np.clip(dem_low, 1e-40, None) + dem_high = np.clip(dem_high, 1e-40, None) - med = np.median(mc_dems, axis=0) - diffs = np.linalg.norm(mc_dems - med, axis=1) - closest_idx = np.argmin(diffs) - closest_dem = mc_dems[closest_idx] - - fig, ax = plt.subplots(figsize=(9, 6)) - - for i in range(mc_dems.shape[0]): - ax.step( - logT, - np.log10(mc_dems[i] + 1e-40), - where="mid", - linestyle=":", - color="black", - alpha=0.3, - linewidth=0.6, - ) + plt.fill_between( + logT, + np.log10(dem_low), + np.log10(dem_high), + step="mid", + color=mc_color, + alpha=0.2, + label="68% interval (MC)", + ) - ax.step( + # --- Base DEM --- + plt.step( logT, - np.log10(best_fit + 1e-40), + np.log10(base_dem_safe), where="mid", - color="green", - linewidth=2, - label="Obs DEM", + color=base_color, + linewidth=lw_base, + label="Base DEM", ) - ax.step( - logT, - np.log10(med + 1e-40), - where="mid", - color="blue", - linewidth=1.8, - label="Median in bins", - ) - ax.step( - logT, - np.log10(closest_dem + 1e-40), - where="mid", - color="orange", - linewidth=1.8, - label="Closest DEM to median", - ) - - ax.set_xlim(dem.min_T, dem.max_T) - ax.set_ylim(0, 30) - ax.set_xlabel("Log T (K)") - ax.set_ylabel("Log DEM [cm$^{-5}$ K$^{-1}$]") - ax.legend() - ax.grid(alpha=0.3) - ax.set_title("DEM with Monte Carlo Spread, Median, and Closest Fit (IDL Style)") - plt.tight_layout() - plt.show() - - -############################################################################### -# Plotting function 6 -############################################################################### -def plot_iteration_stats(dem): - """ - Plot x^2 convergence across solver iterations. 
- """ - if not hasattr(dem, "_iteration_chi2") or len(dem._iteration_chi2) == 0: - raise AttributeError( - "No iteration stats found. Run fit_dem() or solve() first." - ) - - chi2_vals = np.array(dem._iteration_chi2) - - fig, ax = plt.subplots(figsize=(8, 5)) - ax.plot(range(len(chi2_vals)), chi2_vals, lw=1.5) - ax.set_xlabel("Iteration") - ax.set_ylabel("Chi²") - ax.set_title("Chi² Convergence") - ax.grid(alpha=0.3) - - if chi2_vals.max() / max(chi2_vals.min(), 1e-10) > 1e4: - ax.set_yscale("log") + plt.xlabel(r"log$_{10} T$ [K]") + plt.ylabel(r"log$_{10}$ DEM [cm$^{-5}$ K$^{-1}$]") + title_suffix = f" ({N} MC realizations)" if has_mc and N > 0 else "" + plt.title("DEM with Monte Carlo" + title_suffix) + plt.grid(True, alpha=0.3) + plt.xlim(logT.min(), logT.max()) + + # y-limits based on base DEM and MC if available + logs = [np.log10(base_dem_safe)] + if has_mc and N > 0: + logs.append(np.log10(np.clip(mc_dem[1:, :], 1e-40, None)).ravel()) + logs_all = np.concatenate(logs) + finite = np.isfinite(logs_all) + if np.any(finite): + y = logs_all[finite] + pad = 0.3 * (y.max() - y.min() + 1e-6) + plt.ylim(y.min() - pad, y.max() + pad) + plt.legend() plt.tight_layout() plt.show() From 9ca92edd2b1e3cc8f3b18e54b2dc54fa665bf776 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Fri, 21 Nov 2025 19:52:43 -0500 Subject: [PATCH 095/121] Clean up code and added summary function --- xrtpy/xrt_dem_iterative/dem_solver.py | 505 +++++++------------------- 1 file changed, 130 insertions(+), 375 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 191afd505..78b14a893 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -628,26 +628,8 @@ def _prepare_scaled_observations(self): ############################ Everything line of code ABOVE is PREP for the DEM ############################################# - 
################################################################################################################################ - ################################################################################################################################ - #################################### structure with all fields the DEM solver expects ########################################## - # 1 Temperature - self.logT , self.T - # 2 Response - note - interpolated onto your logT grid. - self.interpolated_responses, self._response_matrix - # 3 # of bins - n_bins - # 4 i_obs – self._observed_intensites - measured DN/s/pixel scaled by solv_factor - # self.observed_intensities - # 5 uncertainty on the intensity - Also scaled by solv_factor. - self.intensity_errors self.normalization_factor - # 6 units? - - ################################################################################################################################ - ################################################################################################################################ - ################################################################################################################################ - # **************************************************************************************************************************** ############################ Everything line of code BELOW is FOR the DEM ################################################## - # **************************************************************************************************************************** - - # ------------------------------------------------------------------------------------------------------------------------------- #############************************** Start of INITIAL ROUGH DEM ESTIMATE **************************########################## ################## An estimated EM shape based on simple intensity-over-response peaks, smoothed across T. 
##################### @@ -917,64 +899,6 @@ def _residuals(self, params): return residuals - # def _solve_single_dem(self, observed_intensities_vals: np.ndarray): - # """ - # Solve the DEM once for a given set of observed intensities (base or MC-perturbed). - - # Parameters - # ---------- - # observed_intensities_vals : ndarray - # 1D array of observed intensities in DN/s/pix (no units attached). - - # Returns - # ------- - # dem_phys : ndarray - # DEM(T) in physical units [cm^-5 K^-1] on self.logT. - # modeled_intensities_phys : ndarray - # Modeled intensities in DN/s/pix for each channel. - # chisq : float - # Sum of squared residuals for this run. - # fit_result : lmfit.MinimizerResult - # Full lmfit result object (for diagnostics). - # """ - # # 1. Set scaled observations for this run (IDL: i_obs = i_obs/solv_factor) - # nf = self._normalization_factor - # self.intensities_scaled = observed_intensities_vals / nf - # # Errors are the same for all runs; scale once - # sigma_phys = self.intensity_errors.to_value(u.DN / u.s) - # self.sigma_scaled_intensity_errors = sigma_phys / nf - - # # 2. If all intensities are zero → nosolve, DEM = 0 - # if np.all(self.intensities_scaled == 0.0): - # dem_scaled = np.zeros_like(self.logT, dtype=float) - # dem_phys = dem_scaled * nf - # modeled_intensities_phys = np.zeros_like(observed_intensities_vals) - # chisq = 0.0 - # fit_result = None - # return dem_phys, modeled_intensities_phys, chisq, fit_result - - # # 3. Initial DEM & spline system (IDL: xrt_dem_iter_estim + mp_prep) - # self._estimate_initial_dem() - # self._prepare_spline_system() - # params = self._build_lmfit_parameters() - - # # 4. Run the least-squares solver (IDL: xrt_dem_iter_solver + mpfit) - # result = minimize(self._residuals, params, max_nfev=self._max_iterations) - - # # 5. 
Reconstruct DEM in *scaled* units, then convert to physical - # dem_scaled = self._reconstruct_dem_from_knots(result.params) # cm^-5 K^-1 / nf - # dem_phys = dem_scaled * nf # undo normalization, like IDL - - # # 6. Modeled intensities (IDL: i_mod = dem ## pm * abunds) - # i_mod_scaled = (self.pm_matrix @ dem_scaled) * self.abundances - # modeled_intensities_phys = i_mod_scaled * nf # back to DN/s/pix - - # # 7. χ² from residuals - # resid = self._residuals(result.params) - # chisq = float(np.sum(resid**2)) - - # return dem_phys, modeled_intensities_phys, chisq, result - def _solve_single_dem(self, observed_intensities_vals: np.ndarray): nf = self._normalization_factor @@ -1101,147 +1025,6 @@ def _run_monte_carlo(self, result_params): #############************************** Start of DEM SOLVER **************************########################## - # def solve(self): - # """ - # High-level DEM solver. - - # Pythonic analogue of IDL's xrt_dem_iterative2.pro: - - # 1. Validate inputs. - # 2. Build the logarithmic temperature grid and interpolate responses. - # 3. Solve ONE base DEM using the original (unperturbed) intensities. - # 4. If Monte Carlo is requested (monte_carlo_runs > 0), perform N - # perturbed solves by adding Gaussian noise to the base intensities. - # 5. Store all outputs on the instance for later analysis/plotting. - - # After calling solve(), the following attributes are defined: - - # Base solution - # ------------- - # logT : ndarray, shape (n_T,) - # Temperature grid (log10 K) on which the DEM is defined. - # dem : ndarray, shape (n_T,) - # Best-fit DEM(T) in linear units [cm^-5 K^-1]. - # This is already scaled back by `normalization_factor`. - # chisq : float - # Chi-square of the base fit (sum of squared residuals). - # modeled_intensities : ndarray, shape (n_channels,) - # Best-fit modeled intensities in physical units [DN s^-1 pix^-1]. 
- # _base_fit_result : lmfit.Result - # The lmfit result object for the base fit (for debugging/inspection). - - # Monte Carlo products - # -------------------- - # mc_dem : ndarray, shape (N+1, n_T) - # DEM curves for base (row 0) and each Monte Carlo run (rows 1..N). - # All in physical units [cm^-5 K^-1]. - # mc_chisq : ndarray, shape (N+1,) - # Chi-square values for base (index 0) and each MC run. - # mc_base_obs : ndarray, shape (N+1, n_channels) - # Observed intensities for base + each MC run [DN s^-1 pix^-1]. - # Row 0 is the original observation; rows 1..N are perturbed. - # mc_mod_obs : ndarray, shape (N+1, n_channels) - # Modeled intensities corresponding to each DEM realization, - # in physical units [DN s^-1 pix^-1]. - - # Notes - # ----- - # * Column/row 0 always holds the BASE (unperturbed) solution. - # Rows 1..N hold the Monte Carlo solutions. - # * Monte Carlo perturbations follow the IDL scheme: - # I_obs' = I_obs + N(0, sigma), with sigma from `intensity_errors`, - # and clipped at 0 (no negative intensities). - # """ - - # # ------------------------------------------------------------------ - # # 0) Validate inputs (IDL: argument checking at top of xrt_dem_iterative2) - # # ------------------------------------------------------------------ - # self.validate_inputs() - - # # ------------------------------------------------------------------ - # # 1) Build temperature grid and response matrix (IDL: regular logT grid + interpol) - # # ------------------------------------------------------------------ - # self.create_logT_grid() - # self._interpolate_responses_to_grid() - - # # Base observed intensities in physical units [DN/s/pix] - # base_obs_phys = self._observed_intensities.astype(float) - - # # ------------------------------------------------------------------ - # # 2) Solve base DEM (no perturbations) - # # This is the analogue of the "base" call to xrt_dem_iter_nowidget. 
- # # ------------------------------------------------------------------ - # dem_base, mod_base, chisq_base, base_result = self._solve_single_dem( - # observed_intensities_vals=base_obs_phys - # ) - - # #first dem testing - # self.first = dem_base - - # # Store base solution on the instance - # # logT is already stored by create_logT_grid(), but keep alias if desired. - # self.logT_solution = self.logT.copy() - # self.dem = dem_base # DEM(T) in [cm^-5 K^-1] - # self.chisq = chisq_base # sum of squared residuals - # self.modeled_intensities = mod_base # [DN/s/pix] - # self._base_fit_result = base_result # lmfit.Result for debugging - - # # ------------------------------------------------------------------ - # # 3) Allocate Monte Carlo arrays - # # IDL: base_obs, dem_out, chisq, mod_obs grow in the 2nd dimension. - # # Here we store as (N+1, ...) with row 0 = base solution. - # # ------------------------------------------------------------------ - # n_T = self.logT.size - # n_ch = base_obs_phys.size - # N = self.monte_carlo_runs - - # self.mc_dem = np.zeros((N + 1, n_T), dtype=float) - # self.mc_chisq = np.zeros((N + 1,), dtype=float) - # self.mc_base_obs = np.zeros((N + 1, n_ch), dtype=float) - # self.mc_mod_obs = np.zeros((N + 1, n_ch), dtype=float) - - # # Row 0 = base solution (unperturbed) - # self.mc_dem[0, :] = dem_base - # self.mc_chisq[0] = chisq_base - # self.mc_base_obs[0, :] = base_obs_phys - # self.mc_mod_obs[0, :] = mod_base - - # # ------------------------------------------------------------------ - # # 4) Monte Carlo loop (IDL: Section 3.6) - # # ------------------------------------------------------------------ - # if N > 0: - # # Use default_rng() seeded from system entropy (IDL: seed = systime(1)) - # rng = np.random.default_rng() - - # # Base intensity errors in physical units [DN/s/pix] - # sigma_phys = self.intensity_errors.to_value(u.DN / u.s) - - # for ii in range(1, N + 1): - # # Optional lightweight progress indicator (~20 updates max) - # 
if ii % max(1, N // 20) == 0: - # print(f" - Monte Carlo run {ii}/{N}") - - # # --- 4a) Perturb intensities: I' = I + N(0, sigma), clipped at 0 --- - # noise = rng.normal(loc=0.0, scale=sigma_phys, size=base_obs_phys.shape) - # obs_pert = base_obs_phys + noise - # obs_pert = np.maximum(obs_pert, 0.0) # IDL: > 0.0 to avoid negatives - - # # --- 4b) Solve DEM for this perturbed realization --- - # dem_i, mod_i, chisq_i, _ = self._solve_single_dem( - # observed_intensities_vals=obs_pert - # ) - - # # --- 4c) Store Monte Carlo results --- - # self.mc_dem[ii, :] = dem_i - # self.mc_chisq[ii] = chisq_i - # self.mc_base_obs[ii, :] = obs_pert - # self.mc_mod_obs[ii, :] = mod_i - - # # ------------------------------------------------------------------ - # # 5) Return DEM for convenience (common pattern in Python APIs) - # # ------------------------------------------------------------------ - # return self.dem - def solve(self): """ High-level DEM solver. @@ -1284,25 +1067,20 @@ def solve(self): Modeled intensities [DN s^-1 pix^-1] corresponding to mc_dem. 
""" - # -------------------------------------------------------------- - # 0) Validate inputs (IDL: argument checks near top) - # -------------------------------------------------------------- + # Validate inputs (IDL: argument checks near top) + self.validate_inputs() - # -------------------------------------------------------------- - # 1) Build logT grid and response matrix - # (IDL: regular logT grid + interpolated emissivities) - # -------------------------------------------------------------- + # 1) Build logT grid and response matrix - IDL: regular logT grid + interpolated emissivities + self.create_logT_grid() self._interpolate_responses_to_grid() # Base observed intensities in physical units [DN/s/pix] base_obs_phys = np.asarray(self._observed_intensities, dtype=float) - # -------------------------------------------------------------- - # 2) Solve BASE DEM (unperturbed intensities) - # Corresponds to the first call to xrt_dem_iter_nowidget in IDL. - # -------------------------------------------------------------- + # 2) Solve BASE DEM (unperturbed intensities) Corresponds to the first call to xrt_dem_iter_nowidget in IDL. 
+ dem_base, mod_base, chisq_base, base_result = self._solve_single_dem( observed_intensities_vals=base_obs_phys ) @@ -1314,10 +1092,8 @@ def solve(self): self.modeled_intensities = mod_base # [DN/s/pix] self._base_fit_result = base_result - # -------------------------------------------------------------- - # 3) Allocate Monte Carlo arrays - # (IDL: base_obs, dem_out, chisq, mod_obs) - # -------------------------------------------------------------- + # 3) Allocate Monte Carlo arrays (IDL: base_obs, dem_out, chisq, mod_obs) + n_T = self.logT.size n_ch = base_obs_phys.size N = self.monte_carlo_runs @@ -1333,9 +1109,8 @@ def solve(self): self.mc_base_obs[0, :] = base_obs_phys self.mc_mod_obs[0, :] = mod_base - # -------------------------------------------------------------- - # 4) Monte Carlo loop (IDL: Section 3.6) - # -------------------------------------------------------------- + # 4) Monte Carlo loop + if N > 0: rng = np.random.default_rng() # like IDL's systime(1) seeding @@ -1427,150 +1202,130 @@ def plot_dem_with_monte_carlo( label="Base DEM", ) - plt.xlabel("log₁₀(T) [K]") - plt.ylabel("log₁₀(DEM)") + plt.xlabel("log10(T) [K]") + plt.ylabel("log10(DEM)") plt.title(f"DEM Monte Carlo ({N} realizations)") plt.grid(True) plt.legend() plt.show() + def summary(self): + """ + Print a complete diagnostic summary of the DEM solver state, + including input parameters, solver configuration, response matrix, + base DEM fit, Monte Carlo ensemble, and available plotting helpers. + + Designed to mimic the detailed transparent reporting style + of IDL’s xrt_dem_iterative2.pro, but more informative. 
+ """ + import numpy as np + + print("\n" + "=" * 70) + print("XRTpy DEM Iterative — Solver Summary") + print("=" * 70) + + # ----------------------------------------------------- + print("\nINPUTS") + print("-" * 70) + print(f" Filters: {self.filter_names}") + print( + f" Observed Intensities: {np.array(self._observed_intensities)} DN/s/pix" + ) + print(f" Number of channels: {len(self._observed_intensities)}") + + # Error model + if self._intensity_errors is not None: + print(f" Intensity Errors: User-provided ({self._intensity_errors})") + else: + print(f" Intensity Errors: Auto-estimated (0.03*I, min=2 DN/s)") + print( + f" Error values: {self.intensity_errors.to_value('DN/s')} DN/s" + ) + + # ----------------------------------------------------- + print("\nTEMPERATURE GRID") + print("-" * 70) + if hasattr(self, "logT"): + print(f" logT range: {self.logT[0]:.2f} – {self.logT[-1]:.2f}") + print(f" Number of bins: {len(self.logT)}") + print(f" ΔlogT: {self.dlogT:.3f}") + print(f" ΔlnT: {self.dlnT:.3f}") + else: + print(" Grid has not been constructed yet (call solve()).") + + # ----------------------------------------------------- + print("\nRESPONSE MATRIX") + print("-" * 70) + if hasattr(self, "_response_matrix"): + print( + f" Response matrix shape: {self._response_matrix.shape} (filters × T bins)" + ) + print(f" Response unit: {self._response_unit}") + else: + print(" Response matrix not constructed yet.") + + # ----------------------------------------------------- + print("\nSOLVER CONFIGURATION") + print("-" * 70) + print(f" Normalization factor: {self.normalization_factor:.2e}") + print(f" Max iterations: {self.max_iterations}") + print(f" Monte Carlo runs: {self.monte_carlo_runs}") + if hasattr(self, "n_spl"): + print(f" Spline knots: {self.n_spl}") + else: + print(" Spline knots: (not prepared yet)") + + # ----------------------------------------------------- + print("\nINITIAL DEM GUESS") + print("-" * 70) + if hasattr(self, "_initial_log_dem"): + 
print(" Initial DEM (log10): flat initial estimate") + print(f" First 5 values: {self._initial_log_dem[:5]}") + else: + print(" Initial DEM has not been estimated yet.") + + # ----------------------------------------------------- + + print("\nBASE DEM SOLUTION") + print("-" * 70) + if hasattr(self, "dem"): + print(f" DEM shape: {self.dem.shape}") + print(f" First 5 DEM bins: {self.dem[:5]}") + print(f" log10(DEM) first 5: {np.log10(self.dem[:5] + 1e-99)}") + print(f" Chi-square: {self.chisq:.4e}") + print(f" Modeled intensities: {self.modeled_intensities}") + else: + print(" No DEM solution computed yet (call solve()).") + + # ----------------------------------------------------- + + print("\nMONTE CARLO ENSEMBLE") + print("-" * 70) + if hasattr(self, "mc_dem"): + N = self.mc_dem.shape[0] - 1 + print(f" MC realizations: {N}") + if N > 0: + median = np.median(self.mc_dem[1:], axis=0) + p16, p84 = np.percentile(self.mc_dem[1:], [16, 84], axis=0) + + print(f" MC DEM preview:") + print(f" Median (first 5): {median[:5]}") + print( + f" 1σ bounds (log10): " + f"{np.log10(p16[0] + 1e-99):.2f} – {np.log10(p84[0] + 1e-99):.2f}" + ) + else: + print(" MC array allocated but N=0 (no Monte Carlo).") + else: + print(" No Monte Carlo results available.") + + # ----------------------------------------------------- + print("\nPLOTTING OPTIONS") + print("-" * 70) + print(" • plot_dem() Base DEM only") + print(" • plot_dem_mc() Best-fit + MC curves") + print("\n" + "=" * 70 + "\n") + -# Attach plotting functions from plotting.py to the class -XRTDEMIterative.plot_dem_results = dem_plotting.plot_dem_results -# XRTDEMIterative.plot_dem_uncertainty = dem_plotting.plot_dem_uncertainty -XRTDEMIterative.plot_idl_style = dem_plotting.plot_idl_style -XRTDEMIterative.plot_fit_residuals = dem_plotting.plot_fit_residuals -XRTDEMIterative.plot_dem_with_median_bins = dem_plotting.plot_dem_with_median_bins -XRTDEMIterative.plot_iteration_stats = dem_plotting.plot_iteration_stats 
XRTDEMIterative.plot_dem = dem_plotting.plot_dem -XRTDEMIterative.plot_dem_uncertainty = dem_plotting.plot_dem_uncertainty -XRTDEMIterative.plot_mc_fan = dem_plotting.plot_mc_fan -XRTDEMIterative.plot_mc_hist_at_temperature = dem_plotting.plot_mc_hist_at_temperature -XRTDEMIterative.plot_mc_chisq = dem_plotting.plot_mc_chisq -XRTDEMIterative.plot_modeled_vs_observed = dem_plotting.plot_modeled_vs_observed - -XRTDEMIterative.plot_dem_with_mc_vertical_bars = ( - dem_plotting.plot_dem_with_mc_vertical_bars -) -XRTDEMIterative.plot_dem_mc_vertical_bars = dem_plotting.plot_dem_mc_vertical_bars - - -################################################################################################################################ -################################################################################################################################ -#############************************** END of DEM SOLVER **************************########################## -################################################################################################################################ -################################################################################################################################ - - -# ################################################################################################################################ -# ################################################################################################################################ -# #############************************** Start of error Summary **************************########################## -# ################## ##################### - -# def summary(self): -# """ -# Print a comprehensive summary of the DEM solver setup, -# including inputs, solver configuration, fit results, -# Monte Carlo ensemble status, and available plotting helpers. 
-# """ -# print("\nXRTpy DEM Iterative Setup Summary\n") -# print("=" * 65) - -# # Filters & Observations -# print(f" Filters: {self.filter_names}") -# print(f" Observed Intensities: {self.observed_intensities}") -# print(f" Number of channels: {len(self._observed_intensities)}") - -# # Errors -# print(f" Intensity Errors: {self.intensity_errors}") -# if self._intensity_errors is not None: -# print(" Error model used: User-provided") -# else: -# print( -# f" Error model used: Auto-estimated " -# f"(obs * 0.03, min={self.min_observational_error.value} DN/s)" -# ) -# print(" [IDL reference: xrt_dem_iterative2.pro]") - -# # Temperature grid -# print( -# f" Temperature grid: logT {self.minimum_bound_temperature:.2f}–{self.maximum_bound_temperature:.2f}, step {self.logarithmic_temperature_step_size}" -# ) -# print(f" Temp bins: {len(self.logT)}") -# print(f" dlogT: {self.dlogT:.3f}, dlnT: {self.dlnT:.3f}") - -# # Solver setup -# print(f" Solver factor: {self.normalization_factor:.1e}") -# print(f" Monte Carlo runs: {self.monte_carlo_runs or 'None'}") -# print(f" Max Iterations: {self.max_iterations}") -# print(f" Knots (n_knots): {getattr(self, '_last_n_knots', 'default=6')}") - -# if hasattr(self, "chi2"): -# dof = len(self._observed_intensities) - len( -# getattr(self, "_init_knot_params", []) -# ) -# print(f" χ²: {self.chi2:.4e} (dof={dof})") - -# # Responses -# print(f" Response unit: {self._response_unit}") -# if hasattr(self, "_response_matrix"): -# print( -# f" Response matrix: {self._response_matrix.shape} (filters × bins)" -# ) -# else: -# print(" Response matrix: Not yet built") - -# # Fit results -# if hasattr(self, "dem"): -# print("\n Fit Results:") -# print(f" DEM bins: {self.dem.shape}") -# if hasattr(self, "chi2"): -# print(f" Chi²: {self.chi2:.4e}") -# if hasattr(self, "redchi2"): -# print(f" Reduced Chi²: {self.redchi2:.4e}") -# if hasattr(self, "fitted_intensities"): -# print(f" Fitted Intensities: {self.fitted_intensities}") - -# # Monte Carlo 
results -# if hasattr(self, "_dem_ensemble"): -# print("\n Monte Carlo Ensemble:") -# n_mc = len(self._dem_ensemble) -# print(f" Runs stored: {n_mc}") -# dem_stack = np.array(self._dem_ensemble) -# med = np.median(dem_stack, axis=0) -# spread = np.percentile(dem_stack, [16, 84], axis=0) -# print(" DEM median (log10 cm^-5 K^-1):") -# print(f" First 5 bins: {np.log10(med[:5]+1e-40)}") -# print(" DEM 1σ spread (first bin):") -# print( -# f" {np.log10(spread[0,0]+1e-40):.2f} – {np.log10(spread[1,0]+1e-40):.2f}" -# ) -# print(" Reproducibility: Run with random_seed for identical results") - -# if hasattr(self, "chi2"): -# print(f" Chi²: {self.chi2:.4e}") -# if hasattr(self, "redchi2"): -# print(f" Reduced Chi²: {self.redchi2:.4e}") -# if hasattr(self, "dof"): -# print(f" Degrees of Freedom: {self.dof}") -# if hasattr(self, "_iter_stats") and len(self._iter_stats["chisq"]) > 0: -# print(f" Iterations tracked: {len(self._iter_stats['chisq'])}") -# print(f" Final Iter χ²: {self._iter_stats['chisq'][-1]:.4e}") - -# # Plotting guidance -# print("\n Plotting Options:") -# if hasattr(self, "dem"): -# print(" • plot_dem_results(results) → Quick plot from solve() dictionary") -# print( -# " • plot_dem_uncertainty() → Best-fit DEM + shaded ±1σ (if MC available)" -# ) -# print( -# " • plot_idl_style() → IDL-style view (best-fit + MC curves)" -# ) -# print( -# " • plot_dem_with_median_bins() → Median + closest DEM (IDL style extension)" -# ) -# print(" • plot_fit_residuals() → Observed vs fitted intensities") -# print(" • plot_iteration_stats() ") - -# print("=" * 65) +XRTDEMIterative.plot_dem_mc = dem_plotting.plot_dem_mc From ccdf1128976a8e15ce767e798d57f065c1be2f33 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Mon, 24 Nov 2025 13:36:01 -0500 Subject: [PATCH 096/121] Cleaned up plotting functions --- xrtpy/xrt_dem_iterative/dem_plotting.py | 83 ++++++++++--------------- 1 file changed, 34 insertions(+), 49 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_plotting.py 
b/xrtpy/xrt_dem_iterative/dem_plotting.py
index 1747c6bda..a88085b6e 100644
--- a/xrtpy/xrt_dem_iterative/dem_plotting.py
+++ b/xrtpy/xrt_dem_iterative/dem_plotting.py
@@ -1,23 +1,20 @@
 __all__ = [
     "plot_dem",
     "plot_dem_mc",
-    "plot_observed_vs_modeled",
 ]
 
-import astropy.units as u
 import matplotlib.pyplot as plt
 import numpy as np
 
 
 def plot_dem(solver):
     """
-    Plot the base DEM solution (no Monte Carlo needed).
+    Plot the base DEM solution in log10 space (no Monte Carlo needed).
 
     Parameters
     ----------
     solver : XRTDEMIterative
-        A solved DEM object (xrtpy.xrt_dem_iterative.XRTDEMIterative).
-        If .dem does not exist yet, solve() will be called.
+        Fully initialized DEM solver.
     """
     # Ensure base DEM is available
     if not hasattr(solver, "dem"):
@@ -28,11 +25,12 @@ def plot_dem(solver):
 
     # Avoid log10(0)
     dem_safe = np.clip(dem, 1e-40, None)
+    log_dem = np.log10(dem_safe)
 
     plt.figure(figsize=(8, 5))
     plt.step(
         logT,
-        np.log10(dem_safe),
+        log_dem,
         where="mid",
         color="black",
         linewidth=2.0,
@@ -42,14 +40,14 @@ def plot_dem(solver):
     plt.xlabel(r"log$_{10} T$ [K]")
     plt.ylabel(r"log$_{10}$ DEM [cm$^{-5}$ K$^{-1}$]")
     plt.title("Base DEM Solution")
-    plt.grid(True, alpha=0.3)
+    plt.grid(visible=True, alpha=0.3)
     plt.xlim(logT.min(), logT.max())
 
-    # Nice y-limits based on finite values
-    finite = np.isfinite(np.log10(dem_safe))
+    # y-limits based on finite values
+    finite = np.isfinite(log_dem)
     if np.any(finite):
         y = np.log10(dem_safe[finite])
-        pad = 0.3 * (y.max() - y.min() + 1e-6)
+        pad = 0.25 * (y.max() - y.min() + 1e-6)
         plt.ylim(y.min() - pad, y.max() + pad)
 
     plt.legend()
@@ -59,33 +57,35 @@ def plot_dem(solver):
 
 def plot_dem_mc(
     solver,
-    mc_color="gray",
-    base_color="red",
-    alpha_mc=0.2,
+    mc_color="black",
+    base_color="#1E90FF",
+    alpha_mc=0.18,  # 0.2
     lw_mc=1.0,
     lw_base=2.0,
-    show_envelope=True,
+    figsize=(9, 6),
 ):
     """
-    Plot DEM with Monte Carlo ensemble.
+    Plot DEM with Monte Carlo ensemble (if present).
 
-    - Base DEM in red.
-    - Each Monte Carlo realization as a thin gray step curve.
-    - Optional 68% envelope (16–84 percentile) if MC is available.
+    - Base DEM: thick colored step curve - Dodger Blue
+    - MC curves: thin transparent Black
+    - Automatically chooses limits, even if MC not present
 
     If no Monte Carlo results are present, this gracefully falls back
    to plotting only the base DEM.
     """
+
    # Ensure base DEM is available
    if not hasattr(solver, "dem"):
        solver.solve()

    logT = solver.logT
    base_dem = np.asarray(solver.dem, dtype=float)
-    base_dem_safe = np.clip(base_dem, 1e-40, None)
+    base_safe_dem = np.clip(base_dem, 1e-100, None)  # 1e-40
+    log_base_dem = np.log10(base_safe_dem)

+    # Checking for Monte Carlo
    has_mc = hasattr(solver, "mc_dem") and solver.mc_dem is not None
-
    if has_mc:
        mc_dem = np.asarray(solver.mc_dem, dtype=float)  # shape (N+1, n_T)
        # If someone set monte_carlo_runs=0, mc_dem may be (1, n_T)
@@ -94,12 +94,12 @@ def plot_dem_mc(
        mc_dem = None
        N = 0

-    plt.figure(figsize=(9, 6))
+    plt.figure(figsize=figsize)

-    # --- Monte Carlo curves (index 1..N) ---
+    # Plot Monte Carlo curves (index 1..N)
    if has_mc and N > 0:
        for i in range(1, N + 1):
-            dem_i = np.clip(mc_dem[i, :], 1e-40, None)
+            dem_i = np.clip(mc_dem[i, :], 1e-100, None)  # 1e-40
            plt.step(
                logT,
                np.log10(dem_i),
@@ -109,50 +109,35 @@ def plot_dem_mc(
-    # Optional envelope (16–84 percentile over MC curves only)
-    if show_envelope and N > 1:
-        dem_low = np.percentile(mc_dem[1:, :], 16, axis=0)
-        dem_high = np.percentile(mc_dem[1:, :], 84, axis=0)
-
-        dem_low = np.clip(dem_low, 1e-40, None)
-        dem_high = np.clip(dem_high, 1e-40, None)
-
-        plt.fill_between(
-            logT,
-            np.log10(dem_low),
-            np.log10(dem_high),
-            step="mid",
-            color=mc_color,
-            alpha=0.2,
-            label="68% interval (MC)",
-        )
-
-    # --- Base DEM ---
+    # Base DEM
    plt.step(
        logT,
-        np.log10(base_dem_safe),
+        log_base_dem,
        where="mid",
        color=base_color,
        linewidth=lw_base,
        label="Base DEM",
    )

+    suffix = f" ({N} MC realizations)" if N > 0 else ""
+
plt.title("DEM with Monte Carlo" + suffix)
     plt.xlabel(r"log$_{10} T$ [K]")
     plt.ylabel(r"log$_{10}$ DEM [cm$^{-5}$ K$^{-1}$]")
-    title_suffix = f" ({N} MC realizations)" if has_mc and N > 0 else ""
-    plt.title("DEM with Monte Carlo" + title_suffix)
-    plt.grid(True, alpha=0.3)
+    plt.grid(visible=True, alpha=0.3)
     plt.xlim(logT.min(), logT.max())
 
     # y-limits based on base DEM and MC if available
-    logs = [np.log10(base_dem_safe)]
+    logs = [log_base_dem]
+
     if has_mc and N > 0:
-        logs.append(np.log10(np.clip(mc_dem[1:, :], 1e-40, None)).ravel())
+        logs.append(np.log10(np.clip(mc_dem[1:, :], 1e-100, None)).ravel())  # 1e-40
+
     logs_all = np.concatenate(logs)
+
     finite = np.isfinite(logs_all)
     if np.any(finite):
         y = logs_all[finite]
-        pad = 0.3 * (y.max() - y.min() + 1e-6)
+        pad = 0.25 * (y.max() - y.min() + 1e-6)
         plt.ylim(y.min() - pad, y.max() + pad)
 
     plt.legend()

From 18a60e661d7f4b6482a78881dc58af6a691dfef1 Mon Sep 17 00:00:00 2001
From: joyvelasquez
Date: Mon, 24 Nov 2025 13:51:32 -0500
Subject: [PATCH 097/121] Cleaning up minor code issues

---
 xrtpy/xrt_dem_iterative/dem_solver.py | 183 +++++---------------------
 1 file changed, 36 insertions(+), 147 deletions(-)

diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py
index 78b14a893..77e2ef399 100644
--- a/xrtpy/xrt_dem_iterative/dem_solver.py
+++ b/xrtpy/xrt_dem_iterative/dem_solver.py
@@ -10,6 +10,7 @@
 import numpy as np
 from lmfit import Parameters, minimize
 from scipy.interpolate import interp1d
+
 from xrtpy.util.filters import validate_and_format_filters
 from xrtpy.xrt_dem_iterative import dem_plotting
 
@@ -124,11 +125,11 @@ def __init__(
 
         # Validate Monte Carlo setting
         if isinstance(monte_carlo_runs, bool):
-            raise ValueError(
+            raise TypeError(
                 "monte_carlo_runs must be a non-negative whole number, not a boolean."
) elif ( - isinstance(monte_carlo_runs, (int, np.integer)) + isinstance(monte_carlo_runs, int | np.integer) or isinstance(monte_carlo_runs, float) and monte_carlo_runs.is_integer() ): @@ -143,7 +144,7 @@ def __init__( raise ValueError("monte_carlo_runs must be ≥ 0.") # Validate max_iterations - if not isinstance(max_iterations, (int, np.integer)) or max_iterations <= 0: + if not isinstance(max_iterations, int | np.integer) or max_iterations <= 0: raise ValueError("max_iterations must be a positive integer.") self._max_iterations = int(max_iterations) @@ -186,27 +187,21 @@ def __init__( f" Filter channels: {len(self.observed_channel)}\n" ) - # I am commenting this out because it is redundant since I am defining it below again. I wanna be consistent in using the same logT below.- Remove after testing - # self.logT = np.arange( - # self._minimum_bound_temperature, - # self._maximum_bound_temperature - # + self._logarithmic_temperature_step_size / 2, - # self._logarithmic_temperature_step_size, - # ) - try: - self._normalization_factor = float(normalization_factor) - if self._normalization_factor <= 0: - raise ValueError("normalization_factor must be a positive number.") - except Exception as e: - raise ValueError(f"Invalid normalization_factor: {e}") + value = float(normalization_factor) + except (TypeError, ValueError) as err: + raise ValueError(f"Invalid normalization_factor: {err}") from err + + if value <= 0: + raise ValueError("normalization_factor must be a positive number.") + + self._normalization_factor = value + # self._normalization_factor = value self._using_estimated_errors = ( False # track whether default error model has been used ) - #### TEST GIT CI TEST ##### - def validate_inputs(self) -> None: """ Validate user-provided inputs. Raises ValueError on any issue. @@ -288,7 +283,8 @@ def validate_inputs(self) -> None: if np.all(self._observed_intensities == 0): warnings.warn( "\n\n All observed intensities are zero. DEM solution will yield zero. 
" - "Object created, but solving will return DEM=0. \n\n" + "Object created, but solving will return DEM=0. \n\n", + stacklevel=2, ) # success -> no return value @@ -400,33 +396,16 @@ def intensity_errors(self) -> u.Quantity: if self._intensity_errors is not None: return self._intensity_errors * (u.DN / u.s) - # if self._using_estimated_errors: - # warnings.warn( - # "No intensity_errors provided. Using default model: " - # f"max(relative-error * observed_intensity, min_observational_error)\n" - # f"=> relative_error = {self.relative_error} =, min_observational_error = {self.min_observational_error.value} DN/s\n" - # "See: https://hesperia.gsfc.nasa.gov/ssw/hinode/xrt/idl/util/xrt_dem_iterative2.pro", - # UserWarning, - # ) - # self._using_estimated_errors = True # suppress future warnings - if not self._using_estimated_errors: warnings.warn( - "No intensity_errors provided. Using default model: " - f"max(relative-error * observed_intensity, min_observational_error)\n" + "\n\nNo intensity_errors provided. 
Using default model: " + "max(relative-error * observed_intensity, min_observational_error)\n", f"=> relative_error = {self.relative_error} =, min_observational_error = {self.min_observational_error.value} DN/s\n" - "See: https://hesperia.gsfc.nasa.gov/ssw/hinode/xrt/idl/util/xrt_dem_iterative2.pro", + "See: https://hesperia.gsfc.nasa.gov/ssw/hinode/xrt/idl/util/xrt_dem_iterative2.pro\n\n", UserWarning, ) - self._using_estimated_errors = True - # NOTETOJOYWe can remove if no issues later - # #No units - added in the return - # estimated = np.maximum( - # self.relative_error * self._observed_intensities , - # self.min_observational_error.value, - # ) - # return estimated * (u.DN / u.s) + self._using_estimated_errors = True # Fixed in units estimated = np.maximum( @@ -551,10 +530,7 @@ def _interpolate_responses_to_grid(self): self.response_temperatures, self.response_values, strict=False ): # Make sure that R_orig.value is indeed in DN/s/pix per cm^5 logT_orig = np.log10(T_orig.to_value(u.K)) - # response_vals = R_orig.to_value((u.cm**5 * u.DN) / (u.pix * u.s)) - # response_vals = R_orig.to_value(u.DN / u.s / u.pix / (u.cm**5)) - # response_vals = R_orig.to_value((u.DN / u.s / u.pix) * u.cm**5) Comment on Nov14 - # response_vals = R_orig.to_value(u.DN / u.s / u.pix / u.cm**5) + response_vals = ( R_orig.value ) # already in correct physical units for XRTpy #NOTEFORJOY- TRIPLE check this @@ -747,14 +723,10 @@ def _estimate_initial_dem(self, cutoff: float = 1.0 / np.e) -> np.ndarray: self._raw_estimated_dem_peaks = (np.array([]), np.array([])) # IDL BEHAVIOR: override with flat initial DEM - # xrt_dem_iter_estim ultimately does: - # dem = 0.0*findgen(nt) + 1.0 ; Use flat dem for initial guess - # on a regular logT grid. We mirror that here exactly: + # xrt_dem_iter_estim ultimately does: dem = 0.0*findgen(nt) + 1.0 ; Use flat dem for initial guess on a regular logT grid. 
We mirror that here exactly: # est_log_dem_on_grid = np.ones_like(self.logT, dtype=float) * 1.0 NOV20 - # est_log_dem_on_grid = np.ones_like(self.logT, dtype=float) * 0.0 #NOTEFORJOY - # or est_log_dem_on_grid = np.zeros_like(self.logT) # Return the intial first guessed DEM @@ -768,8 +740,6 @@ def _estimate_initial_dem(self, cutoff: float = 1.0 / np.e) -> np.ndarray: # ------------------------------------------------------------------------------------------------------------------------------- - #############************************** Start of **************************########################## - def _prepare_spline_system(self): """ Pythonic, IDL version of mp_prep. @@ -797,15 +767,14 @@ def _prepare_spline_system(self): # units - DN/s/pix/cm^5 * K * dLnT * DEM == DN/s/PIX T_linear = self.T.to_value(u.K) - # self.pm_matrix = (self._response_matrix * T_linear * self.dlnT).astype(float) NOV21 - - # NOV21 - # Was missing - normalization_factor - i think #NOTETOJOYNOV21 - # emissivity_matrix is shape (n_filters, n_T) - pm_phys = self._response_matrix * T_linear * self.dlnT # physical units - - # SCALE to match scaled DEM and scaled intensities - # self.pm_matrix = pm_phys / self.normalization_factor#THE FIX + # REMOVEJOY + # # self.pm_matrix = (self._response_matrix * T_linear * self.dlnT).astype(float) NOV21 + # # NOV21 + # # Was missing - normalization_factor - i think #NOTETOJOYNOV21 + # # emissivity_matrix is shape (n_filters, n_T) + # pm_phys = self._response_matrix * T_linear * self.dlnT # physical units + # # SCALE to match scaled DEM and scaled intensities + # # self.pm_matrix = pm_phys / self.normalization_factor#THE FIX self.pm_matrix = (self._response_matrix * T_linear * self.dlnT).astype(float) @@ -854,17 +823,6 @@ def _reconstruct_dem_from_knots(self, params): knot_vals = np.array([params[f"knot_{i}"].value for i in range(self.n_spl)]) - # interp_spline = interp1d( - # self.spline_logT, - # knot_vals, - # kind="linear", #IDL uses cubic spline 
interpolation NOTEFORJOY NOV20 - # bounds_error=False, - # fill_value="extrapolate", - # ) - - # log_dem = interp_spline(self.logT) # log10(DEM) - # dem = 10.0**log_dem # DEM in linear cm^-5 K^-1 - # Or used the code above but switch from linear to kind="cubic" cs = CubicSpline(self.spline_logT, knot_vals, bc_type="natural") log_dem = cs(self.logT) @@ -923,7 +881,9 @@ def _solve_single_dem(self, observed_intensities_vals: np.ndarray): params0 = self._build_lmfit_parameters() # values = initial_log_dem at knots # 5. run minimizer - result = minimize(self._residuals, params0, max_nfev=self._max_iterations) + result = minimize( + self._residuals, params0, max_nfev=self._max_iterations + ) # method='leastsq' # THIS is the critical part – use *result.params*, not params0 <<< dem_model = self._reconstruct_dem_from_knots(result.params) # DEM_model(T) @@ -937,13 +897,11 @@ def _solve_single_dem(self, observed_intensities_vals: np.ndarray): return dem_phys, modeled_intensities_phys, chisq, result - ################################################################################################################################ - # ------------------------------------------------------------------------------------------------------------------------------- #############************************** Start of error bars / Monte Carlo **************************########################## - def _run_monte_carlo(self, result_params): + def _run_monte_carlo(self): """ Replicates IDL's Monte Carlo loop. 
Produces: @@ -963,7 +921,7 @@ def _run_monte_carlo(self, result_params): mc_mod = np.zeros((n_obs, N + 1)) mc_chi = np.zeros(N + 1) - # --- Base run first (IDL puts real data in column 0) --- + # Base run first (IDL puts real data in column 0) dem = self.dem # already scaled back by normalization mc_dem[:, 0] = dem mc_base[:, 0] = self._observed_intensities # unscaled @@ -974,7 +932,6 @@ def _run_monte_carlo(self, result_params): rng = np.random.default_rng() # like systime(1) for ii in range(1, N + 1): - # Step 1: Perturbed intensities (scaled) perturbed = ( self.intensities_scaled @@ -1021,8 +978,6 @@ def _run_monte_carlo(self, result_params): self.mc_mod_obs = mc_mod self.mc_chisq = mc_chi - # ------------------------------------------------------------------------------------------------------------------------------- - #############************************** Start of DEM SOLVER **************************########################## def solve(self): @@ -1143,72 +1098,6 @@ def solve(self): # -------------------------------------------------------------- return self.dem - def plot_dem_with_monte_carlo( - self, - mc_color="black", - base_color="red", - alpha_mc=0.15, - lw_mc=1.2, - lw_base=2.0, - figsize=(10, 6), - show_envelope=True, - ): - """ - Plot DEM with Monte Carlo ensemble using step curves. 
- """ - - import matplotlib.pyplot as plt - - logT = self.logT - mc_dem = self.mc_dem # shape (N+1, n_T) - base_dem = self.dem # shape (n_T,) - N = mc_dem.shape[0] - 1 # number of MC runs - - plt.figure(figsize=figsize) - - # --- MC DEM curves (black) --- - for i in range(1, N + 1): - plt.step( - logT, - np.log10(mc_dem[i, :]), - where="mid", - color=mc_color, - alpha=alpha_mc, - linewidth=lw_mc, - ) - - # --- Optional uncertainty envelope (16–84 percentile) --- - if show_envelope and N > 1: - dem_low = np.percentile(mc_dem[1:, :], 16, axis=0) - dem_high = np.percentile(mc_dem[1:, :], 84, axis=0) - - plt.fill_between( - logT, - np.log10(dem_low), - np.log10(dem_high), - step="mid", - color="black", - alpha=0.2, - label="68% interval", - ) - - # --- Base DEM (red) --- - plt.step( - logT, - np.log10(base_dem), - where="mid", - color=base_color, - linewidth=lw_base, - label="Base DEM", - ) - - plt.xlabel("log10(T) [K]") - plt.ylabel("log10(DEM)") - plt.title(f"DEM Monte Carlo ({N} realizations)") - plt.grid(True) - plt.legend() - plt.show() - def summary(self): """ Print a complete diagnostic summary of the DEM solver state, @@ -1237,7 +1126,7 @@ def summary(self): if self._intensity_errors is not None: print(f" Intensity Errors: User-provided ({self._intensity_errors})") else: - print(f" Intensity Errors: Auto-estimated (0.03*I, min=2 DN/s)") + print(" Intensity Errors: Auto-estimated (0.03*I, min=2 DN/s)") print( f" Error values: {self.intensity_errors.to_value('DN/s')} DN/s" ) @@ -1308,7 +1197,7 @@ def summary(self): median = np.median(self.mc_dem[1:], axis=0) p16, p84 = np.percentile(self.mc_dem[1:], [16, 84], axis=0) - print(f" MC DEM preview:") + print(" MC DEM preview:") print(f" Median (first 5): {median[:5]}") print( f" 1σ bounds (log10): " From 337c481f515bde1cd643ed8cf7a9bfb9ff2fc2ea Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Mon, 24 Nov 2025 14:06:17 -0500 Subject: [PATCH 098/121] Removed xrt_dem_statistics.py and monte_carlo_iteration.py --- 
xrtpy/xrt_dem_iterative/__init__.py | 2 -- xrtpy/xrt_dem_iterative/monte_carlo_iteration.py | 7 ------- xrtpy/xrt_dem_iterative/xrt_dem_statistics.py | 12 ------------ 3 files changed, 21 deletions(-) delete mode 100644 xrtpy/xrt_dem_iterative/monte_carlo_iteration.py delete mode 100644 xrtpy/xrt_dem_iterative/xrt_dem_statistics.py diff --git a/xrtpy/xrt_dem_iterative/__init__.py b/xrtpy/xrt_dem_iterative/__init__.py index f6650a508..c45214265 100644 --- a/xrtpy/xrt_dem_iterative/__init__.py +++ b/xrtpy/xrt_dem_iterative/__init__.py @@ -6,7 +6,5 @@ __all__ = [ "XRTDEMIterative", - "MonteCarloIteration", - "ComputeDEMStatistics" ] diff --git a/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py b/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py deleted file mode 100644 index fd73422ce..000000000 --- a/xrtpy/xrt_dem_iterative/monte_carlo_iteration.py +++ /dev/null @@ -1,7 +0,0 @@ -__all__ = [ - "MonteCarloIteration", -] - - -class MonteCarloIteration: - print("MonteCarloIteration info coming soon.. ") diff --git a/xrtpy/xrt_dem_iterative/xrt_dem_statistics.py b/xrtpy/xrt_dem_iterative/xrt_dem_statistics.py deleted file mode 100644 index 86cc7968f..000000000 --- a/xrtpy/xrt_dem_iterative/xrt_dem_statistics.py +++ /dev/null @@ -1,12 +0,0 @@ -__all__ = [ - "SplineSmoothing", - "ErrorPropagation", -] - - -class SplineSmoothing: - print("Will be updating soon... SplineSmoothing") - - -class ErrorPropagation: - print("Will be updating soon... 
SplineSmoothing") From 952e4cb6373b8e864c79e62a804959e72ec00eb3 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Mon, 24 Nov 2025 14:49:37 -0500 Subject: [PATCH 099/121] Cleaned up summary function --- xrtpy/xrt_dem_iterative/dem_solver.py | 104 ++++++++++++-------------- 1 file changed, 46 insertions(+), 58 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 77e2ef399..480ba6bef 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -767,15 +767,6 @@ def _prepare_spline_system(self): # units - DN/s/pix/cm^5 * K * dLnT * DEM == DN/s/PIX T_linear = self.T.to_value(u.K) - # REMOVEJOY - # # self.pm_matrix = (self._response_matrix * T_linear * self.dlnT).astype(float) NOV21 - # # NOV21 - # # Was missing - normalization_factor - i think #NOTETOJOYNOV21 - # # emissivity_matrix is shape (n_filters, n_T) - # pm_phys = self._response_matrix * T_linear * self.dlnT # physical units - # # SCALE to match scaled DEM and scaled intensities - # # self.pm_matrix = pm_phys / self.normalization_factor#THE FIX - self.pm_matrix = (self._response_matrix * T_linear * self.dlnT).astype(float) # Knot positions are evenly spaced in logT (IDL spl_t) @@ -899,8 +890,6 @@ def _solve_single_dem(self, observed_intensities_vals: np.ndarray): # ------------------------------------------------------------------------------------------------------------------------------- - #############************************** Start of error bars / Monte Carlo **************************########################## - def _run_monte_carlo(self): """ Replicates IDL's Monte Carlo loop. 
@@ -978,7 +967,7 @@ def _run_monte_carlo(self): self.mc_mod_obs = mc_mod self.mc_chisq = mc_chi - #############************************** Start of DEM SOLVER **************************########################## + # ------------------------------------------------------------------------------------------------------------------------------- def solve(self): """ @@ -1077,81 +1066,81 @@ def solve(self): if ii % max(1, N // 20) == 0: print(f" - Monte Carlo run {ii}/{N}") - # ---- 4a) Perturb intensities: I' = I + N(0, sigma), clipped at 0 + # 4a) Perturb intensities: I' = I + N(0, sigma), clipped at 0 noise = rng.normal(loc=0.0, scale=sigma_phys, size=base_obs_phys.shape) obs_pert = base_obs_phys + noise obs_pert = np.maximum(obs_pert, 0.0) # IDL: >0 to avoid negatives - # ---- 4b) Solve DEM for this perturbed realization + # 4b) Solve DEM for this perturbed realization dem_i, mod_i, chisq_i, _ = self._solve_single_dem( observed_intensities_vals=obs_pert ) - # ---- 4c) Store Monte Carlo results + # 4c) Store Monte Carlo results self.mc_dem[ii, :] = dem_i self.mc_chisq[ii] = chisq_i self.mc_base_obs[ii, :] = obs_pert self.mc_mod_obs[ii, :] = mod_i - # -------------------------------------------------------------- # 5) Return DEM for convenience - # -------------------------------------------------------------- return self.dem def summary(self): """ - Print a complete diagnostic summary of the DEM solver state, - including input parameters, solver configuration, response matrix, - base DEM fit, Monte Carlo ensemble, and available plotting helpers. - - Designed to mimic the detailed transparent reporting style - of IDL’s xrt_dem_iterative2.pro, but more informative. + Print a detailed, diagnostic summary of the DEM solver state. 
+ + This provides: + • Input observation details + • Temperature grid configuration + • Response matrix status + • Spline system configuration + • Base DEM fit results + • Monte Carlo statistics (if available) + • Available plotting helpers """ - import numpy as np print("\n" + "=" * 70) print("XRTpy DEM Iterative — Solver Summary") print("=" * 70) # ----------------------------------------------------- - print("\nINPUTS") - print("-" * 70) - print(f" Filters: {self.filter_names}") + print("\nINPUT DATA") + print("-" * 76) + print(f" Filters: {self.filter_names}") print( - f" Observed Intensities: {np.array(self._observed_intensities)} DN/s/pix" + f" Observed Intensities: {np.array(self._observed_intensities)} DN/s/pix" ) - print(f" Number of channels: {len(self._observed_intensities)}") + print(f" Number of channels: {len(self._observed_intensities)}") # Error model if self._intensity_errors is not None: - print(f" Intensity Errors: User-provided ({self._intensity_errors})") + print(" Intensity Errors: User-provided") else: - print(" Intensity Errors: Auto-estimated (0.03*I, min=2 DN/s)") - print( - f" Error values: {self.intensity_errors.to_value('DN/s')} DN/s" - ) + print(" Intensity Errors: Auto-estimated (3% of I, min=2 DN/s)") + + print(f" Error values (DN/s): {self.intensity_errors.to_value('DN/s')}\n") # ----------------------------------------------------- print("\nTEMPERATURE GRID") print("-" * 70) if hasattr(self, "logT"): - print(f" logT range: {self.logT[0]:.2f} – {self.logT[-1]:.2f}") - print(f" Number of bins: {len(self.logT)}") - print(f" ΔlogT: {self.dlogT:.3f}") - print(f" ΔlnT: {self.dlnT:.3f}") + print( + f" logT range: {self.logT[0]:.2f} to {self.logT[-1]:.2f}" + ) + print(f" Number of temperature bins: {len(self.logT)}") + print(f" logT (grid spacing): {self.dlogT:.3f}") + print(f" lnT (natural log spacing): {self.dlnT:.3f}") else: - print(" Grid has not been constructed yet (call solve()).") + print(" Grid has not been constructed (call 
solve()).") # ----------------------------------------------------- print("\nRESPONSE MATRIX") print("-" * 70) if hasattr(self, "_response_matrix"): - print( - f" Response matrix shape: {self._response_matrix.shape} (filters × T bins)" - ) - print(f" Response unit: {self._response_unit}") + print(f" Matrix shape: {self._response_matrix.shape} (filters x T bins)") + print(f" Response units: {self._response_unit}") else: - print(" Response matrix not constructed yet.") + print(" Response matrix not constructed.") # ----------------------------------------------------- print("\nSOLVER CONFIGURATION") @@ -1160,18 +1149,18 @@ def summary(self): print(f" Max iterations: {self.max_iterations}") print(f" Monte Carlo runs: {self.monte_carlo_runs}") if hasattr(self, "n_spl"): - print(f" Spline knots: {self.n_spl}") + print(f" Number of spline knots: {self.n_spl}") + print(f" Knot positions (logT): {getattr(self, 'spline_logT', 'N/A')}") else: - print(" Spline knots: (not prepared yet)") - + print(" Spline system not prepared yet.") # ----------------------------------------------------- print("\nINITIAL DEM GUESS") print("-" * 70) if hasattr(self, "_initial_log_dem"): - print(" Initial DEM (log10): flat initial estimate") - print(f" First 5 values: {self._initial_log_dem[:5]}") + print(" Initial DEM assumption: flat log10(DEM) (IDL-style)") + print(f" First 5 bins (log10): {self._initial_log_dem[:5]}") else: - print(" Initial DEM has not been estimated yet.") + print(" Initial DEM has not been estimated.") # ----------------------------------------------------- @@ -1196,11 +1185,10 @@ def summary(self): if N > 0: median = np.median(self.mc_dem[1:], axis=0) p16, p84 = np.percentile(self.mc_dem[1:], [16, 84], axis=0) - - print(" MC DEM preview:") + print(" MC DEM statistics (first T-bin):") print(f" Median (first 5): {median[:5]}") print( - f" 1σ bounds (log10): " + f" 1x bounds (log10): " f"{np.log10(p16[0] + 1e-99):.2f} – {np.log10(p84[0] + 1e-99):.2f}" ) else: @@ -1209,11 
+1197,11 @@ def summary(self): print(" No Monte Carlo results available.") # ----------------------------------------------------- - print("\nPLOTTING OPTIONS") - print("-" * 70) - print(" • plot_dem() Base DEM only") - print(" • plot_dem_mc() Best-fit + MC curves") - print("\n" + "=" * 70 + "\n") + print("\nPLOTTING HELPERS") + print("-" * 76) + print(" • plot_dem() – Base DEM only") + print(" • plot_dem_mc() – Base DEM + MC ensemble") + print("\n" + "=" * 76 + "\n") XRTDEMIterative.plot_dem = dem_plotting.plot_dem From b6c8262a2f7c600a8dc637470131ba70c361c559 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Mon, 24 Nov 2025 18:41:40 -0500 Subject: [PATCH 100/121] Working on test now --- .../test/test_dem_input_validation.py | 122 +++++++----------- 1 file changed, 48 insertions(+), 74 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py b/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py index 925158043..b6b4c7eb8 100644 --- a/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py +++ b/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py @@ -1,13 +1,15 @@ - from importlib.resources import files - import numpy as np import pytest import sunpy import sunpy.io.special import sunpy.map + from xrtpy.response.channel import Channel +from xrtpy.response.tools import generate_temperature_responses +from xrtpy.xrt_dem_iterative import XRTDEMIterative + channel_names = [ "Al-mesh", @@ -27,17 +29,13 @@ ] - - - - @pytest.mark.parametrize("channel_name", channel_names) def test_channel_name(channel_name): channel = Channel(channel_name) assert channel.name == channel_name -#filename = Path(__file__).parent.parent.absolute() / "data" / "xrt_channels_v0017.genx" +# filename = Path(__file__).parent.parent.absolute() / "data" / "xrt_channels_v0017.genx" filename = files("xrtpy.response.data") / "xrt_channels_v0017.genx" v6_genx = sunpy.io.special.genx.read_genx(filename) @@ -60,71 +58,47 @@ def test_channel_name(channel_name): 
"C-poly/Ti-poly": 13, } -def validate_inputs(self): - """ - Run all internal validation checks again. Raises if any inputs are invalid. - Useful for debugging or after programmatic changes. - """ - # Check shape of intensity_errors - if self._intensity_errors is not None: - if self._intensity_errors.shape != self._observed_intensities.shape: - raise ValueError("Length of intensity_errors must match observed_intensities.") - - # Check consistency between filters, intensities, and responses - if not ( - len(self._observed_intensities) - == len(self.responses) - == len(self.observed_channel) - ): - raise ValueError( - f"\nLength mismatch in inputs:\n" - f" Observed intensities: {len(self._observed_intensities)}\n" - f" Responses: {len(self.responses)}\n" - f" Filter channels: {len(self.observed_channel)}\n" - ) - - # Check temperature grid - if self._dT <= 0: - raise ValueError("dT must be a positive scalar.") - - for r in self.responses: - logT_grid = np.log10(r.temperature.value) - if not (self._min_T >= logT_grid.min() and self._max_T <= logT_grid.max()): - raise ValueError( - f"The specified temperature range [{self._min_T}, {self._max_T}] is outside the bounds of one or more filter response grids.\n" - "Please ensure the temperature range fits within all responses." 
- ) - - -import pytest - -from xrtpy.response.tools import generate_temperature_responses -from xrtpy.xrt_dem_iterative import XRTDEMIterative - - -def test_validate_inputs_good_case(): - filters = ["Be-thin", "Be-med"] - i_obs = [10000.0, 20000.0] - resp = generate_temperature_responses(filters, obs_date="2007-07-10") - dem = XRTDEMIterative(filters, i_obs, resp) - dem.validate_inputs() # Should NOT raise - -def test_validate_inputs_mismatched_errors(): - filters = ["Be-thin", "Be-med"] - i_obs = [10000.0, 20000.0] - i_err = [100.0] # Wrong length - resp = generate_temperature_responses(filters, obs_date="2007-07-10") - dem = XRTDEMIterative(filters, i_obs, resp, intensity_errors=i_err) - with pytest.raises(ValueError, match="intensity_errors must match"): - dem.validate_inputs() - - - -#Test to add later -#both should be True -# np.allclose(x.intensities_scaled, -# x.observed_intensities.value / x.normalization_factor) - -# np.allclose(x.sigma_scaled_intensity_errors, -# x.intensity_errors.to_value(u.DN/u.s) / x.normalization_factor) +def test_dem_validate_inputs_basic(): + # Minimal “realistic” inputs for a DEM solve + filters = ["Al-poly", "Ti-poly"] + intensities = np.array([2500.0, 1800.0]) + + # Use real responses from XRTpy + responses = generate_temperature_responses(filters, "2012-10-27T00:00:00") + + x = XRTDEMIterative( + observed_channel=filters, + observed_intensities=intensities, + temperature_responses=responses, + minimum_bound_temperature=5.5, + maximum_bound_temperature=8.0, + logarithmic_temperature_step_size=0.1, + monte_carlo_runs=0, + ) + + # Should NOT raise any error + x.validate_inputs() + + +def test_dem_temperature_grid(): + filters = ["Al-poly"] + intensities = np.array([1500.0]) + responses = generate_temperature_responses(filters, "2012-10-27T00:00:00") + + x = XRTDEMIterative( + observed_channel=filters, + observed_intensities=intensities, + temperature_responses=responses, + minimum_bound_temperature=5.5, + 
maximum_bound_temperature=7.5, + logarithmic_temperature_step_size=0.1, + ) + + x.create_logT_grid() + + assert np.isclose(x.logT[0], 5.5) + assert np.isclose(x.logT[-1], 7.5) + assert len(x.logT) == 21 # (7.5-5.5)/0.1 + 1 = 21 + assert np.isclose(x.dlogT, 0.1) + assert np.isclose(x.dlnT, np.log(10) * 0.1) From 473a0cbb1b4249d3b7702709a1dbe09509c8816b Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Mon, 24 Nov 2025 19:21:46 -0500 Subject: [PATCH 101/121] Adding more test --- .../test/test_dem_input_validation.py | 189 ++++++++++++++++++ 1 file changed, 189 insertions(+) diff --git a/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py b/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py index b6b4c7eb8..c46337a23 100644 --- a/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py +++ b/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py @@ -102,3 +102,192 @@ def test_dem_temperature_grid(): assert len(x.logT) == 21 # (7.5-5.5)/0.1 + 1 = 21 assert np.isclose(x.dlogT, 0.1) assert np.isclose(x.dlnT, np.log(10) * 0.1) + + +def test_validate_inputs_good_case(): + filters = ["Be-thin", "Be-med"] + i_obs = [10000.0, 20000.0] + resp = generate_temperature_responses(filters, obs_date="2007-07-10") + dem = XRTDEMIterative(filters, i_obs, resp) + dem.validate_inputs() # Should NOT raise + +def test_validate_inputs_mismatched_errors(): + filters = ["Be-thin", "Be-med"] + i_obs = [10000.0, 20000.0] + i_err = [100.0] # Wrong length + resp = generate_temperature_responses(filters, obs_date="2007-07-10") + dem = XRTDEMIterative(filters, i_obs, resp, intensity_errors=i_err) + with pytest.raises(ValueError, match="intensity_errors must match"): + dem.validate_inputs() + + + +# # ------------------------------------------------------------------------- +# # 1) Valid configuration should pass validate_inputs +# # ------------------------------------------------------------------------- + +# def test_validate_inputs_valid_configuration(basic_responses, 
basic_intensities): +# x = XRTDEMIterative( +# observed_channel=["Filter-1", "Filter-2", "Filter-3"], +# observed_intensities=basic_intensities, +# temperature_responses=basic_responses, +# minimum_bound_temperature=5.5, +# maximum_bound_temperature=8.0, +# logarithmic_temperature_step_size=0.1, +# monte_carlo_runs=0, +# normalization_factor=1e21, +# ) + +# # Should not raise +# x.validate_inputs() + + +# # ------------------------------------------------------------------------- +# # 2) Empty observed_channel should raise +# # ------------------------------------------------------------------------- + +# def test_empty_observed_channel_raises(basic_responses, basic_intensities): +# with pytest.raises(ValueError, match="`observed_channel` is required"): +# XRTDEMIterative( +# observed_channel=[], +# observed_intensities=basic_intensities, +# temperature_responses=basic_responses, +# ) + + +# # ------------------------------------------------------------------------- +# # 3) Mismatched lengths of intensities / responses / channels +# # ------------------------------------------------------------------------- + +# def test_length_mismatch_raises(): +# responses = [DummyResponse("F1"), DummyResponse("F2")] +# intensities = np.array([1000.0]) # only one value + +# with pytest.raises(ValueError, match="Length mismatch"): +# XRTDEMIterative( +# observed_channel=["F1", "F2"], +# observed_intensities=intensities, +# temperature_responses=responses, +# ) + + +# # ------------------------------------------------------------------------- +# # 4) Temperature range outside response grid should raise +# # ------------------------------------------------------------------------- + +# def test_temperature_range_outside_responses_raises(basic_responses, basic_intensities): +# # min T too low +# with pytest.raises(ValueError, match="outside the bounds"): +# XRTDEMIterative( +# observed_channel=["Filter-1", "Filter-2", "Filter-3"], +# observed_intensities=basic_intensities, +# 
temperature_responses=basic_responses, +# minimum_bound_temperature=4.0, # below dummy response range +# maximum_bound_temperature=8.0, +# ) + +# # max T too high +# with pytest.raises(ValueError, match="outside the bounds"): +# XRTDEMIterative( +# observed_channel=["Filter-1", "Filter-2", "Filter-3"], +# observed_intensities=basic_intensities, +# temperature_responses=basic_responses, +# minimum_bound_temperature=5.5, +# maximum_bound_temperature=9.0, # above dummy response range +# ) + + +# # ------------------------------------------------------------------------- +# # 5) Negative or zero logarithmic_temperature_step_size should raise +# # ------------------------------------------------------------------------- + +# def test_negative_logarithmic_temperature_step_size_raises(basic_responses, basic_intensities): +# with pytest.raises(ValueError, match="logarithmic_temperature_step_size must be a positive"): +# XRTDEMIterative( +# observed_channel=["Filter-1", "Filter-2", "Filter-3"], +# observed_intensities=basic_intensities, +# temperature_responses=basic_responses, +# minimum_bound_temperature=5.5, +# maximum_bound_temperature=8.0, +# logarithmic_temperature_step_size=-0.1, +# ) + + +# def test_too_few_temperature_bins_raises(basic_responses, basic_intensities): +# # Choose a huge step so that fewer than 4 bins are produced +# with pytest.raises(ValueError, match="Temperature grid must have at least 4 points"): +# XRTDEMIterative( +# observed_channel=["Filter-1", "Filter-2", "Filter-3"], +# observed_intensities=basic_intensities, +# temperature_responses=basic_responses, +# minimum_bound_temperature=5.5, +# maximum_bound_temperature=5.8, +# logarithmic_temperature_step_size=0.5, +# ) + + +# # ------------------------------------------------------------------------- +# # 6) Monte Carlo runs validation +# # ------------------------------------------------------------------------- + +# def test_monte_carlo_runs_negative_raises(): +# with pytest.raises(ValueError, 
match="must be ≥ 0"): +# make_iterative(monte_carlo_runs=-1) + + +# def test_monte_carlo_runs_bool_raises(): +# with pytest.raises(ValueError, match="must be a non-negative whole number, not a boolean"): +# make_iterative(monte_carlo_runs=True) + + +# def test_monte_carlo_runs_float_non_integer_raises(): +# with pytest.raises(ValueError, match="Decimal values are not allowed"): +# make_iterative(monte_carlo_runs=3.5) + + +# def test_monte_carlo_runs_zero_ok(): +# x = make_iterative(monte_carlo_runs=0) +# assert x.monte_carlo_runs == 0 + + +# def test_monte_carlo_runs_positive_integer_ok(): +# x = make_iterative(monte_carlo_runs=10) +# assert x.monte_carlo_runs == 10 + + +# # ------------------------------------------------------------------------- +# # 7) Intensity errors validation in validate_inputs +# # ------------------------------------------------------------------------- + +# def test_intensity_errors_length_mismatch_raises(basic_responses, basic_intensities): +# x = XRTDEMIterative( +# observed_channel=["Filter-1", "Filter-2", "Filter-3"], +# observed_intensities=basic_intensities, +# temperature_responses=basic_responses, +# intensity_errors=np.array([1.0, 2.0]), # wrong length +# ) + +# with pytest.raises(ValueError, match="Length of intensity_errors must match"): +# x.validate_inputs() + + +# def test_intensity_errors_negative_raises(basic_responses, basic_intensities): +# x = XRTDEMIterative( +# observed_channel=["Filter-1", "Filter-2", "Filter-3"], +# observed_intensities=basic_intensities, +# temperature_responses=basic_responses, +# intensity_errors=np.array([1.0, -2.0, 3.0]), +# ) + +# with pytest.raises(ValueError, match="must be finite and >= 0"): +# x.validate_inputs() + + +# #Test to add later +# #both should be True +# # np.allclose(x.intensities_scaled, +# # x.observed_intensities.value / x.normalization_factor) + +# # np.allclose(x.sigma_scaled_intensity_errors, +# # x.intensity_errors.to_value(u.DN/u.s) / x.normalization_factor) + From 
fda3d0edd5f140c74d76dfd58032732aab4734c7 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Mon, 24 Nov 2025 19:36:33 -0500 Subject: [PATCH 102/121] Correction to warning error output --- xrtpy/xrt_dem_iterative/dem_solver.py | 40 ++++++++++++++------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 480ba6bef..ad0118d9c 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -398,13 +398,17 @@ def intensity_errors(self) -> u.Quantity: if not self._using_estimated_errors: warnings.warn( - "\n\nNo intensity_errors provided. Using default model: " - "max(relative-error * observed_intensity, min_observational_error)\n", - f"=> relative_error = {self.relative_error} =, min_observational_error = {self.min_observational_error.value} DN/s\n" - "See: https://hesperia.gsfc.nasa.gov/ssw/hinode/xrt/idl/util/xrt_dem_iterative2.pro\n\n", - UserWarning, + ( + "\n\nNo intensity_errors provided. 
Using default model: " + "max(relative-error * observed_intensity, min_observational_error)\n" + f"=> relative_error = {self.relative_error}, " + f"min_observational_error = {self.min_observational_error.value} DN/s\n" + "See: https://hesperia.gsfc.nasa.gov/ssw/hinode/xrt/idl/util/xrt_dem_iterative2.pro\n\n" + ), + category=UserWarning, + stacklevel=2, ) - + self._using_estimated_errors = True # Fixed in units @@ -1099,13 +1103,13 @@ def summary(self): • Available plotting helpers """ - print("\n" + "=" * 70) - print("XRTpy DEM Iterative — Solver Summary") - print("=" * 70) + print("\n" + "=" * 76) + print(" XRTpy DEM Iterative — Solver Summary") + print("=" * 76) # ----------------------------------------------------- print("\nINPUT DATA") - print("-" * 76) + print("-" * 70) print(f" Filters: {self.filter_names}") print( f" Observed Intensities: {np.array(self._observed_intensities)} DN/s/pix" @@ -1124,11 +1128,9 @@ def summary(self): print("\nTEMPERATURE GRID") print("-" * 70) if hasattr(self, "logT"): - print( - f" logT range: {self.logT[0]:.2f} to {self.logT[-1]:.2f}" - ) - print(f" Number of temperature bins: {len(self.logT)}") - print(f" logT (grid spacing): {self.dlogT:.3f}") + print( f" logT range: {self.logT[0]:.2f} to {self.logT[-1]:.2f}") + print(f" Number of temperature bins: {len(self.logT)}") + print(f" logT (grid spacing): {self.dlogT:.3f}") print(f" lnT (natural log spacing): {self.dlnT:.3f}") else: print(" Grid has not been constructed (call solve()).") @@ -1137,7 +1139,7 @@ def summary(self): print("\nRESPONSE MATRIX") print("-" * 70) if hasattr(self, "_response_matrix"): - print(f" Matrix shape: {self._response_matrix.shape} (filters x T bins)") + print(f" Matrix shape: {self._response_matrix.shape} (filters x T bins)") print(f" Response units: {self._response_unit}") else: print(" Response matrix not constructed.") @@ -1145,9 +1147,9 @@ def summary(self): # ----------------------------------------------------- print("\nSOLVER CONFIGURATION") 
print("-" * 70) - print(f" Normalization factor: {self.normalization_factor:.2e}") - print(f" Max iterations: {self.max_iterations}") - print(f" Monte Carlo runs: {self.monte_carlo_runs}") + print(f" Normalization factor: {self.normalization_factor:.2e}") + print(f" Max iterations: {self.max_iterations}") + print(f" Monte Carlo runs: {self.monte_carlo_runs}") if hasattr(self, "n_spl"): print(f" Number of spline knots: {self.n_spl}") print(f" Knot positions (logT): {getattr(self, 'spline_logT', 'N/A')}") From 766370d1d3e4cf63c220babb95bb9213a30cfec0 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Mon, 24 Nov 2025 19:59:43 -0500 Subject: [PATCH 103/121] Adding more test --- .../test/test_dem_input_validation.py | 243 +++++------------- 1 file changed, 63 insertions(+), 180 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py b/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py index c46337a23..c51be6f22 100644 --- a/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py +++ b/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py @@ -1,16 +1,15 @@ from importlib.resources import files + +import astropy.units as u import numpy as np import pytest import sunpy import sunpy.io.special import sunpy.map - - from xrtpy.response.channel import Channel from xrtpy.response.tools import generate_temperature_responses from xrtpy.xrt_dem_iterative import XRTDEMIterative - channel_names = [ "Al-mesh", "Al-poly", @@ -35,30 +34,6 @@ def test_channel_name(channel_name): assert channel.name == channel_name -# filename = Path(__file__).parent.parent.absolute() / "data" / "xrt_channels_v0017.genx" -filename = files("xrtpy.response.data") / "xrt_channels_v0017.genx" - -v6_genx = sunpy.io.special.genx.read_genx(filename) -v6_genx_s = v6_genx["SAVEGEN0"] - -_channel_name_to_index_mapping = { - "Al-mesh": 0, - "Al-poly": 1, - "C-poly": 2, - "Ti-poly": 3, - "Be-thin": 4, - "Be-med": 5, - "Al-med": 6, - "Al-thick": 7, - "Be-thick": 8, - 
"Al-poly/Al-mesh": 9, - "Al-poly/Ti-poly": 10, - "Al-poly/Al-thick": 11, - "Al-poly/Be-thick": 12, - "C-poly/Ti-poly": 13, -} - - def test_dem_validate_inputs_basic(): # Minimal “realistic” inputs for a DEM solve filters = ["Al-poly", "Ti-poly"] @@ -107,187 +82,95 @@ def test_dem_temperature_grid(): def test_validate_inputs_good_case(): filters = ["Be-thin", "Be-med"] i_obs = [10000.0, 20000.0] - resp = generate_temperature_responses(filters, obs_date="2007-07-10") + resp = generate_temperature_responses(filters, "2007-07-10") dem = XRTDEMIterative(filters, i_obs, resp) dem.validate_inputs() # Should NOT raise + def test_validate_inputs_mismatched_errors(): filters = ["Be-thin", "Be-med"] i_obs = [10000.0, 20000.0] i_err = [100.0] # Wrong length - resp = generate_temperature_responses(filters, obs_date="2007-07-10") + resp = generate_temperature_responses(filters, "2007-07-10") dem = XRTDEMIterative(filters, i_obs, resp, intensity_errors=i_err) with pytest.raises(ValueError, match="intensity_errors must match"): dem.validate_inputs() +def test_create_logT_grid(): + # Simple single-channel setup + filters = ["Al-poly"] + intensities = np.array([1500.0]) + responses = generate_temperature_responses(filters, "2012-10-27T00:00:00") -# # ------------------------------------------------------------------------- -# # 1) Valid configuration should pass validate_inputs -# # ------------------------------------------------------------------------- - -# def test_validate_inputs_valid_configuration(basic_responses, basic_intensities): -# x = XRTDEMIterative( -# observed_channel=["Filter-1", "Filter-2", "Filter-3"], -# observed_intensities=basic_intensities, -# temperature_responses=basic_responses, -# minimum_bound_temperature=5.5, -# maximum_bound_temperature=8.0, -# logarithmic_temperature_step_size=0.1, -# monte_carlo_runs=0, -# normalization_factor=1e21, -# ) - -# # Should not raise -# x.validate_inputs() - - -# # 
------------------------------------------------------------------------- -# # 2) Empty observed_channel should raise -# # ------------------------------------------------------------------------- - -# def test_empty_observed_channel_raises(basic_responses, basic_intensities): -# with pytest.raises(ValueError, match="`observed_channel` is required"): -# XRTDEMIterative( -# observed_channel=[], -# observed_intensities=basic_intensities, -# temperature_responses=basic_responses, -# ) + # Construct DEM object + x = XRTDEMIterative( + observed_channel=filters, + observed_intensities=intensities, + temperature_responses=responses, + minimum_bound_temperature=5.5, + maximum_bound_temperature=7.5, + logarithmic_temperature_step_size=0.1, + ) + # Create grid + x.create_logT_grid() -# # ------------------------------------------------------------------------- -# # 3) Mismatched lengths of intensities / responses / channels -# # ------------------------------------------------------------------------- + # 1 — Correct start and end + assert x.logT[0] == pytest.approx(5.5) + assert x.logT[-1] == pytest.approx(7.5) -# def test_length_mismatch_raises(): -# responses = [DummyResponse("F1"), DummyResponse("F2")] -# intensities = np.array([1000.0]) # only one value - -# with pytest.raises(ValueError, match="Length mismatch"): -# XRTDEMIterative( -# observed_channel=["F1", "F2"], -# observed_intensities=intensities, -# temperature_responses=responses, -# ) + # 2 — Correct number of bins: (7.5 - 5.5)/0.1 + 1 = 21 + assert len(x.logT) == 21 + assert x.n_bins == 21 + # 3 — Correct spacing (uniform) + diffs = np.diff(x.logT) + assert np.allclose(diffs, 0.1, atol=1e-12) -# # ------------------------------------------------------------------------- -# # 4) Temperature range outside response grid should raise -# # ------------------------------------------------------------------------- - -# def test_temperature_range_outside_responses_raises(basic_responses, basic_intensities): -# # min T 
too low -# with pytest.raises(ValueError, match="outside the bounds"): -# XRTDEMIterative( -# observed_channel=["Filter-1", "Filter-2", "Filter-3"], -# observed_intensities=basic_intensities, -# temperature_responses=basic_responses, -# minimum_bound_temperature=4.0, # below dummy response range -# maximum_bound_temperature=8.0, -# ) + # 4 — dlogT and dlnT correct + assert x.dlogT == pytest.approx(0.1) + assert x.dlnT == pytest.approx(np.log(10) * 0.1) -# # max T too high -# with pytest.raises(ValueError, match="outside the bounds"): -# XRTDEMIterative( -# observed_channel=["Filter-1", "Filter-2", "Filter-3"], -# observed_intensities=basic_intensities, -# temperature_responses=basic_responses, -# minimum_bound_temperature=5.5, -# maximum_bound_temperature=9.0, # above dummy response range -# ) + # 5 — T = 10**logT + assert np.allclose(x.T.to_value(u.K), 10**x.logT) + # 6 — logT strictly increasing + assert np.all(np.diff(x.logT) > 0) -# # ------------------------------------------------------------------------- -# # 5) Negative or zero logarithmic_temperature_step_size should raise -# # ------------------------------------------------------------------------- -# def test_negative_logarithmic_temperature_step_size_raises(basic_responses, basic_intensities): -# with pytest.raises(ValueError, match="logarithmic_temperature_step_size must be a positive"): -# XRTDEMIterative( -# observed_channel=["Filter-1", "Filter-2", "Filter-3"], -# observed_intensities=basic_intensities, -# temperature_responses=basic_responses, -# minimum_bound_temperature=5.5, -# maximum_bound_temperature=8.0, -# logarithmic_temperature_step_size=-0.1, -# ) +def test_estimate_initial_dem(): + # Step 1: Simple DEM case + filters = ["Al-poly", "Ti-poly"] + intensities = np.array([1500.0, 2300.0]) + responses = generate_temperature_responses(filters, "2012-10-27T00:00:00") -# def test_too_few_temperature_bins_raises(basic_responses, basic_intensities): -# # Choose a huge step so that fewer than 4 
bins are produced -# with pytest.raises(ValueError, match="Temperature grid must have at least 4 points"): -# XRTDEMIterative( -# observed_channel=["Filter-1", "Filter-2", "Filter-3"], -# observed_intensities=basic_intensities, -# temperature_responses=basic_responses, -# minimum_bound_temperature=5.5, -# maximum_bound_temperature=5.8, -# logarithmic_temperature_step_size=0.5, -# ) - - -# # ------------------------------------------------------------------------- -# # 6) Monte Carlo runs validation -# # ------------------------------------------------------------------------- - -# def test_monte_carlo_runs_negative_raises(): -# with pytest.raises(ValueError, match="must be ≥ 0"): -# make_iterative(monte_carlo_runs=-1) - - -# def test_monte_carlo_runs_bool_raises(): -# with pytest.raises(ValueError, match="must be a non-negative whole number, not a boolean"): -# make_iterative(monte_carlo_runs=True) - + x = XRTDEMIterative( + observed_channel=filters, + observed_intensities=intensities, + temperature_responses=responses, + minimum_bound_temperature=5.5, + maximum_bound_temperature=7.5, + logarithmic_temperature_step_size=0.1, + ) -# def test_monte_carlo_runs_float_non_integer_raises(): -# with pytest.raises(ValueError, match="Decimal values are not allowed"): -# make_iterative(monte_carlo_runs=3.5) - - -# def test_monte_carlo_runs_zero_ok(): -# x = make_iterative(monte_carlo_runs=0) -# assert x.monte_carlo_runs == 0 - - -# def test_monte_carlo_runs_positive_integer_ok(): -# x = make_iterative(monte_carlo_runs=10) -# assert x.monte_carlo_runs == 10 + # Step 2: Create temperature grid & response matrix + x.create_logT_grid() + x._interpolate_responses_to_grid() + # Step 3: Compute initial DEM + est = x._estimate_initial_dem() -# # ------------------------------------------------------------------------- -# # 7) Intensity errors validation in validate_inputs -# # ------------------------------------------------------------------------- - -# def 
test_intensity_errors_length_mismatch_raises(basic_responses, basic_intensities): -# x = XRTDEMIterative( -# observed_channel=["Filter-1", "Filter-2", "Filter-3"], -# observed_intensities=basic_intensities, -# temperature_responses=basic_responses, -# intensity_errors=np.array([1.0, 2.0]), # wrong length -# ) - -# with pytest.raises(ValueError, match="Length of intensity_errors must match"): -# x.validate_inputs() - - -# def test_intensity_errors_negative_raises(basic_responses, basic_intensities): -# x = XRTDEMIterative( -# observed_channel=["Filter-1", "Filter-2", "Filter-3"], -# observed_intensities=basic_intensities, -# temperature_responses=basic_responses, -# intensity_errors=np.array([1.0, -2.0, 3.0]), -# ) - -# with pytest.raises(ValueError, match="must be finite and >= 0"): -# x.validate_inputs() + # TEST 1: Correct length + assert len(est) == len(x.logT) + # TEST 2: All values should be exactly 0.0 ( Python implementation overrides with flat logDEM = 0) + assert np.allclose(est, 0.0) -# #Test to add later -# #both should be True -# # np.allclose(x.intensities_scaled, -# # x.observed_intensities.value / x.normalization_factor) + # TEST 3: Internal storage _initial_log_dem should match + assert np.allclose(x._initial_log_dem, est) -# # np.allclose(x.sigma_scaled_intensity_errors, -# # x.intensity_errors.to_value(u.DN/u.s) / x.normalization_factor) + # TEST 4: Returned DEM should be finite + assert np.all(np.isfinite(est)) From d11ef990577a304e4d91447e0dc6f352962f2da7 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Tue, 25 Nov 2025 13:45:13 -0500 Subject: [PATCH 104/121] spline testing --- .../test/test_dem_input_validation.py | 289 ++++++++++++++++++ 1 file changed, 289 insertions(+) diff --git a/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py b/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py index c51be6f22..0dec46527 100644 --- a/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py +++ 
b/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py @@ -174,3 +174,292 @@ def test_estimate_initial_dem(): # TEST 4: Returned DEM should be finite assert np.all(np.isfinite(est)) + + +def test_prepare_spline_system(): + # Setup: 3 channels → n_spl = 2 + + filters = ["Al-poly", "Ti-poly", "Be-thin"] + intensities = np.array([1000.0, 2000.0, 1500.0]) + responses = generate_temperature_responses(filters, "2012-10-27T00:00:00") + + x = XRTDEMIterative( + observed_channel=filters, + observed_intensities=intensities, + temperature_responses=responses, + minimum_bound_temperature=5.5, + maximum_bound_temperature=8.0, + logarithmic_temperature_step_size=0.1, + ) + + # Need logT, response matrix, initial DEM + x.create_logT_grid() + x._interpolate_responses_to_grid() + x._estimate_initial_dem() # sets _initial_log_dem + + # Prepare spline system + x._prepare_spline_system() + + + # TEST 1 — n_spl formula: n_channels=3 → n_spl=2 + assert x.n_spl == 2 + + # TEST 2 — spline_logT: correct shape and increasing + assert len(x.spline_logT) == 2 + assert np.all(np.diff(x.spline_logT) > 0) + + # TEST 3 — spline_log_dem should be zeros (flat initial DEM) + assert len(x.spline_log_dem) == 2 + assert np.allclose(x.spline_log_dem, 0.0) + + # TEST 4 — pm_matrix has correct shape + assert x.pm_matrix.shape == (3, len(x.logT)) + + # pm_matrix should be >= 0 + assert np.all(x.pm_matrix >= 0) + + # TEST 5 — weights and abundances + assert np.all(x.weights == 1.0) + assert np.all(x.abundances == 1.0) + +# def test_interpolate_responses_to_grid(): +# # ------------------------------- +# # Step 1: Setup a simple DEM case +# # ------------------------------- +# filters = ["Al-poly", "Ti-poly"] +# intensities = np.array([1000.0, 2000.0]) + +# responses = generate_temperature_responses(filters, "2012-10-27T00:00:00") + +# x = XRTDEMIterative( +# observed_channel=filters, +# observed_intensities=intensities, +# temperature_responses=responses, +# minimum_bound_temperature=5.5, +# 
maximum_bound_temperature=8.0, +# logarithmic_temperature_step_size=0.1, +# ) + +# # ------------------------------- +# # Step 2: Create temperature grid +# # ------------------------------- +# x.create_logT_grid() + +# # ------------------------------- +# # Step 3: Interpolate responses +# # ------------------------------- +# x._interpolate_responses_to_grid() + +# # ------------------------------- +# # TEST 1: Correct response matrix shape +# # ------------------------------- +# n_filters = len(filters) +# n_T = len(x.logT) + +# assert x.response_matrix.shape == (n_filters, n_T) +# # TEST 2: Response values must be non-negative +# assert np.all(x.response_matrix >= 0) + +# # TEST 3: Responses at the boundaries are very small but not negative +# assert np.all(x.response_matrix[:, 0] >= 0) +# assert np.all(x.response_matrix[:, -1] >= 0) + +# # TEST 4: Values near boundaries should be small +# assert np.all(x.response_matrix[:, 0] < 1e-27) +# assert np.all(x.response_matrix[:, -1] < 1e-27) + + +# # TEST 3: Interpolated values are finite + +# assert np.all(np.isfinite(x.response_matrix)) + +# # TEST 4: Boundary values should be significantly smaller than peak response +# for i in range(n_filters): +# peak = np.max(x.response_matrix[i]) +# left = x.response_matrix[i, 0] +# right = x.response_matrix[i, -1] + +# # Boundaries should be at least 100x smaller than peak +# assert left < peak / 100 +# assert right < peak / 100 + +# # ------------------------------- +# # TEST 5: Temperature ordering preserved +# # Response_matrix row i should roughly follow original shape: +# # no reversed ordering, no nan blocks. 
+# # ------------------------------- +# for i in range(n_filters): +# # Check no negative values +# assert np.all(x.response_matrix[i] >= 0) + +# # Should have at least 2 non-zero values inside range +# assert np.count_nonzero(x.response_matrix[i]) > 2 + +# # ------------------------------------------------------------------------- +# # 1) Valid configuration should pass validate_inputs +# # ------------------------------------------------------------------------- + +# def test_validate_inputs_valid_configuration(basic_responses, basic_intensities): +# x = XRTDEMIterative( +# observed_channel=["Filter-1", "Filter-2", "Filter-3"], +# observed_intensities=basic_intensities, +# temperature_responses=basic_responses, +# minimum_bound_temperature=5.5, +# maximum_bound_temperature=8.0, +# logarithmic_temperature_step_size=0.1, +# monte_carlo_runs=0, +# normalization_factor=1e21, +# ) + +# # Should not raise +# x.validate_inputs() + + +# # ------------------------------------------------------------------------- +# # 2) Empty observed_channel should raise +# # ------------------------------------------------------------------------- + +# def test_empty_observed_channel_raises(basic_responses, basic_intensities): +# with pytest.raises(ValueError, match="`observed_channel` is required"): +# XRTDEMIterative( +# observed_channel=[], +# observed_intensities=basic_intensities, +# temperature_responses=basic_responses, +# ) + + +# # ------------------------------------------------------------------------- +# # 3) Mismatched lengths of intensities / responses / channels +# # ------------------------------------------------------------------------- + +# def test_length_mismatch_raises(): +# responses = [DummyResponse("F1"), DummyResponse("F2")] +# intensities = np.array([1000.0]) # only one value + +# with pytest.raises(ValueError, match="Length mismatch"): +# XRTDEMIterative( +# observed_channel=["F1", "F2"], +# observed_intensities=intensities, +# 
temperature_responses=responses, +# ) + + +# # ------------------------------------------------------------------------- +# # 4) Temperature range outside response grid should raise +# # ------------------------------------------------------------------------- + +# def test_temperature_range_outside_responses_raises(basic_responses, basic_intensities): +# # min T too low +# with pytest.raises(ValueError, match="outside the bounds"): +# XRTDEMIterative( +# observed_channel=["Filter-1", "Filter-2", "Filter-3"], +# observed_intensities=basic_intensities, +# temperature_responses=basic_responses, +# minimum_bound_temperature=4.0, # below dummy response range +# maximum_bound_temperature=8.0, +# ) + +# # max T too high +# with pytest.raises(ValueError, match="outside the bounds"): +# XRTDEMIterative( +# observed_channel=["Filter-1", "Filter-2", "Filter-3"], +# observed_intensities=basic_intensities, +# temperature_responses=basic_responses, +# minimum_bound_temperature=5.5, +# maximum_bound_temperature=9.0, # above dummy response range +# ) + + +# # ------------------------------------------------------------------------- +# # 5) Negative or zero logarithmic_temperature_step_size should raise +# # ------------------------------------------------------------------------- + +# def test_negative_logarithmic_temperature_step_size_raises(basic_responses, basic_intensities): +# with pytest.raises(ValueError, match="logarithmic_temperature_step_size must be a positive"): +# XRTDEMIterative( +# observed_channel=["Filter-1", "Filter-2", "Filter-3"], +# observed_intensities=basic_intensities, +# temperature_responses=basic_responses, +# minimum_bound_temperature=5.5, +# maximum_bound_temperature=8.0, +# logarithmic_temperature_step_size=-0.1, +# ) + + +# def test_too_few_temperature_bins_raises(basic_responses, basic_intensities): +# # Choose a huge step so that fewer than 4 bins are produced +# with pytest.raises(ValueError, match="Temperature grid must have at least 4 points"): 
+# XRTDEMIterative( +# observed_channel=["Filter-1", "Filter-2", "Filter-3"], +# observed_intensities=basic_intensities, +# temperature_responses=basic_responses, +# minimum_bound_temperature=5.5, +# maximum_bound_temperature=5.8, +# logarithmic_temperature_step_size=0.5, +# ) + + +# # ------------------------------------------------------------------------- +# # 6) Monte Carlo runs validation +# # ------------------------------------------------------------------------- + +# def test_monte_carlo_runs_negative_raises(): +# with pytest.raises(ValueError, match="must be ≥ 0"): +# make_iterative(monte_carlo_runs=-1) + + +# def test_monte_carlo_runs_bool_raises(): +# with pytest.raises(ValueError, match="must be a non-negative whole number, not a boolean"): +# make_iterative(monte_carlo_runs=True) + + +# def test_monte_carlo_runs_float_non_integer_raises(): +# with pytest.raises(ValueError, match="Decimal values are not allowed"): +# make_iterative(monte_carlo_runs=3.5) + + +# def test_monte_carlo_runs_zero_ok(): +# x = make_iterative(monte_carlo_runs=0) +# assert x.monte_carlo_runs == 0 + + +# def test_monte_carlo_runs_positive_integer_ok(): +# x = make_iterative(monte_carlo_runs=10) +# assert x.monte_carlo_runs == 10 + + +# # ------------------------------------------------------------------------- +# # 7) Intensity errors validation in validate_inputs +# # ------------------------------------------------------------------------- + +# def test_intensity_errors_length_mismatch_raises(basic_responses, basic_intensities): +# x = XRTDEMIterative( +# observed_channel=["Filter-1", "Filter-2", "Filter-3"], +# observed_intensities=basic_intensities, +# temperature_responses=basic_responses, +# intensity_errors=np.array([1.0, 2.0]), # wrong length +# ) + +# with pytest.raises(ValueError, match="Length of intensity_errors must match"): +# x.validate_inputs() + + +# def test_intensity_errors_negative_raises(basic_responses, basic_intensities): +# x = XRTDEMIterative( +# 
observed_channel=["Filter-1", "Filter-2", "Filter-3"], +# observed_intensities=basic_intensities, +# temperature_responses=basic_responses, +# intensity_errors=np.array([1.0, -2.0, 3.0]), +# ) + +# with pytest.raises(ValueError, match="must be finite and >= 0"): +# x.validate_inputs() + + +# #Test to add later +# #both should be True +# # np.allclose(x.intensities_scaled, +# # x.observed_intensities.value / x.normalization_factor) + +# # np.allclose(x.sigma_scaled_intensity_errors, +# # x.intensity_errors.to_value(u.DN/u.s) / x.normalization_factor) From 895834c2ffe7e21ec47dca3251f0dc0ad91a6e7e Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Tue, 25 Nov 2025 14:57:29 -0500 Subject: [PATCH 105/121] Creating test for residuals - dummy data --- .../test/test_dem_input_validation.py | 123 +++++++++++++----- 1 file changed, 91 insertions(+), 32 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py b/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py index 0dec46527..798e71002 100644 --- a/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py +++ b/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py @@ -9,6 +9,7 @@ from xrtpy.response.channel import Channel from xrtpy.response.tools import generate_temperature_responses from xrtpy.xrt_dem_iterative import XRTDEMIterative +from lmfit import Parameters channel_names = [ "Al-mesh", @@ -35,11 +36,8 @@ def test_channel_name(channel_name): def test_dem_validate_inputs_basic(): - # Minimal “realistic” inputs for a DEM solve filters = ["Al-poly", "Ti-poly"] intensities = np.array([2500.0, 1800.0]) - - # Use real responses from XRTpy responses = generate_temperature_responses(filters, "2012-10-27T00:00:00") x = XRTDEMIterative( @@ -90,7 +88,7 @@ def test_validate_inputs_good_case(): def test_validate_inputs_mismatched_errors(): filters = ["Be-thin", "Be-med"] i_obs = [10000.0, 20000.0] - i_err = [100.0] # Wrong length + i_err = [100.0] # Wrong length - should be two error/ 
uncertainties resp = generate_temperature_responses(filters, "2007-07-10") dem = XRTDEMIterative(filters, i_obs, resp, intensity_errors=i_err) with pytest.raises(ValueError, match="intensity_errors must match"): @@ -98,12 +96,11 @@ def test_validate_inputs_mismatched_errors(): def test_create_logT_grid(): - # Simple single-channel setup + filters = ["Al-poly"] intensities = np.array([1500.0]) - responses = generate_temperature_responses(filters, "2012-10-27T00:00:00") + responses = generate_temperature_responses(filters, "2018-10-27T00:00:00") - # Construct DEM object x = XRTDEMIterative( observed_channel=filters, observed_intensities=intensities, @@ -113,35 +110,32 @@ def test_create_logT_grid(): logarithmic_temperature_step_size=0.1, ) - # Create grid x.create_logT_grid() - # 1 — Correct start and end + #1 — Correct start and end assert x.logT[0] == pytest.approx(5.5) assert x.logT[-1] == pytest.approx(7.5) - # 2 — Correct number of bins: (7.5 - 5.5)/0.1 + 1 = 21 + #2 — Correct number of bins: (7.5 - 5.5)/0.1 + 1 = 21 assert len(x.logT) == 21 assert x.n_bins == 21 - # 3 — Correct spacing (uniform) + #3 — Correct spacing (uniform) diffs = np.diff(x.logT) assert np.allclose(diffs, 0.1, atol=1e-12) - # 4 — dlogT and dlnT correct + #4 — dlogT and dlnT correct assert x.dlogT == pytest.approx(0.1) assert x.dlnT == pytest.approx(np.log(10) * 0.1) - # 5 — T = 10**logT + #5 — T = 10**logT assert np.allclose(x.T.to_value(u.K), 10**x.logT) - # 6 — logT strictly increasing + #6 — logT strictly increasing assert np.all(np.diff(x.logT) > 0) def test_estimate_initial_dem(): - - # Step 1: Simple DEM case filters = ["Al-poly", "Ti-poly"] intensities = np.array([1500.0, 2300.0]) responses = generate_temperature_responses(filters, "2012-10-27T00:00:00") @@ -152,10 +146,9 @@ def test_estimate_initial_dem(): temperature_responses=responses, minimum_bound_temperature=5.5, maximum_bound_temperature=7.5, - logarithmic_temperature_step_size=0.1, ) - # Step 2: Create temperature grid & 
response matrix + #Step 2: Create temperature grid & response matrix x.create_logT_grid() x._interpolate_responses_to_grid() @@ -165,43 +158,44 @@ def test_estimate_initial_dem(): # TEST 1: Correct length assert len(est) == len(x.logT) - # TEST 2: All values should be exactly 0.0 ( Python implementation overrides with flat logDEM = 0) + #TEST 2: All values should be exactly 0.0 ( Python implementation overrides with flat logDEM = 0) assert np.allclose(est, 0.0) - # TEST 3: Internal storage _initial_log_dem should match + #TEST 3: Internal storage _initial_log_dem should match assert np.allclose(x._initial_log_dem, est) - # TEST 4: Returned DEM should be finite + #TEST 4: Returned DEM should be finite assert np.all(np.isfinite(est)) - def test_prepare_spline_system(): - # Setup: 3 channels → n_spl = 2 - + """ + 1. _prepare_spline_system runs without errors + 2. n_spl computed correctly + 3. spline_logT shape and monotonicity + 4. spline_log_dem has correct values + 5. pm_matrix has correct shape + 6. 
weights and abundances are all ones + """ filters = ["Al-poly", "Ti-poly", "Be-thin"] intensities = np.array([1000.0, 2000.0, 1500.0]) - responses = generate_temperature_responses(filters, "2012-10-27T00:00:00") + responses = generate_temperature_responses(filters, "2012-10-27T12:30:00") x = XRTDEMIterative( observed_channel=filters, observed_intensities=intensities, temperature_responses=responses, - minimum_bound_temperature=5.5, - maximum_bound_temperature=8.0, - logarithmic_temperature_step_size=0.1, ) - # Need logT, response matrix, initial DEM + # logT, response matrix, initial DEM x.create_logT_grid() x._interpolate_responses_to_grid() - x._estimate_initial_dem() # sets _initial_log_dem + x._estimate_initial_dem() # sets _initial_log_dem # Prepare spline system x._prepare_spline_system() - - # TEST 1 — n_spl formula: n_channels=3 → n_spl=2 + # TEST 1 — n_spl formula: n_channels=3 > n_spl=2 assert x.n_spl == 2 # TEST 2 — spline_logT: correct shape and increasing @@ -222,6 +216,71 @@ def test_prepare_spline_system(): assert np.all(x.weights == 1.0) assert np.all(x.abundances == 1.0) + +def test_residuals_simple_case(): + """ + Create a fully synthetic DEM / response case so the forward model has a predictable value. + This isolates and tests the math inside `_residuals`. 
+ """ + filters = ["Dummy"] + intensities = np.array([10.0]) # I_obs + responses = generate_temperature_responses(["Al-poly"], "2012-10-27T00:00:00") + + x = XRTDEMIterative( + observed_channel=filters, + observed_intensities=intensities, + temperature_responses=responses, + minimum_bound_temperature=5.5, + maximum_bound_temperature=6.5, + logarithmic_temperature_step_size=0.1, + ) + + # STEP 2 — Temperature grid + x.create_logT_grid() + N = len(x.logT) + + # Synthetic pm_matrix = constant 2 everywhere + x.pm_matrix = np.ones((1, N)) * 2.0 + + + # STEP 3 — Construct synthetic spline state + x.spline_logT = np.array([x.logT[0], x.logT[-1]]) + x.spline_log_dem = np.array([0.0, 0.0]) # log10(DEM)=0 → DEM=1 + x.n_spl = 2 + + params = Parameters() + params.add("knot_0", value=0.0, min=-20, max=0) + params.add("knot_1", value=0.0, min=-20, max=0) + + + # STEP 4 synthetic errors + x.intensities_scaled = np.array([10.0]) + x.sigma_scaled_intensity_errors = np.array([1.0]) + + + #MISSING IN ORIGINAL TEST: Need abundances and weights (normally set in _prepare_spline_system) + x.abundances = np.ones(1) + x.weights = np.ones(1) + + # STEP 5 — Compute residuals + residuals = x._residuals(params) + + # Expected residual: + # DEM(T)=1 > pm(T)=2 > I_model = 2*N + # residual = (2N - I_obs) / sigma + expected_I_model = 2.0 * N + expected_residual = expected_I_model - 10.0 # sigma = 1 + + assert residuals.shape == (1,) + assert np.isfinite(residuals[0]) + assert np.isclose(residuals[0], expected_residual) + + + + +# ----------------------------------- TEST Against IDL ------------------------------------------ + + # def test_interpolate_responses_to_grid(): # # ------------------------------- # # Step 1: Setup a simple DEM case From 264a734acb5f5f37020faee246957481b30a9783 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Tue, 25 Nov 2025 16:16:10 -0500 Subject: [PATCH 106/121] Adding more test, and cleaning up formatting --- .../xrt_IDL_DEM_2012_10_27_MC100.sav | Bin 0 -> 22480 
bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 xrtpy/xrt_dem_iterative/test/IDL_DEM_testing_sav_files/xrt_IDL_DEM_2012_10_27_MC100.sav diff --git a/xrtpy/xrt_dem_iterative/test/IDL_DEM_testing_sav_files/xrt_IDL_DEM_2012_10_27_MC100.sav b/xrtpy/xrt_dem_iterative/test/IDL_DEM_testing_sav_files/xrt_IDL_DEM_2012_10_27_MC100.sav new file mode 100644 index 0000000000000000000000000000000000000000..eac9fc1a1bb746b6b0d25545f6ff1f67371efac0 GIT binary patch literal 22480 zcmeIac{o34sg&-dx``{Vo1_rK?QU(dOmbN2nZ&viR%uf5k^`?c2IW~OA6WMpL9$jB)5 zNcaCe{i-h%(umKV_Ov1)U z*aQiiAz|~1N}7u#Y=wlaeW}+X{d3YmlI2MGf0h1y*@A@Oze-f3^S@;wy|XDJ$>eI2 zFw*mXkN=j%nS}h`2;mYFJeIConLPk^#~)E z&#O$#UPQr%Qx=2g=ZV}`p$b&%I)p=p6Hd5(!htzAgSw_Aw3*0p^>)dC1=hp%Iv%&6 z;&kZ6?U!Yt=rl#3AGMoJwWo*auC_yKOeT+h6y z_kcetFRAuQj>i1U_Tga(;A#v%K{py`Vao9fJAXq%E` z4f!nyojc#RiAjC?k31Dq&o}NYgV~$v*V`v{W1!uskprI>iNnXuCGI{FAPSWq?qy7> zC34+#7Jddx5%wLHvgHy|*#AW)?&}aYp6FmZ&FfVJzq!s;kB0KX$G~bH(R-3m#m0QH zX7fG#T<-Bu*ib==SlUU^?d!<@_@R_jV;4#s;+UHh@<73w^#UsoA!JsU5C82U02`qU zy5#dlFj1v$%xU5T{a>G_QJcF$uP@Jb%I6QEFF^HS2ahQX<|rgTKUxK2Qf-s4^|Pih z8C8vd{1dI-+iOclp00y=yoGG#>URtXw7d0MLxC`ec^AU6dpnUYUPAXUgNw*5wObVb zkx4izxAps^*kPZViZm17EA~? 
z?M+=f{S^zC@%mX^UWiBkp>9fjoi)Pn+MT`UE(j3?YTic~3oVG;4{c{E=nfN(3d&@4 zvk!34$M-jz>jSi*6=xky{0_5p$!T6fsgNIK?R#`}5i)H3ANNasgrdsx@{hgS;M*<_ zIlY^0FgKLEJ(k4+Dd+<(($HN(DouU9>h?*bIN}tMtHcGXx|{XGM#->ny1jU(1TV}U z4fI|P6oeUjN+D@+ewZ;_9U8Ph2Q$Ru2RclrU}{lrlzKesuRJ|l#*XYd*aT(;1)a{< z7%`ClU{KNb1BB6nRb~KJD^aLemCT=GN96JAop;@6CmhMM1F1)AabScxx;%mxPaLx; z5fapd*%JzC)*m*ZNTTv_nE-AaN8`*RL&SNvG;2Af2RrH{a6efG+LJr1;`^0uaRXKT`2w{lg*BB zyhvQ+^driWDv1((_31!oXCj-$Pj{I865&CgkwJYjSN956Sn_iKWEiu;}&Sr=>A1eEc2HyZ`+G zsJtk!(^GZ;y6$Qv@(5o-vKR4F8qH20pUha(nS%T|$Lf%7E-@>7i~+Z+E6&rC?M8;0#U}oe^T-5E&m3Q7YVX&u zf44T#4_4RR_qd1Wpzh2;<~Iu^#Fn{&Alvg_iL0LJ9F2!Zi1PgEkZZ&OktJ{UCy+dz zIMqMqEB5+4K})wMz|~y_pWL&#nK40zhW*b*!!JHXe&e&wm+$i<)5RLw0MQL(GwG91 zBIHrz)V3rFPX|;iTT(SSZh|V6=M8ITuc7jezUE76r%?9idO_Su1B&pQ zUF+`Ew7!h7W$nQuHFmi-8(@8-@UbSX1a`{#bFd#hR{jT7_bk8HQT6a-By|qzgCmrrqjwPP!x#G^Oqk=yg zmvPHUjtifayWr2^k-9WL8d(0MaOHY62dt->57fzoA?26t4A=S-a7Uv?MBS+#t^UBA z-LsbzfPL&sEKipQ@{s8k)}AcJvC7>ELIJ&mZ|(C)xe6DeWGnxO$=xm@Tj6)4jd3vH z5!o^KJWzz7jU|hEp2~ws_TExX4w|Ub#O+>K`wm%O^L%n;`+}4o){YKZ$s=vh9rNAS z0NG*;DvT#LQFvl*zM&`{B@0X1IUZHv?w=1&Wv|^q5owK^GESe6XX<+{IWdYmTO%dj z{*Xn6)GXz08+xRPkvEUy-i;K;J1S{YZo(RmNw7?gH!Or$NhU4|{T+XLqn=T0bDjaq z@C`Xrb7Ktg5^oie`bL=ElDlNP{)>3eP2Md*9Zlr2yVuMlo*``1^1psN*o~bdjzL8m z*=SyA6Rfjg0#nm=l*JDFp;GCg!rmYX`1G9Bc#J0jMzY?H99fJ)#s|qg6ovay>R6%e z{X`{{y~0xY>lLY=SO?wLy|5oPsXP!~xedkU9mb#NufV4SJ;$)zQ26F|hrSk#0=J(zNa1~8G*Xohc{S}N%l zj~vlVl{YxFWfvMdslT`AHbX<3Ugt*ccJQ@7-wV%Pqua{QeY$S4f;#ODOWC$0@0|m{0N^#sx=alFHH*}?!Z!$c38`frf?ROvk1XU~d=i+zq zLV=ST^RBHKQ2l37VDT|voz(?a`QcI)qzNH*5kM(hEbs< zR2cpoVZ0?Yza8eJ?uup6QNg4nLw)qXF#LKfYAQ5z5PtE#px(t&3BQE)*A|bRgfZg~ zG6hYhf0~~D>Zf-H(<{M*d@$d*-{kkG4udXQzBg(*LKwd&jf~)XN)%b0-$NODkH~E= z`*S-sl(4_B-gx^$dDwCo^5)iRBKVi#?R}H|>)S1$ zs=|V<|0)5)acqoC5?hgKA)HptrVrUl1nXvOx{xz3OxRkV3t4X+dG_p^GH$>AwNK#e zOQbNFViOHJ3@d+<+N&h@!u)s;mtzVoOz*!Tv|tzsBX3P|di%AZ*TdYV<3jm=3$H#D-JYfdSCenv+Uv|&f#35BtrO6Q4tN18}z}#&0L>2@de}BZ%|+- zYf$;k4rw&cG%5x=3e`pPA`%vMl2rGFmH^MdMP;B$E@afSs 
z)L&H9Jzsnq%_!70vb#Pb-po9yQP_?W>=W;+6nK!ft1cs1uL~xwMl60|(}eGnSHGXL zxDAb0Pls(}CPMAS&kJ9~dErywa-xDmE>z2oTrnzL{X5_878xF!^Pd5eSAPAYVLa$( z@6dNLG>Xt4&i9JS;UMyE8nC54OCoal3nK(qO9;o0N_oe5!*KZF)|!dBt7t#GdbXBm zhu^>N_x)bn0_7I^8*bia@D3N%xo+=(s;;Gt0;YE8kI$OvOx*$-k(#vjTX%1xF$Y zQ#T$Ty|t4>!NMHH7vd|C%Vr)w<~U6_P8s*#uvf*gIl4PZ;}Ll3ZLX&}T^7=&GaL^n z;fG$z)n9=xYM}q@3x@A!XpyX1n%qc59!0MO^pP(xAt5s{BrO$%hxJR_&7Vdf9!d=3 z`4WiIYf@DKD(<*rLzJ?_LI$Qf5(dam9)ZRY|8JEp+aXK6#fB;7A;j|1lo)ZC!#OGA z2DkQ9I3z(6cc#YjZ+rVGBxvI#qYkENZrQr`cH*T?=`};MS;8Qo;(GtXQ$)U>hX>Q| z9Yn4vb8r@a4dKX<_$lGO7!E%DEZl$gDcXdNd0jN`hS>wtqDdN>kgvIIbR%va($CiL z&CAh1+SRis(y`wJZ6IQK%|zY01{5yu<uhM6wu^L?rxVPULKRguZ*uYP)&D;OjXRf7fV{vo%tObp6Rnbf#& zn=o#Wk-OGDM-=(L>N)2zPUMyiQOHuT5%y}ybo|?*u(xA*qTI0pt@!K9AGXxOWa7P1 z^_(qG$VX9`?tK$-L{ma+L#3fcw&;+9_d6K%EmO-W62&c%@@t3di*b9J+{CksgGerCBI3CiYfrUa}*c9A^F*T zxfx>LJ(6HNyl=in`y?lr>k*8w$ZNi7^=ZTzKcz8kHlQa(~VehT9&gA2NC&G0%%=f;txM7Up8 zFFm(93r~t-UfemZ2L)#{)jyw(f|iP_ME&O=82ntmPRDf#W*+)a+w7T#h2_bTch4JP zv4MWw*|m{-ES%HIXw$ zPp7z=PMrMh>~54?gMYkh#`a$ShSzi&GCtoN+h8j}XgwSGrR5SH9Q4ZRoOVO)nB>Q499K}G-JNOd=6w{9CK|7p zZ$YM(ec{M-8@FnH&fEX#8PddNd%VtmkK3|n0$;T3LniWO!}KSkq;Z*;Z5E0AYu|bd ze(88I(15w3$&LGAtay3v$?6kAri3x42E)D+SBSzE&$S(#^h9n4HaVTBCmiV9MjvFU zWB<*o3$j1i&|3UKMn^&d%y}`Lb~YV>@_HrrcRck_{G3}Zf!Y^ZrlSmLAFIMbXDsFQ zPc6u-1 ziC#43%334heXIG?sZSA3m76yhljsQTXQd4VZ3hWiW%4~t!iNZY~!F;!?^&UZXoguR;%^!Qy^$qv1q+-5&JQwY<7e;&XX65NGqKorba={^0Jj7Zw zqj$pvHGXZ}xX{su8fP0`V@ns}ea#^4$7E>GGrkZd_8v_xl}zx*siDz??_0$K8i=oT zAD+2Xi^|cjo}TdtKoP&(lo|yGWFZUNKD|V#8B!MF;InmHo45affzn1F_@AyJC~jQG z+r_;uxsz0c#dfO~!%>Yy;f@DQ=K`#V+_P!hG^6SXmr{kd`M=KMY{bXo_q43hCxVLk zO|c=eeP?diulg0H?@2z$tR`TN#;4Jt+Z`EljzdOLD=M7)e!5PA7Y#p#y>(u8LetQv zrEh8p#L=%OFPz~-=`0VbM(HVJrWCuHImiwx`|m#peEbf^%3=!6kl4X~gNW-^qc+fc zm|0}ymLv4udi1`FhZXvQ#+BJ)$^U8o^*>FXzxJSeQu^Lrr~=b(G4zkU2GD=CXis0_ zHo|Dis=qC>N8U{!YNQsODlAwTX$-esBjFChr@^Q=Lbtp6J5u@foOmMfDQbz3V8~ zM|Ng^K{)a|oxIt$Fo1NrM22$9BCOngFRIn&2{Ui&p2RhNhe_crbU%*t!qlYJWlt&* znA1%syRjhwi=uLcLrLp8HVpQk;0vEV@$sJPesLN;;)<1_SSU9^7$2K= 
zzsX=q?A=k_EVe#Ns9$UJdfVtg7#-%H-FI!0I23DjS~D|(;8L8k_98?use$wLo<4S{ zdQcvw-w>jm8$!MsOMO=Bxu=<@sz#J%vr$6f^aPXU$&yEbPg^E5?@hg_d$5yHYwyxg z`;nPWZRT|CPnlcfwC6rnM zKkG=!NLK^n4(cl7NbSG+>D8v5`uNxZESxuQY(6=O!Hk+?6y+ZX<9V)wYbSJxqC139 z`|WK+?)yaw+9(ymp79K?rRWFjHPJP%P~k#Ln`3pqIjCXm>g?>z&n=J>$aY~$(Fb0> zslOpQ{T|B9<>(78&_PeGlwQO!I#_dmbbRdNHQY9-pO>@RhxAeh{iBO#EufHfJ`NVH!ve7QPF!0#q^CWI{04*RF2%vj-ON&8;=3 z+W+`FI}&Uwk}beON=9nR!V80`DVQHF2@xh+$NgUK`9KsEXMN5Z|3>6Kzg(TL?FwNx z>r+!%oQ%B>IC94%D$(k4Tk_K5n=qq5>%Z4$8p?EJj?Q2C1@BK>x~{z?0orE|ztHnM z46BKrn#)@wkn8dzuhcD@DBgFz+N1a}iZN8QDqL+r?j&(5_ELIeNM^oq(q|qin7WMw z#%y5CXMkP!*$Y_f;4qK;ib$@vQ_N{%2U0&OyKlzoiS!F4qh5dV|FQPqXZOUBF6Ch` zds;1dsW1TpY3$^7srnN}ALwXBMJI`Z($Tv@bgzlr14g^voo*)Vza8}48S07s5p+Bl z?}5iBCmip-R)N{{!L79VfOq=S4_GXGQDSy zIhS@6g=kA;-&{R~qI0R3+q$0==l0c{l-z|J-)+~*^+s`rMfzhaJ}acMD=5iVvV@J< z<4&T>DX@51@~qg0?J&)yC4NFR1_mr22%pH8|4;w%z2DrL+yxeQqBbAz_r~BY5=m{N zjD&H4y4De06QW2(_UaqCJ49}A$$^}Or-Z$v_@6;Mjy=B?gD#D;qS^1yx!zAcLI3c+ zC-YKW@XRaBXGY`^#HU-hag{Q|6DNxx#|j%Le(T{$4En)0;Xgq)<#=H%O5Mr(Y5~l( zp160N`#CJM=5Zcuj)3`n(m$(){9(qbs{UZ}GZ>HKIuL!47KS@)78|;9p?{l+>aO_> z=+KbzetnM}nwFfkTtvG5%G0xu`s<#16itqG115~tlO#NZhSe*;ZZU{yfZSjE*G6HgG3N27U zTXLv_f*U#nm%YXvqhRBt-9Cv;9o(gDGpt+1iu}FGZgrx+k=5ixy7mz^B!8H$@g=wx z#=hM?EXKMFUz=#wydeh~QaAD|-(80KzN$t!T4rdaXr(atA_iTrzPkLpLJuQetvOdj znEqoA3c^CS^{RoztqVSTmo8z@t|=?IVt?XrGi9K11S!7F`%<`AbCt-=79oU#2)1MMH|^GS_>{vAnLdEPRu!IkQ~`tUc3kT9!HDJ#K^+j#-kd4 zH4&u9^4(LetBj1j_W2bb2;>$d^J~Ut6v@}R@@s22%5E?5yn8F{pJorVelkqmAB`G@ zoli>W2HDM!vnuy@A9WRuX)Wpp+^rxkstnsn-C`w5ZiLs1>7FIB-b&{mD?3fN%lX*3 zv!)W%T5*r1PUPd`@Q@=)H0pTBy(v_t(Hw=g%*7?AEFpLH(t>=6DN5Xn%MvA)P~Dhf zTkO(hJjAvBB=EK?>W;k$*4@~ODqIuUx@YZCP+x{GH7XhDWR71cr4NLa;;lUi0jn^- zLwfqI<4yQ26#aXg`4?zZ$;B6Mj>1tKuv)WgclEBk1ur`0W?kK%H^wDodC2q1p%aH)B17`)H zsYh(npHBe3&U^M0)w{qTu}amc84L?4gU3hHcK)?*eaZG!qpP-HIb{4K?K}$x@6+CO z<~kc;yibBW{FNwC=&JZEF8dgfr&5{rX(5rYA9$Ra)qVxLnS_;6SzS?&{_%dRlbKLC zHha23Pyu=+^Ksb1!mT|4})}Q 
z;Zo&;ZPM3gAyp!J-cRis)cI@}Dg9A{(QPX?XD{xAl|8MtAA6i&b99$d9cv=iWOA=KTRBJ|6QJ)LmkpN%e^^VSDl7 zycIuD^wa8NOx3^mH%+@|2geAziR+h*5)819@s+=F`ffZ<>@?VGbpd{F>wNc1FAZwD z$CzPr3p5It&(Uh+!nA`_{|o(F$X-7ZT@<<#RT@I_#NIa}1aCC@H*n+ry;k(rpZ}nk z$6gxhN)2TEJvT5;q`>O(lLzO1=)+7xE#1JNEsRSWiTN)wz<5;Mz-NInm_8*<(YiDa zbG9dLe_dDo+unYrRB4qr`hi);<-@_pyz#Pv_vIDJdxWvghNI7t4^ia8UgPxbGKn8u zC^K83Ash})Q_<8|;XqyB0t4p|+G|;Mgnyug^#|qp`sS<97L`raE_Mkzs~oPym=D2* zE0<$u&?buA@SI8~D@C0)pAuWyO*Cw$_m`^URpS!v*@x#gBzJl>W5`-I+hy zE9Ha0w3dT4ghCVp#9XynpF0vpPGz)vIC6-B?DPGo+d<^=1=^ph)h8Ub9F$_tI*NmG zMx|edHPK=Ji1hBY>qsVG=6KHJ8ho9UyYMN{9eQG~Zgu^Xj^v3GCf0>QC^m#ZA{er*`YP)SqI}1Xfx0znB8$C`?1c4k;#9=iPVW2wf`&KgyzFfT zOb$yrXj)&11{{hp-lk5-H+CUDI>{ZGh0~`A_lwBkyOfbF9fjg#4IGuognP{HT80FL z;NEXJZN3~AQGtrbujmXViZsV$J)ysV933>1j(**^L%u()u(KO!T{SflY%-8$ude6j zvtiuoa_RQ07%MV3b+*+UphV`?(~mQl9{$x&mmW=uu(&apaOsQ$zWRw5$K4a1UwRYz zOBB>8am+;CCZ+B+(@7$?d^se1ZV%x!TqfsMI*nuRjbxmzGNNk(od9(=4{njX7aBbB z7255{rjIxZLEE5A2}|U2m|o4Tum8Vzp#(IsVB$$7z^0{ddftL@aOp2doBMuvh11Ad|&@Q}d1tP z&EnRB!=+E1qSh$TxyoBY(^nlSSuS?CjU#kZx@j;Kq{F~xl~4(ZUr1%*d2`FjS(Gii zR2S$ujE3X}2`TGa(BwWN&-cyOsCB=#jeBGjMUpdmO~TZXre}=HHgpfns&*{L-DZQK zs+VWjSx>_7TPa?i?>R8bZL^9iQINQrvrr3*pJKCn|N?Ehc~a9Qxlzo zWygiXNG7_0?8%8g51{d7TKFQ5~>4heq^MVJUbw!y_)K!IYcKbaX`N@zQDP#Yt$rADkUTGR= z4nld;_n}`!F3=cef1O==9s1~mk8)?r>R3ykrid#&qQe^JCA}$DO0>L+a{KW74srMS z^_RnK_lT3q?$^_`>n1Oc{A0xjI%2;m!xiT$%B z*a^xTPm<4}wN2y?AHgUnP20{w^?4Hb9_~Drayd~uA@X8Yw)!J&i&~`vKL$m#9jp)Y z1#V-|es%QG!IFa_z;^G$_=6vh;o9!IH?)D)zVVPPD^Zp$Byg7F2a#3Q%pD`LPMpGeEv@!6f=asIxnl1pOyo0r_E}L3^`oOI z-bnJI;4irp2KLX$OCh(l^7CVqR^2Gh=6i#%BD`D8{ViI`H)$Loe~!nTl|G3dZb8#~ z8XlTWbqHDlqqBQ1q5PjB?w#uOC>TIKwj;?FInKz5XEsmaPGM1Qx3L&xF$ur%b7=^7 zs&<$kHTsCGl{eSBI4%CRx87!nOy=q?uyp5`d_&A(a9#Xk!9xv%$(59N+UioGm`Y}n zP$TgNuhkttxLFW(=fuh%u_$5pxeWpLqi4}%Q}?jq3s>md73vwu^b%5e)HO_9I3V6q z#;D!55uW5v6#jY@4ex1I+jrb*gZ4suBR~Eu81zsTEsQXM(I@VOJMDhMaIQ@tUs)ye z_n*&>WT$}MR8#*$!Bx=rFlFIJLo*DuuWXlhn1`vYie`*$f$+P+N+o0~HEiCAcYM}7 
zqb)W(JfO_z0s0D^wh{v6sJ@Fg;gLK!t~KWxYif=X!8y#)^I3vK=_@m?h>J&vtc1~q zp2{lX%$AtM`NnAkoitvMigf>!0*<3jDBM8ar z3PtR)TyU%Z#8h~s46@OTzBzEy2l-X*YmR78q0lNHkJ_2{D00nluuR_>McB&YnWXqo z$oE{h7Ka21*sZ@-GY>+Z*`89eG!x`V3H!37P=_r1M_GgR4tlnLXr$!D!d<51z!j`+v=vqWyjkzU#feZt=EM#Nf968091 zX_8INqvg?#)+P6&Fp@6y+ulwHGUH>`pyDJvRr~IDSNgHEAm0oQiF3iQy!+8cC{X?XbhA7pfO;7@MHSO;*Z-MIxxZcgkx1vbwrPGq`H*mK+dEfDS3#f44@Y}8Lk8$ti-m%OCQl72d!|JttwJ5#2 zUCH$kBLB$?QYZFOAY(#rw6bLZl6keaTXKK@r|IWE{bviE+MDCzB>qjb+yC+c2G!jB zdM5cjVLX=XxmDhgD0&rKHap!;#*{tys_Ij@V%%?N3k41~<4DG_BdrC6Yl1yOc z{OfNSf`d@nUA6G(O)3=4DiXeS%FxCTS-tyCA}n8J(UVYKM-FknU%pR+P*i#-*7kik ziU`k#X4b|c=SqIZ4zqNmSA0S9RE(4-Gvpc?VKxuTtKay89|uP6~xO)SP?}Z zZ+WJ34iULe-ie04b0qAhYbsfv+F|dPRIbp%Y&`nZpRHbO4CYu|9~^5mfJ&`!R+H9X zsI)!qt<*#sN4RD`$G>MoN{a($+P_Mnpd8uGMzb?0QMc>(4$cu2pgb1cNzaYjM-HaP z+&K@wFPJvuG$=!VTdYO*dn@?<(&yoH#UA)Z%R&CKT^%~_FmYXpt%Cs#=MF3BtuT?q zaK-0|8mYbgpY?O;_cZt`N5Smu+xL@aJTc&ten*V7H(^v%)8Qp_o+#vVku_?qAo6$= zeo=~f5RM}8=RJ9iaWLrF@46iy(DwNq4)2U(u%LhQ1oM?zs8BgyBOT}rg&B+qN&+vT zvFXlygyChF(ZBA1xg1EhaPD=_k`ihD&E$J{Ya_C$*qEsY8sYYz+fCCxY9g7*YuRS8 z1ejm`!BL(X4bx^U)W4YRVJgV6vcZ9r_ry*Y6{pD$3%5A__X zQrvRlAeg^t3D+wr$3U~!t+#J_5{7f0*Ive|5CtmN<%(u55P2MN!7c0FgrmT)R~3UK z_LUX~aa-&})8hu~CvqM@d!#6zKU+71MZ9g?nc4`pTQo&f+PuNMU95=KvmflgMpg90 zC4qlpv5s?>DkQw(`c~;p1LcS3`{u1le&yD6zX+-Gr1e=sn+-9vNSAW?zUoyEWN_H^ zm}z(~QcN`Lbr+U^iMHO4j)Kha<_vT|ZG2k;(b~hdPmaOYB6ciWgzm_-M$%UJZL`BW*e? 
zYtfSH=@&|N3mAJ>cHr7+YItYLN>|!<3|?(;b!ocwLB;dq^U}f$(7)e>ob2=kBvX3g z@>|*l=`GTeP8KL4eSk;5HE%ak-nz%kb@v=B2YN6njxWH}CFVQ(j)%jDNb$+Vz7QDf zmiWM-bPNVt{{)Na*1=G`LGv^dCk$2BobLK#@K4jv|H|{v>Tmzk^{KgW=lEW4H+F5` z@7xw*r1?}nSCvSdtqHz6oMF}Z{Se_yFSoBQqKzmqx;NdzBT2ktby&*%@`G@`{Ow)B zSs9#L?%Uv|2}FN8k)Foe*O9MuX(E-xUoI3VGRE*st9^mPmaX2ldRh zDUv&&!)La6Ck`znr;D5&9f*PNp6Xg}Bo9Du?scflx{MU2$&NcZI#J42MK|JT03OQT zvxctAXzF)DAi8!E(JyLsMa>^2tz&o;_dLQK$5^#?@9u=(RX>yaKJAB*WipM6#pN*Y zo#xAkSPKmGez2o(xdIa(>k2(MxM0Cr?yFs`;9r0Ar>$fS3vmZb2lymj>v*BRFH_cd zAv<9xLq1in9846Lvrk?wNh9(C&GR|OO9`i-%H~tT7jgK6k+i3_FWPx1u{5?6!|a-J zO6v3@$Qv{19OPSv=Zkr&l%7W*SFn2b8=Ylnn*T)Ypi>Lu#$$Ojh4!$Td2T;#A~ljv zQXEb>5Qk({^U@P^`TyFm*ww@0^O z;BJOmYtp_C3#@)_1~B6ub?g2f0T`wb5mn-jgCVb^kecngV7#hT=7<+1EHYE9yUYb5 zCF4RSDaff~AI@|5hhz+)_9u%?|4#+#)nAC`bf%@t z?9qOW)c|$$RZ&qIbjZeC=41T_>C0heFL}*_s2G^ewA15ora!m7IhT36+*A|<5aSvD?Xy$!_{DM-;=K6t1_0TK$xCA%M%%|HetyaK*=hx)W zbpm>%J)0_NbfM?P*NR6+y`lfU^J?e|G8j>n8hoV?t*y1PqL2}N3T(vFEiDhP zpPb?q3X6yI+ahe#+;3`E`oKLvt0n9hyL~Hzvoz z^juK>GpCvi=Q~uY-=!EQXM;-aF()+SQ&E9V{ozyADU?akH18yj$K6^ZHMd&LQS?@* zg#Pp=6rw3TJ6_X_yx)Rc--rD{_Vby^Me0)j)9uy&>-+tU&-D~|W}MO-1M^#nr`=Q< z@bVj1-*Uy(KXiNcByIhvZ?MD7)xox<>)uy_6PBBeS9`^?y=4(oQI)gL`c zW8=p#Wt=8<@@fnebYHo{xiJjc(zZOTuN0wre|=hk^)(nibxVfaupcRkgw_0V4kJTw zbdU<27H+G3A*rfLS~n)V+vi6`Cd?FF8S?SE3;l8858`W6p+oN0>CbvG@HPL2V$hH| zeDxFSj{m(0-;!VE?qU~*@9DY=DO4Z+fA-*ws59dln9@H!U$!=he%}?2&gA6~hs?%q z^c&|9`E=!n(vwN+v*rfPzDx%ZPUUCMGslzTh`y4@GKD4D%O=XJTSxXHk^d19KU73PSXA{l0xy}ucKi8-BreoFl$&ms`O5<-yD`X*D`p2`)KcP zic%rd`U6Kg+z9+p9!aHRHw3TF7I z*PuJEObInLFSBc#0-)tUI+icDz|Vuq)ZCfiimw?}iMd*=X{9&Je9(sT{| zCD!Un7##^ioq|1|motchz0DK7aVLq~xKRP>MbiFA`|Kn#f)#L>Vou<0?hAA@)xKzz z-VYo32YE8{1faR4(rjac1zLU~uS2Xl%!sHa4$(Q{PEQF#3Y#*NUwCTZuK58~?p)IJ z;JJ^ol)GqU#uDc>c~XXm4R#M{GnbAyXa2zNG(W`3t11f9T^A8+%{;`41|bZjQK@QBQ> zqxbltP)5LPAoNiw3Q=Bj(7RrRl9Ts_DvsFUft%ti3QyF~z|!-iOj|OVD9(t-_}8L= ztk)w!)&kT%AS|6@k%fDb%Djrtbfcv39~&@+TaYUZMC;epT1I!mCIXSERrrWC-hYM~(QDy<4TF+$4$o%WVf5T9)|X5q{@qD#PM>8bOz*YO54QG#8H!=) 
zvA`4mwEpwI^8C-{6aS7sJ$D=wJGdFa{DujC=Eo0sxmokcsW@7~*yOGC(sCV9nD(oq zfKi-yH}A^MqI8F_duV4cH~bTOLn^-CBW|NrzV+m_^cOG_0V`!n+n{_W#bkJ8G?d6r zh$*g3!q?L~PHE^d!Rm)J-z5WYcz={pa1uHU|l$ z$rCaj*jq!&zrJx%A^9ZC`O3yx2ByNyD6Pbt!3&rZaEfpae@@EpJnENSBTM4%9sIJx Qw~+RPr2e~uB&qQK0W!=NUjP6A literal 0 HcmV?d00001 From c345534fa12d1a79f00eb1bea0aedb3a5cc8a862 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Tue, 25 Nov 2025 18:39:46 -0500 Subject: [PATCH 107/121] Now working on docstrings-main function doc --- xrtpy/xrt_dem_iterative/dem_solver.py | 61 +++++++++++++++++---------- 1 file changed, 38 insertions(+), 23 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index ad0118d9c..785f898e4 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -17,34 +17,49 @@ class XRTDEMIterative: """ - Estimate the differential emission measure (DEM) from Hinode/XRT data - using the iterative spline-based method. + Differential Emission Measure (DEM) solver for Hinode/XRT observations. + + This class implements a Python version of the IDL routine + `xrt_dem_iterative2.pro`, using spline-parameterized DEM curves and + iterative least-squares fitting. It supports Monte Carlo error analysis + and closely mirrors the logic of the original IDL algorithm. Parameters ---------- - observed_channel : str or list of str (required) - Filter names used in the observation (e.g., 'Al-mesh', 'Be-thin'). - Must match the provided temperature responses. - observed_intensities : array-like (required) - Observed intensities for each channel. - Units = DN/s/pix. - temperature_responses : list (required) - List of `TemperatureResponseFundamental` objects matching the filters. - Units = DN s^-1 pix^-1 EM^-1. - Can be generated using `xrtpy.response.tools.generate_temperature_responses` - for one or more filters. 
See: https://xrtpy.readthedocs.io/en/latest/getting_started.html + observed_channel : str or list of str, required + Names of the filters used in the observation (for example, + "Al-mesh", "Be-thin"). Must correspond one-to-one with the + temperature_responses argument. + observed_intensities : array-like, required + Observed intensities for each filter channel. Units are DN/s/pix. + temperature_responses : list, required + List of TemperatureResponseFundamental objects matching the filters. + Units are DN s^-1 pix^-1 cm^5. These can be created using + xrtpy.response.tools.generate_temperature_responses(). intensity_errors : array-like, optional - Intensity uncertainties. If None, will use a model-based estimate. - minimum_bound_temperature : float - Minimum log10 temperature (default: 5.5). - maximum_bound_temperature: float - Maximum log10 temperature (default: 8.0). - logarithmic_temperature_step_size : float - Step size in log10 temperature space (default: 0.1). + Uncertainties in the observed intensities. If None, a default model + is used: max(0.03 * intensity, 2 DN/s/pix). + minimum_bound_temperature : float, optional + Minimum value of the log10(T) grid. Default is 5.5. + maximum_bound_temperature : float, optional + Maximum value of the log10(T) grid. Default is 8.0. + logarithmic_temperature_step_size : float, optional + Step size for the log10(T) grid. Default is 0.1. monte_carlo_runs : int, optional - Number of Monte Carlo runs to perform (default: 0, disabled). - Each run perturbs `observed_intensities` using `intensity_errors` - as Gaussian sigma and re-solves the DEM. + Number of Monte Carlo repetitions to perform. Default is 0 (disabled). + max_iterations : int, optional + Maximum number of function evaluations for lmfit. Default is 2000. + normalization_factor : float, optional + Internal scaling factor used during optimization. Default is 1e21. 
+ + Notes + ----- + - All lists (observed_channel, observed_intensities, + temperature_responses) must be the same length. + - The log10(T) range must lie inside the native temperature grid + provided by all filter responses. + - If intensity_errors is not provided, a default model is used to + estimate uncertainties. """ def __init__( From b4d8bfefe9ed217c3dc01f117b5fc93f624d1016 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Tue, 25 Nov 2025 18:43:11 -0500 Subject: [PATCH 108/121] Updating intensity_errors doc --- xrtpy/xrt_dem_iterative/dem_solver.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 785f898e4..b95e7df21 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -327,6 +327,7 @@ def observed_intensities( ): # Add method to account for known values not worth observed_intensities """ Observed intensities with physical units. + Returns ------- `~astropy.units.Quantity` @@ -395,18 +396,21 @@ def relative_error(self): @property def intensity_errors(self) -> u.Quantity: """ - Returns the intensity uncertainties, either user-provided or model-based. + Return the intensity uncertainty values. + + If the user supplied intensity_errors, those values are returned. + Otherwise a default model is used: - If not provided, errors are estimated using: - max(0.03 * observed_intensity, 2 DN/s/pix) + sigma = max(0.03 * intensity, 2 DN/s/pix) + + This behavior mirrors the default uncertainty logic of the IDL routine + xrt_dem_iterative2.pro. + + `~astropy.units.Quantity` + Intensity errors in DN/s for each filter. For details, see: https://hesperia.gsfc.nasa.gov/ssw/hinode/xrt/idl/util/xrt_dem_iterative2.pro - - Returns - ------- - `~astropy.units.Quantity` - Intensity errors in DN/s for each filter. 
""" if self._intensity_errors is not None: return self._intensity_errors * (u.DN / u.s) From b37654f2c9072131abac31810c6d24d60fd022fd Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Tue, 25 Nov 2025 18:53:20 -0500 Subject: [PATCH 109/121] Cleaned up doc for _interpolate_responses_to_grid and _estimate_initial_dem --- xrtpy/xrt_dem_iterative/dem_solver.py | 64 +++++++++++++++------------ 1 file changed, 35 insertions(+), 29 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index b95e7df21..f0b7940fa 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -60,6 +60,9 @@ class XRTDEMIterative: provided by all filter responses. - If intensity_errors is not provided, a default model is used to estimate uncertainties. + + SELFNOTEJOY + Add web-link to IDL script. """ def __init__( @@ -521,6 +524,22 @@ def create_logT_grid(self): def _interpolate_responses_to_grid(self): """ + Interpolate all filter responses onto the solver's regular log10(T) grid. + + This constructs the response matrix used in the DEM forward model. + Each filter's native temperature response (R(T)) is interpolated to the + grid defined by self.logT. Extrapolated values outside the native + response range are set to zero. + + Equivalent to the "Res_Mat" construction in the IDL routine xrt_dem_iterative2.pro. + + Notes + ----- + - Response units (from XRTpy) are DN s^-1 pix^-1 cm^5. + - Output matrix has shape (n_filters, n_temperatures). + - Rows correspond to filters; columns correspond to temperature bins. + + ------- IDL method of Interpolate emissivity. Interpolate all filter responses onto the common logT grid and build the response matrix. @@ -528,20 +547,11 @@ def _interpolate_responses_to_grid(self): Equivalent to constructing `Res_Mat` in IDL's `xrt_dem_iterative2.pro` and in the DEM_Solver PDF documentation. 
- Notes - ----- - - Each filter's response is interpolated to `self.logT` (regular log10 grid). - - Extrapolation beyond the native response grid is set to 0.0. - - Units: DN s^-1 pix^-1 cm^5 (per emission measure). - - Shape of `_response_matrix`: (n_filters, n_temperatures) - Rows = filters, Columns = temperature bins. - Attributes Created ------------------ interpolated_responses : list of ndarray - Interpolated response arrays for each filter. _response_matrix : ndarray - Final stacked matrix (n_filters x n_temperatures). + Stacked filter responses on the uniform logT grid. """ if not hasattr(self, "logT"): raise AttributeError( @@ -635,36 +645,32 @@ def _prepare_scaled_observations(self): def _estimate_initial_dem(self, cutoff: float = 1.0 / np.e) -> np.ndarray: """ - Construct an initial DEM guess, mirroring IDL's xrt_dem_iter_estim. + Compute an initial DEM estimate closely following the structure of the IDL routine xrt_dem_iter_estim. - This method follows the *structure* of the IDL routine: - - Identify channels with non-zero observed intensity. - - For each such channel, find the peak of its emissivity/response. - - Integrate the response around the peak to estimate a DEM value. - - Combine/compact duplicate peak temperatures. + The IDL code performs a rough DEM estimate by evaluating intensities + relative to response peaks, but xrt_dem_iterative2 ultimately replaces + that estimate with a flat log10(DEM) curve before calling the solver. - HOWEVER, to exactly match the behavior of IDL's xrt_dem_iter_estim - as used by xrt_dem_iter_nowidget, the final initial guess returned - to the solver is a *flat* log10(DEM) curve: + This method repeats the peak-finding logic for diagnostic purposes, but + the final DEM passed into the solver is always: - log10(DEM(T)) = 1.0 for all T on the solver grid. 
+ log10(DEM(T)) = 0.0 for all temperature bins - The detailed peak-based DEM estimates are kept only for optional - diagnostics; they do not affect the initial DEM passed into the - spline/least-squares solver (this is exactly what the IDL code does). + which corresponds to DEM(T) = 1 in arbitrary units. This reproduces the + IDL initial condition exactly. Parameters ---------- cutoff : float, optional - Fraction of the peak response used to define the "good" window - around each filter's peak. Default is 1/e (≈ 0.3679), as in IDL. + Fraction of the peak response used to define the usable window + around a channel's emissivity peak. Default is 1/e (approximately + 0.3679). Returns ------- - est_log_dem_on_grid : ndarray - Array of shape (n_temperatures,) giving the initial guess for - log10(DEM) on `self.logT`. For strict IDL-compatibility, this - is identically 1.0 everywhere. + ndarray + Initial log10(DEM) estimate on self.logT. This is always a flat + array of zeros (IDL-equivalent behavior). """ if not hasattr(self, "logT"): raise AttributeError( From 2ba49f7a120cd6f65459f3c6e5a58b1f7d4cc4b7 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Tue, 25 Nov 2025 19:01:43 -0500 Subject: [PATCH 110/121] Added more docstrings and code formatting using tools like black, ruff, ect. --- xrtpy/xrt_dem_iterative/dem_solver.py | 88 +++++++++++++++++---------- 1 file changed, 57 insertions(+), 31 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index f0b7940fa..a617ef208 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -60,9 +60,9 @@ class XRTDEMIterative: provided by all filter responses. - If intensity_errors is not provided, a default model is used to estimate uncertainties. - + SELFNOTEJOY - Add web-link to IDL script. + Add web-link to IDL script. 
""" def __init__( @@ -330,7 +330,7 @@ def observed_intensities( ): # Add method to account for known values not worth observed_intensities """ Observed intensities with physical units. - + Returns ------- `~astropy.units.Quantity` @@ -400,7 +400,7 @@ def relative_error(self): def intensity_errors(self) -> u.Quantity: """ Return the intensity uncertainty values. - + If the user supplied intensity_errors, those values are returned. Otherwise a default model is used: @@ -408,7 +408,7 @@ def intensity_errors(self) -> u.Quantity: This behavior mirrors the default uncertainty logic of the IDL routine xrt_dem_iterative2.pro. - + `~astropy.units.Quantity` Intensity errors in DN/s for each filter. @@ -430,7 +430,7 @@ def intensity_errors(self) -> u.Quantity: category=UserWarning, stacklevel=2, ) - + self._using_estimated_errors = True # Fixed in units @@ -525,12 +525,12 @@ def create_logT_grid(self): def _interpolate_responses_to_grid(self): """ Interpolate all filter responses onto the solver's regular log10(T) grid. - + This constructs the response matrix used in the DEM forward model. Each filter's native temperature response (R(T)) is interpolated to the grid defined by self.logT. Extrapolated values outside the native response range are set to zero. - + Equivalent to the "Res_Mat" construction in the IDL routine xrt_dem_iterative2.pro. Notes @@ -564,9 +564,7 @@ def _interpolate_responses_to_grid(self): ): # Make sure that R_orig.value is indeed in DN/s/pix per cm^5 logT_orig = np.log10(T_orig.to_value(u.K)) - response_vals = ( - R_orig.value - ) # already in correct physical units for XRTpy #NOTEFORJOY- TRIPLE check this + response_vals = R_orig.value # already in correct physical units for XRTpy #NOTEFORJOY- TRIPLE check this interp_func = interp1d( logT_orig, @@ -771,14 +769,23 @@ def _estimate_initial_dem(self, cutoff: float = 1.0 / np.e) -> np.ndarray: def _prepare_spline_system(self): """ - Pythonic, IDL version of mp_prep. 
- Prepares:s - - self.n_spl (number of spline knots) - - self.spline_logT (knot positions) - - self.spline_log_dem (initial spline logDEM values) - - self.pm_matrix (R(T) * T * dlnT) - - self.weights (all ones) - - self.abundances (all ones) + Prepare the spline-based DEM parameterization. + + This mirrors the IDL routine mp_prep and sets up all arrays needed by + the least-squares solver, including: + + - self.n_spl : number of spline knots + - self.spline_logT : knot positions (evenly spaced in log10(T)) + - self.spline_log_dem : initial values of log10(DEM) at each knot + - self.pm_matrix : response matrix multiplied by T * d(ln T) + - self.weights : all ones (IDL uses a channel weighting mask) + - self.abundances : all ones + + pm_matrix corresponds to: + pm[i, j] = R_i(T_j) * T_j * d(ln T) + + which appears in the forward model: + I_model_i = sum_j DEM(T_j) * pm[i, j] """ # Number of channels @@ -838,6 +845,7 @@ def _build_lmfit_parameters(self): def _reconstruct_dem_from_knots(self, params): """ Construct DEM(T) on self.logT using spline of log10(DEM) at knot positions. + Uses a natural cubic spline interpolation in log10(DEM) space. """ from scipy.interpolate import CubicSpline @@ -851,9 +859,18 @@ def _reconstruct_dem_from_knots(self, params): def _residuals(self, params): """ - IDL equivalent of mpdemfunct. - Computes residuals: - ((DEM ## pm) - i_obs_scaled) / i_err_scaled + Compute residuals for use by the least-squares optimizer. + + Residuals are computed as: + residual_i = (I_model_i - I_observed_i) / sigma_i + + where: + I_model = (pm_matrix @ DEM) * abundances + + Returns + ------- + ndarray + Residuals for each filter channel. """ # 1. DEM(T) @@ -878,6 +895,11 @@ def _residuals(self, params): return residuals def _solve_single_dem(self, observed_intensities_vals: np.ndarray): + """ + This method solves the DEM for one set of intensities only, without + Monte Carlo perturbation. + """ + nf = self._normalization_factor # 1. 
scaled obs/errors @@ -927,6 +949,8 @@ def _run_monte_carlo(self): - self.mc_base_obs shape (n_obs, N+1) - self.mc_mod_obs shape (n_obs, N+1) - self.mc_chisq shape (N+1,) + + Note that N+1 rows means: row 0 = base case, rows 1..N = MC. """ n_obs = len(self._observed_intensities) @@ -1119,13 +1143,13 @@ def summary(self): Print a detailed, diagnostic summary of the DEM solver state. This provides: - • Input observation details - • Temperature grid configuration - • Response matrix status - • Spline system configuration - • Base DEM fit results - • Monte Carlo statistics (if available) - • Available plotting helpers + - Input observation details + - Temperature grid configuration + - Response matrix status + - Spline system configuration + - Base DEM fit results + - Monte Carlo statistics (if available) + - Available plotting helpers """ print("\n" + "=" * 76) @@ -1153,7 +1177,7 @@ def summary(self): print("\nTEMPERATURE GRID") print("-" * 70) if hasattr(self, "logT"): - print( f" logT range: {self.logT[0]:.2f} to {self.logT[-1]:.2f}") + print(f" logT range: {self.logT[0]:.2f} to {self.logT[-1]:.2f}") print(f" Number of temperature bins: {len(self.logT)}") print(f" logT (grid spacing): {self.dlogT:.3f}") print(f" lnT (natural log spacing): {self.dlnT:.3f}") @@ -1164,7 +1188,9 @@ def summary(self): print("\nRESPONSE MATRIX") print("-" * 70) if hasattr(self, "_response_matrix"): - print(f" Matrix shape: {self._response_matrix.shape} (filters x T bins)") + print( + f" Matrix shape: {self._response_matrix.shape} (filters x T bins)" + ) print(f" Response units: {self._response_unit}") else: print(" Response matrix not constructed.") From d7012ee039130a0d09aa1324a5828a117245d930 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Tue, 25 Nov 2025 19:28:17 -0500 Subject: [PATCH 111/121] Few string/comment edits --- xrtpy/xrt_dem_iterative/dem_solver.py | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py 
b/xrtpy/xrt_dem_iterative/dem_solver.py index a617ef208..6ad4711f4 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -2,8 +2,6 @@ "XRTDEMIterative", ] -# import pdb; pdb.set_trace() - import warnings import astropy.units as u @@ -323,11 +321,7 @@ def __repr__(self): ########################################################## @property - def observed_intensities( - self, - ) -> ( - u.Quantity - ): # Add method to account for known values not worth observed_intensities + def observed_intensities(self,) -> u.Quantity: # Add method to account for known values not worth observed_intensities """ Observed intensities with physical units. @@ -633,13 +627,9 @@ def _prepare_scaled_observations(self): # Store for solver self._scaled_prepared = True - ############################ Everything line of code ABOVE is PREP for the DEM ############################################# - - # **************************************************************************************************************************** - ############################ Everything line of code BELOW is FOR the DEM ################################################## - - #############************************** Start of INITIAL ROUGH DEM ESTIMATE **************************########################## - ################## An estimated EM shape based on simple intensity-over-response peaks, smoothed across T. 
##################### + # ====================================================================== + # DEM INITIALIZATION AND SOLVER METHODS + # ====================================================================== def _estimate_initial_dem(self, cutoff: float = 1.0 / np.e) -> np.ndarray: """ From d36d4e02538e646801071b4a645df920e343f0e0 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Tue, 25 Nov 2025 19:46:31 -0500 Subject: [PATCH 112/121] Cleaning up code and removing comments --- xrtpy/xrt_dem_iterative/dem_solver.py | 30 ++++++--------------------- 1 file changed, 6 insertions(+), 24 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/dem_solver.py b/xrtpy/xrt_dem_iterative/dem_solver.py index 6ad4711f4..bd29a5fd3 100644 --- a/xrtpy/xrt_dem_iterative/dem_solver.py +++ b/xrtpy/xrt_dem_iterative/dem_solver.py @@ -319,9 +319,12 @@ def __repr__(self): # """ # return self._name - ########################################################## @property - def observed_intensities(self,) -> u.Quantity: # Add method to account for known values not worth observed_intensities + def observed_intensities( + self, + ) -> ( + u.Quantity + ): # Add method to account for known values not worth observed_intensities """ Observed intensities with physical units. @@ -753,10 +756,6 @@ def _estimate_initial_dem(self, cutoff: float = 1.0 / np.e) -> np.ndarray: return est_log_dem_on_grid - #############************************** End of INITIAL DEM ESTIMATE **************************################################## - - # ------------------------------------------------------------------------------------------------------------------------------- - def _prepare_spline_system(self): """ Prepare the spline-based DEM parameterization. 
@@ -929,8 +928,6 @@ def _solve_single_dem(self, observed_intensities_vals: np.ndarray): return dem_phys, modeled_intensities_phys, chisq, result - # ------------------------------------------------------------------------------------------------------------------------------- - def _run_monte_carlo(self): """ Replicates IDL's Monte Carlo loop. @@ -1010,8 +1007,6 @@ def _run_monte_carlo(self): self.mc_mod_obs = mc_mod self.mc_chisq = mc_chi - # ------------------------------------------------------------------------------------------------------------------------------- - def solve(self): """ High-level DEM solver. @@ -1055,11 +1050,9 @@ def solve(self): """ # Validate inputs (IDL: argument checks near top) - self.validate_inputs() # 1) Build logT grid and response matrix - IDL: regular logT grid + interpolated emissivities - self.create_logT_grid() self._interpolate_responses_to_grid() @@ -1097,7 +1090,6 @@ def solve(self): self.mc_mod_obs[0, :] = mod_base # 4) Monte Carlo loop - if N > 0: rng = np.random.default_rng() # like IDL's systime(1) seeding @@ -1125,7 +1117,6 @@ def solve(self): self.mc_base_obs[ii, :] = obs_pert self.mc_mod_obs[ii, :] = mod_i - # 5) Return DEM for convenience return self.dem def summary(self): @@ -1146,7 +1137,6 @@ def summary(self): print(" XRTpy DEM Iterative — Solver Summary") print("=" * 76) - # ----------------------------------------------------- print("\nINPUT DATA") print("-" * 70) print(f" Filters: {self.filter_names}") @@ -1163,7 +1153,6 @@ def summary(self): print(f" Error values (DN/s): {self.intensity_errors.to_value('DN/s')}\n") - # ----------------------------------------------------- print("\nTEMPERATURE GRID") print("-" * 70) if hasattr(self, "logT"): @@ -1174,7 +1163,6 @@ def summary(self): else: print(" Grid has not been constructed (call solve()).") - # ----------------------------------------------------- print("\nRESPONSE MATRIX") print("-" * 70) if hasattr(self, "_response_matrix"): @@ -1185,7 +1173,6 @@ def 
summary(self): else: print(" Response matrix not constructed.") - # ----------------------------------------------------- print("\nSOLVER CONFIGURATION") print("-" * 70) print(f" Normalization factor: {self.normalization_factor:.2e}") @@ -1196,7 +1183,7 @@ def summary(self): print(f" Knot positions (logT): {getattr(self, 'spline_logT', 'N/A')}") else: print(" Spline system not prepared yet.") - # ----------------------------------------------------- + print("\nINITIAL DEM GUESS") print("-" * 70) if hasattr(self, "_initial_log_dem"): @@ -1205,8 +1192,6 @@ def summary(self): else: print(" Initial DEM has not been estimated.") - # ----------------------------------------------------- - print("\nBASE DEM SOLUTION") print("-" * 70) if hasattr(self, "dem"): @@ -1218,8 +1203,6 @@ def summary(self): else: print(" No DEM solution computed yet (call solve()).") - # ----------------------------------------------------- - print("\nMONTE CARLO ENSEMBLE") print("-" * 70) if hasattr(self, "mc_dem"): @@ -1239,7 +1222,6 @@ def summary(self): else: print(" No Monte Carlo results available.") - # ----------------------------------------------------- print("\nPLOTTING HELPERS") print("-" * 76) print(" • plot_dem() – Base DEM only") From b3d851c75ae6fbddaf8623e6c3c413fbbcb7db86 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Wed, 26 Nov 2025 14:32:33 -0500 Subject: [PATCH 113/121] Working on docs site for the DEM --- docs/dem_overview.rst | 42 ++++++++++++++++++++++++++++-------------- 1 file changed, 28 insertions(+), 14 deletions(-) diff --git a/docs/dem_overview.rst b/docs/dem_overview.rst index 0acd9bc3c..6cb988936 100644 --- a/docs/dem_overview.rst +++ b/docs/dem_overview.rst @@ -1,7 +1,7 @@ .. _xrtpy-dem-overview: =================================== -DEM +Differential Emission Measure (DEM) =================================== .. 
contents:: @@ -10,24 +10,38 @@ DEM Introduction ------------ -The **Differential Emission Measure (DEM)** describes how much plasma is present -in the solar corona as a function of temperature. -It is a key diagnostic for understanding coronal heating, solar flares, and -general plasma properties. -Hinode/XRT is particularly well suited for DEM analysis because of its multiple -broadband filters, which are sensitive to different temperature ranges. +The differential emission measure (DEM) describes how much plasma is present +in the solar corona as a function of temperature. It is a key diagnostic for +understanding coronal heating, solar flares, and the thermal structure of +active regions. -Why DEM? --------- -- Converts observed X-ray intensities into a thermal distribution of plasma. -- Allows comparison across instruments (e.g., Hinode/XRT, SDO/AIA, Hinode/EIS). -- Provides a physical link between observations and coronal heating models. +Hinode/XRT is well suited for DEM analysis because it observes the corona +through multiple broadband filters, each sensitive to different temperature +ranges. By combining these channels, we can infer a temperature distribution +DEM(T) that explains the observed X-ray intensities. + + +.. Why DEM? +.. -------- +.. - Converts observed X-ray intensities into a thermal distribution of plasma. +.. - Allows comparison across instruments (e.g., Hinode/XRT, SDO/AIA, Hinode/EIS). +.. - Provides a physical link between observations and coronal heating models. DEM in XRTpy ------------ -XRTpy provides a Python implementation of the iterative spline-fitting method -(originally available in IDL as ``xrt_dem_iterative2.pro``). +XRTpy provides a Python implementation of the iterative spline fitting method +originally available in IDL as `xrt_dem_iterative2.pro `_. +The core solver is implemented in :class:`xrtpy.xrt_dem_iterative.XRTDEMIterative`. + + +Conceptually, the solver: + 1. 
Builds a regular grid in log10(T) between user-specified bounds. + 2. Interpolates the filter temperature responses onto that grid. + 3. Represents log10(DEM) as a spline in log10(T). + 4. Uses least-squares fitting (via ``lmfit``) to adjust the spline values so that the modeled filter intensities match the observed intensities. + 5. Optionally performs Monte Carlo runs by perturbing the observed intensities with their errors and re-solving the DEM many times to estimate uncertainties. + The DEM workflow requires three main inputs, each with specific type, shape, and units: From c993a6676fee94875dd408761ca0a69eba01dc3e Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Fri, 28 Nov 2025 13:39:03 -0500 Subject: [PATCH 114/121] Updating doc site page --- docs/dem_overview.rst | 109 ++++++++++++++++++++++++++++-------------- 1 file changed, 74 insertions(+), 35 deletions(-) diff --git a/docs/dem_overview.rst b/docs/dem_overview.rst index 6cb988936..a1c0b07ce 100644 --- a/docs/dem_overview.rst +++ b/docs/dem_overview.rst @@ -22,12 +22,6 @@ ranges. By combining these channels, we can infer a temperature distribution DEM(T) that explains the observed X-ray intensities. -.. Why DEM? -.. -------- -.. - Converts observed X-ray intensities into a thermal distribution of plasma. -.. - Allows comparison across instruments (e.g., Hinode/XRT, SDO/AIA, Hinode/EIS). -.. - Provides a physical link between observations and coronal heating models. - DEM in XRTpy ------------ XRTpy provides a Python implementation of the iterative spline fitting method @@ -43,54 +37,73 @@ Conceptually, the solver: 5. Optionally performs Monte Carlo runs by perturbing the observed intensities with their errors and re-solving the DEM many times to estimate uncertainties. -The DEM workflow requires three main inputs, each with specific type, shape, and units: +Required inputs +--------------- +The DEM workflow requires three main input pieces: + +1. 
Observed channels (filters) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +* Type: ``str`` or ``list`` of ``str`` +* Description: Names of the filters used in the observation, for example ``"Al-mesh"`` or ``"Be-thin"``. +* These must correspond to filters understood by XRTpy and must match the provided temperature responses one-to-one. + +2. Observed intensities +~~~~~~~~~~~~~~~~~~~~~~~ +* Type: array-like +* Units: DN/s (normalized per pixel) +* Description: Measured intensities in each filter channel. +* Length must match the number of filters. -1. **Observed channels (filters)** - - Type: ``str`` or ``list`` of ``str`` - - Description: Names of the filters used in the observation (e.g., ``"Al-mesh"``, ``"Be-thin"``). -2. **Observed intensities** - - Type: array-like - - Units: DN/s (normalized per pixel) - - Description: Measured intensities corresponding to each filter. +3. Temperature response functions +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +* Type: ``list`` of :class:`xrtpy.response.TemperatureResponseFundamental` +* Units: DN s\ :sup:`-1` pix\ :sup:`-1` cm\ :sup:`5` +* Description: Instrument response as a function of temperature for each filter, matching the order of the filters. +* Can be generated using :func:`xrtpy.response.tools.generate_temperature_responses`. + -3. **Temperature response functions** - - Type: ``list`` of :class:`xrtpy.response.TemperatureResponseFundamental` - - Units: DN s\ :sup:`-1` pix\ :sup:`-1` EM\ :sup:`-1` - - Description: Instrument temperature responses matching the filters. - These can be generated with ``xrtpy.response.tools.generate_temperature_responses`` - See :doc:`getting_started` for details. Example ------- A simple example with two filters: -.. code-block:: python +.. 
code-block:: python + from xrtpy.response.tools import generate_temperature_responses - from xrtpy.xrt_dem_iterative import XRTDEMIterative - - # Define filters and observed intensities - filters = ["Al-poly","C-poly/Ti-poly"] - intensities = [250.0, 180.0] # DN/s - # Generate responses + filters = ["Al-poly", "Ti-poly"] responses = generate_temperature_responses( - filters, - observation_date="2007-07-10", + filters, + "2012-10-27T00:00:00", abundance_model="hybrid" ) - # Solve XRT DEM + +Overview of the XRTDEMIterative API +----------------------------------- +The main entry point is :class:`xrtpy.xrt_dem_iterative.XRTDEMIterative`. + +Constructor +~~~~~~~~~~~ +.. code-block:: python + + from xrtpy.xrt_dem_iterative import XRTDEMIterative + dem_solver = XRTDEMIterative( observed_channel=filters, observed_intensities=intensities, temperature_responses=responses, + intensity_errors=None, + minimum_bound_temperature=5.5, + maximum_bound_temperature=8.0, + logarithmic_temperature_step_size=0.1, + monte_carlo_runs=0, + max_iterations=2000, + normalization_factor=1e21, ) - dem_result = dem_solver.solve() - - dem_result.plot() Comparison with IDL @@ -179,8 +192,34 @@ References - Golub, L., et al. (2004), *Solar Physics*, 243, 63. :cite:p:`golub:2004` - Weber, M. A., et al. (2004), *ApJ*, 605, 528. :cite:p:`weber:2004`. + .. Next Steps .. ---------- -.. - See :ref:`API Reference ` for details on -.. ``XRTDEMIterative``. Coming soon. .. - Explore example notebooks in the `examples/` directory. Coming soon. + +.. .. code-block:: python + +.. from xrtpy.response.tools import generate_temperature_responses +.. from xrtpy.xrt_dem_iterative import XRTDEMIterative + +.. # Define filters and observed intensities +.. filters = ["Al-poly","C-poly/Ti-poly"] +.. intensities = [250.0, 180.0] # DN/s + +.. # Generate responses +.. responses = generate_temperature_responses( +.. filters, +.. observation_date="2007-07-10", +.. abundance_model="hybrid" +.. ) + +.. # Solve XRT DEM +.. 
dem_solver = XRTDEMIterative( +.. observed_channel=filters, +.. observed_intensities=intensities, +.. temperature_responses=responses, +.. ) + +.. dem_result = dem_solver.solve() + +.. dem_result.plot() From 244e5be2336bb2b886ef1f6165d0b8e8f96bf97c Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Fri, 28 Nov 2025 14:22:37 -0500 Subject: [PATCH 115/121] Updating DEM doc site --- docs/dem_overview.rst | 186 +++++++++++++++++++++++++----------------- 1 file changed, 111 insertions(+), 75 deletions(-) diff --git a/docs/dem_overview.rst b/docs/dem_overview.rst index a1c0b07ce..350d7d0e9 100644 --- a/docs/dem_overview.rst +++ b/docs/dem_overview.rst @@ -77,7 +77,6 @@ A simple example with two filters: responses = generate_temperature_responses( filters, "2012-10-27T00:00:00", - abundance_model="hybrid" ) @@ -104,73 +103,143 @@ Constructor normalization_factor=1e21, ) + # Solve for the DEM + dem = dem_solver.solve() # returns the DEM array, also stored in dem_solver.dem + # Plot the DEM + dem_solver.plot_dem() + + +Enabling Monte Carlo error estimates +------------------------------------ +To estimate uncertainties, you can enable Monte Carlo iterations. The solver +will perturb the observed intensities by their errors and re-solve the DEM +for each realization. + +.. code-block:: python + + N_mc = 50 # number of Monte Carlo runs + + dem_solver = XRTDEMIterative( + observed_channel=filters, + observed_intensities=intensities, + temperature_responses=responses, + monte_carlo_runs=N_mc, + ) + + dem_solver.solve() + + # Monte Carlo DEM Plot + dem_solver.plot_dem_mc() # base DEM plus Monte Carlo curves + +The arrays ``dem_solver.mc_dem``, ``dem_solver.mc_chisq``, +``dem_solver.mc_base_obs``, and ``dem_solver.mc_mod_obs`` are then available +for custom analysis. Comparison with IDL ------------------- -The Python solver mirrors the SolarSoft/IDL routine -`xrt_dem_iterative2.pro `_. 
+The Python solver is designed to closely follow the logic of the +SolarSoft/IDL routine `xrt_dem_iterative2.pro `_: + +* Uses a regular log10(T) grid. +* Represents log10(DEM) at a set of spline knots. +* Uses a least-squares algorithm to minimize chi-square. +* Supports Monte Carlo noise realizations for uncertainty estimation. + + +Small numerical differences can arise due to: + +* Different interpolation choices (for example, cubic splines from SciPy). +* Differences in optimization libraries (lmfit versus IDL MPFIT). +* Floating-point rounding and platform-specific details. -While results are consistent, minor differences can occur due to -interpolation choices and optimization details. +Within these limits, the Python implementation is intended to produce +results that are consistent with the IDL tool. -Mathematical Note: Ill-posed Nature of DEM Inversion ----------------------------------------------------- -The DEM problem is inherently an **ill-posed mathematical inversion**. -Given observed intensities :math:`I_i` in channels *i*, and their -temperature response functions :math:`R_i(T)`, the relationship is: +Mathematical background +----------------------- +The DEM inversion problem is ill posed. For each filter channel i, +the observed intensity :math:`I_i` is related to the DEM through: .. math:: I_i = \int DEM(T) \, R_i(T) \, dT -Recovering :math:`DEM(T)` from a small set of broadband channels is -not unique and is technically fraught with perils. +where :math:`R_i(T)` is the temperature response function for the filter, +and :math:`DEM(T)` is the unknown thermal distribution. -XRTpy (like the original IDL routine ``xrt_dem_iterative2.pro``) employs a -**forward-fitting approach**: -- A trial DEM is guessed. -- It is folded through :math:`R_i(T)` to produce "model" intensities. -- The DEM spline points are adjusted to minimize chi-square between model and observed values. 
+Since the number of temperature bins typically exceeds the number of +observed channels, the inversion does not have a unique solution. The +XRTpy solver uses a forward-fitting approach: -Because the number of temperature bins typically exceeds the number -of observations, the solution is constrained by assumptions (e.g., -spline smoothness). +1. Assume a parametric form for log10(DEM(T)) using spline knots. +2. Compute model intensities: -Uncertainties are estimated through **Monte Carlo iterations**, where -observations are perturbed by their errors and re-fit. The resulting -distribution of DEM solutions gives an estimate of confidence. + .. math:: + I_i^{model} = \sum_j DEM(T_j)\, R_i(T_j)\, T_j\, \Delta(\ln T) +3. Adjust the spline values to minimize: -Example Extension ------------------ -In addition to the required inputs, you can provide optional parameters -to fine-tune the DEM solution. -The example below shows all options explicitly set. + .. math:: + + \chi^2 = \sum_i \left[ + \frac{I_i^{model} - I_i^{obs}}{\sigma_i} + \right]^2 + +Here :math:`\sigma_i` are the observational uncertainties. Smoothness and +the low number of spline knots help regularize the solution. + + +Monte Carlo iterations perturb the observed intensities: + +.. math:: + + I_i^{(k)} = I_i^{obs} + \mathcal{N}(0, \sigma_i) + +and re-fit the DEM for each realization k. The spread in the resulting DEM +curves provides an estimate of the uncertainty in DEM(T). + + +Extended example with options +----------------------------- +Below is an extended example showing more constructor options explicitly. +These values match current defaults but are written out here for clarity. .. 
code-block:: python + from xrtpy.response.tools import generate_temperature_responses from xrtpy.xrt_dem_iterative import XRTDEMIterative + filters = ["Al-poly", "Ti-poly", "Be-thin", "C-poly"] + intensities = [2500.0, 1800.0, 900.0, 450.0] # DN/s + observation_date="2012-10-27T00:00:00" + + responses = generate_temperature_responses( + filters, + observation_date, + ) + dem_solver = XRTDEMIterative( - observed_channel=filters, # Filter names - observed_intensities=intensities, # Observed values - temperature_responses=responses, # Instrument responses - - intensity_errors=errors, # Obs. uncertainties (default: 3%) - min_T=5.6, # Min log T (default: 5.5) - max_T=7.8, # Max log T (default: 8.0) - dT=0.05, # Bin width in log T (default: 0.1) - min_error=1.5, # Minimum error floor (default: 2 DN) - relative_error=0.02, # Fractional error scaling (default: 0.03) - monte_carlo_runs=50, # # of Monte Carlo runs (default: none) - max_iterations=3000, # Solver max iterations (default: 2000) - solv_factor=1e17, # Scaling factor (default: 1e21) + observed_channel=filters, # Filter names + observed_intensities=intensities, # Observed intensity values + temperature_responses=responses, # Instrument responses + + # Optional configuration: + intensity_errors=None, # Obs. uncertainties - default: auto-estimated (3%) + minimum_bound_temperature=5.5, # Minimum log T (default: 5.5) + maximum_bound_temperature=8.0, # Maximum log T (default: 8.0) + logarithmic_temperature_step_size=0.1, # Bin width in log T (default: 0.1) + monte_carlo_runs=100, # # of Monte Carlo runs (default: none) + max_iterations=2000, # Solver max iterations (default: 2000) + normalization_factor=1e21, # Normalization saling factor (default: 1e21) ) + dem_solver.solve() + dem_solver.plot_dem_mc() + .. note:: The values shown above correspond to existing defaults in the solver, but they are written out here to illustrate what can be tuned. @@ -186,40 +255,7 @@ The example below shows all options explicitly set. .. 
capabilities from legacy IDL routines into modern, open-source Python .. tools for the solar physics community.* - References ---------- -- Golub, L., et al. (2004), *Solar Physics*, 243, 63. :cite:p:`golub:2004` +- Golub, L., et al. (2004), *Solar Physics*, 243, 63. :cite:`golub:2004` - Weber, M. A., et al. (2004), *ApJ*, 605, 528. :cite:p:`weber:2004`. - - -.. Next Steps -.. ---------- -.. - Explore example notebooks in the `examples/` directory. Coming soon. - -.. .. code-block:: python - -.. from xrtpy.response.tools import generate_temperature_responses -.. from xrtpy.xrt_dem_iterative import XRTDEMIterative - -.. # Define filters and observed intensities -.. filters = ["Al-poly","C-poly/Ti-poly"] -.. intensities = [250.0, 180.0] # DN/s - -.. # Generate responses -.. responses = generate_temperature_responses( -.. filters, -.. observation_date="2007-07-10", -.. abundance_model="hybrid" -.. ) - -.. # Solve XRT DEM -.. dem_solver = XRTDEMIterative( -.. observed_channel=filters, -.. observed_intensities=intensities, -.. temperature_responses=responses, -.. ) - -.. dem_result = dem_solver.solve() - -.. dem_result.plot() From 8388f54de6b30bbf2e846b4c2d2fa1cdb8540e84 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Sat, 29 Nov 2025 00:20:31 -0500 Subject: [PATCH 116/121] Few doc edits --- docs/dem_overview.rst | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/docs/dem_overview.rst b/docs/dem_overview.rst index 350d7d0e9..46914314b 100644 --- a/docs/dem_overview.rst +++ b/docs/dem_overview.rst @@ -25,10 +25,9 @@ DEM(T) that explains the observed X-ray intensities. DEM in XRTpy ------------ XRTpy provides a Python implementation of the iterative spline fitting method -originally available in IDL as `xrt_dem_iterative2.pro `_. +originally available in IDL as `xrt_dem_iterative2.pro `__. The core solver is implemented in :class:`xrtpy.xrt_dem_iterative.XRTDEMIterative`. - Conceptually, the solver: 1. 
Builds a regular grid in log10(T) between user-specified bounds. 2. Interpolates the filter temperature responses onto that grid. @@ -36,6 +35,8 @@ Conceptually, the solver: 4. Uses least-squares fitting (via ``lmfit``) to adjust the spline values so that the modeled filter intensities match the observed intensities. 5. Optionally performs Monte Carlo runs by perturbing the observed intensities with their errors and re-solving the DEM many times to estimate uncertainties. +This approach mirrors the structure and behavior of the IDL routine while providing +a modern, fully open-source implementation in Python. Required inputs --------------- @@ -45,7 +46,7 @@ The DEM workflow requires three main input pieces: ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * Type: ``str`` or ``list`` of ``str`` * Description: Names of the filters used in the observation, for example ``"Al-mesh"`` or ``"Be-thin"``. -* These must correspond to filters understood by XRTpy and must match the provided temperature responses one-to-one. +* These must correspond to valid XRT filters and must match the provided temperature responses one-to-one. 2. Observed intensities ~~~~~~~~~~~~~~~~~~~~~~~ @@ -70,7 +71,7 @@ A simple example with two filters: .. code-block:: python - + from xrtpy.response.tools import generate_temperature_responses filters = ["Al-poly", "Ti-poly"] @@ -94,13 +95,8 @@ Constructor observed_channel=filters, observed_intensities=intensities, temperature_responses=responses, - intensity_errors=None, - minimum_bound_temperature=5.5, - maximum_bound_temperature=8.0, - logarithmic_temperature_step_size=0.1, monte_carlo_runs=0, max_iterations=2000, - normalization_factor=1e21, ) # Solve for the DEM @@ -118,15 +114,16 @@ for each realization. .. 
code-block:: python - N_mc = 50 # number of Monte Carlo runs + N_mc = 100 # number of Monte Carlo runs dem_solver = XRTDEMIterative( observed_channel=filters, observed_intensities=intensities, temperature_responses=responses, monte_carlo_runs=N_mc, + max_iterations=2000, ) - + dem_solver.solve() # Monte Carlo DEM Plot @@ -139,7 +136,10 @@ for custom analysis. Comparison with IDL ------------------- The Python solver is designed to closely follow the logic of the -SolarSoft/IDL routine `xrt_dem_iterative2.pro `_: +SolarSoft/IDL routine `xrt_dem_iterative2.pro `__: + + + * Uses a regular log10(T) grid. * Represents log10(DEM) at a set of spline knots. @@ -244,8 +244,8 @@ These values match current defaults but are written out here for clarity. The values shown above correspond to existing defaults in the solver, but they are written out here to illustrate what can be tuned. You can adjust these to best suit your analysis needs. - This mirrors the flexibility of the IDL routine - ``xrt_dem_iterative2.pro``. + This mirrors the flexibility of the IDL routine ``xrt_dem_iterative2.pro``. + .. Acknowledgement .. --------------- @@ -257,5 +257,5 @@ These values match current defaults but are written out here for clarity. References ---------- -- Golub, L., et al. (2004), *Solar Physics*, 243, 63. :cite:`golub:2004` -- Weber, M. A., et al. (2004), *ApJ*, 605, 528. :cite:p:`weber:2004`. +- Golub, L., et al. (2004), Solar Physics, 243, 63. :cite:p:`golub:2004` +- Weber, M. A., et al. (2004), Astrophysical Journal, 605, 528. 
:cite:p:`weber:2004` From c19e5bc7b3d73eec6b46c402c90b28a69adf5686 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Sat, 29 Nov 2025 01:11:08 -0500 Subject: [PATCH 117/121] Trying to fix github doc test to pass --- docs/dem_overview.rst | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/docs/dem_overview.rst b/docs/dem_overview.rst index 46914314b..1c9ba5eaf 100644 --- a/docs/dem_overview.rst +++ b/docs/dem_overview.rst @@ -26,7 +26,7 @@ DEM in XRTpy ------------ XRTpy provides a Python implementation of the iterative spline fitting method originally available in IDL as `xrt_dem_iterative2.pro `__. -The core solver is implemented in :class:`xrtpy.xrt_dem_iterative.XRTDEMIterative`. +The core solver is implemented in :class:`xrtpy.xrt_dem_iterative.dem_solver.XRTDEMIterative`. Conceptually, the solver: 1. Builds a regular grid in log10(T) between user-specified bounds. @@ -65,6 +65,7 @@ The DEM workflow requires three main input pieces: + Example ------- A simple example with two filters: @@ -83,7 +84,9 @@ A simple example with two filters: Overview of the XRTDEMIterative API ----------------------------------- -The main entry point is :class:`xrtpy.xrt_dem_iterative.XRTDEMIterative`. +The main entry point is :class:`xrtpy.xrt_dem_iterative.dem_solver.XRTDEMIterative`. + + Constructor ~~~~~~~~~~~ From a8e91713cf150eefd714ff55748b445997a4a15d Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Sat, 29 Nov 2025 01:11:35 -0500 Subject: [PATCH 118/121] Added three new works-def --- docs/glossary.rst | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/docs/glossary.rst b/docs/glossary.rst index 178a371cf..15ffcdd8e 100644 --- a/docs/glossary.rst +++ b/docs/glossary.rst @@ -17,7 +17,13 @@ Glossary A numerical image processing technique used to correct for the blurring caused by the telescope's Point Spread Function (PSF), improving sharpness and visibility of fine structures. 
   DEM
-      Differential Emission Measure (DEM) — a function that describes the distribution of plasma as a function of temperature along the line of sight. XRTpy will support DEM modeling in future versions.
+      Differential Emission Measure (DEM) — a function that describes how much plasma is present along the line of sight as a function of temperature. See :ref:`xrtpy-dem-overview` for a detailed overview of DEM theory,
+      usage, and the solver provided in XRTpy.
+
+   DEM Inversion
+      The process of determining the temperature distribution of coronal plasma (the DEM) from a small number of filter intensities. Since more temperature bins are used than available filters, the problem is mathematically
+      underconstrained (“ill posed”), so regularization and smoothing are required to obtain a stable, physical solution.
+
+
    DN
       Data Number (DN) — the digital value recorded by the CCD, representing the detected photon flux, usually in DN s\ :sup:`−1`\ .
@@ -38,9 +44,20 @@ Glossary
    Contamination (related to the XRT)
       Refers to the gradual accumulation of material on the CCD and focal plane filters (FPFs), which reduces instrument throughput. This time-dependent degradation impacts effective area calculations and must be accounted for in data analysis. Refer to Section 2.5.3 *Contamination* in the `SolarSoft XRT Analysis Guide`_ for more information.

+   Monte Carlo DEM
+      A set of DEM solutions computed by adding random noise (based on intensity errors) to the observed intensities and re-solving the DEM multiple times.
+      The spread of these Monte Carlo solutions provides an estimate of uncertainty in the DEM at each temperature.
+
    PSF
       Point Spread Function — describes the response of the telescope to a point source of light. In XRTpy, it is used in deconvolution routines to sharpen images.
+   Response Matrix
+      A two-dimensional array containing the temperature response of each XRT filter interpolated onto the solver’s regular log10 temperature grid. This matrix connects
+      the DEM to the modeled filter intensities through the forward model:
+
+      :math:`I_i^{model} = \sum_j DEM(T_j)\, R_i(T_j)\, T_j\, \Delta(\ln T)`.
+
+
    Solar Emission Spectra
       Emission spectra produced by solar plasma across a range of temperatures, calculated using spectral models such as CHIANTI. These spectra are used in temperature response and filter ratio methods

From c87c2aa4d4412373cd5aa48e712377e7321b3209 Mon Sep 17 00:00:00 2001
From: joyvelasquez
Date: Wed, 14 Jan 2026 15:59:32 -0500
Subject: [PATCH 119/121] Working on test for DEM;

---
 .../test/test_dem_input_validation.py         | 577 ++++++++++--------
 1 file changed, 322 insertions(+), 255 deletions(-)

diff --git a/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py b/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py
index 798e71002..5c999fcc6 100644
--- a/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py
+++ b/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py
@@ -1,15 +1,14 @@
-from importlib.resources import files
+from pathlib import Path

 import astropy.units as u
 import numpy as np
 import pytest
-import sunpy
-import sunpy.io.special
-import sunpy.map
+from lmfit import Parameters
+from scipy.io import readsav
+
 from xrtpy.response.channel import Channel
 from xrtpy.response.tools import generate_temperature_responses
 from xrtpy.xrt_dem_iterative import XRTDEMIterative
-from lmfit import Parameters

 channel_names = [
     "Al-mesh",
@@ -112,26 +111,26 @@ def test_create_logT_grid():

     x.create_logT_grid()

-    #1 — Correct start and end
+    # 1 — Correct start and end
     assert x.logT[0] == pytest.approx(5.5)
     assert x.logT[-1] == pytest.approx(7.5)

-    #2 — Correct number of bins: (7.5 - 5.5)/0.1 + 1 = 21
+    # 2 — Correct number of bins: (7.5 - 5.5)/0.1 + 1 = 21
     assert len(x.logT) == 21
     assert x.n_bins == 21

-    #3 — 
Correct spacing (uniform) + # 3 — Correct spacing (uniform) diffs = np.diff(x.logT) assert np.allclose(diffs, 0.1, atol=1e-12) - #4 — dlogT and dlnT correct + # 4 — dlogT and dlnT correct assert x.dlogT == pytest.approx(0.1) assert x.dlnT == pytest.approx(np.log(10) * 0.1) - #5 — T = 10**logT + # 5 — T = 10**logT assert np.allclose(x.T.to_value(u.K), 10**x.logT) - #6 — logT strictly increasing + # 6 — logT strictly increasing assert np.all(np.diff(x.logT) > 0) @@ -148,7 +147,7 @@ def test_estimate_initial_dem(): maximum_bound_temperature=7.5, ) - #Step 2: Create temperature grid & response matrix + # Step 2: Create temperature grid & response matrix x.create_logT_grid() x._interpolate_responses_to_grid() @@ -158,13 +157,13 @@ def test_estimate_initial_dem(): # TEST 1: Correct length assert len(est) == len(x.logT) - #TEST 2: All values should be exactly 0.0 ( Python implementation overrides with flat logDEM = 0) + # TEST 2: All values should be exactly 0.0 ( Python implementation overrides with flat logDEM = 0) assert np.allclose(est, 0.0) - #TEST 3: Internal storage _initial_log_dem should match + # TEST 3: Internal storage _initial_log_dem should match assert np.allclose(x._initial_log_dem, est) - #TEST 4: Returned DEM should be finite + # TEST 4: Returned DEM should be finite assert np.all(np.isfinite(est)) @@ -242,7 +241,6 @@ def test_residuals_simple_case(): # Synthetic pm_matrix = constant 2 everywhere x.pm_matrix = np.ones((1, N)) * 2.0 - # STEP 3 — Construct synthetic spline state x.spline_logT = np.array([x.logT[0], x.logT[-1]]) x.spline_log_dem = np.array([0.0, 0.0]) # log10(DEM)=0 → DEM=1 @@ -252,13 +250,11 @@ def test_residuals_simple_case(): params.add("knot_0", value=0.0, min=-20, max=0) params.add("knot_1", value=0.0, min=-20, max=0) - # STEP 4 synthetic errors x.intensities_scaled = np.array([10.0]) x.sigma_scaled_intensity_errors = np.array([1.0]) - - #MISSING IN ORIGINAL TEST: Need abundances and weights (normally set in _prepare_spline_system) + # 
MISSING IN ORIGINAL TEST: Need abundances and weights (normally set in _prepare_spline_system) x.abundances = np.ones(1) x.weights = np.ones(1) @@ -276,249 +272,320 @@ def test_residuals_simple_case(): assert np.isclose(residuals[0], expected_residual) +def test_solve_single_dem_zero_case(): + """ + If all observed intensities are zero, the solver must return: + - DEM = all zeros or value of normalization_factor + - modeled intensities = all zeros + - chi sqr = 0 + - result = None + This run will output a warning - expected due to no intensity_errors provided. + """ + # filterwarnings = ignore:No intensity_errors provided + filters = ["Al-poly", "Ti-poly"] + intensities = np.array([0.0, 0.0]) # all zero → triggers nosolve + responses = generate_temperature_responses(filters, "2012-10-27T00:00:00") -# ----------------------------------- TEST Against IDL ------------------------------------------ + x = XRTDEMIterative( + observed_channel=filters, + observed_intensities=intensities, + temperature_responses=responses, + ) + # STEP 2 — Build grid + response matrix + x.create_logT_grid() + x._interpolate_responses_to_grid() + + # STEP 3 — Call solver on zero intensities + dem, modeled, chi2, result = x._solve_single_dem( + observed_intensities_vals=intensities + ) -# def test_interpolate_responses_to_grid(): -# # ------------------------------- -# # Step 1: Setup a simple DEM case -# # ------------------------------- -# filters = ["Al-poly", "Ti-poly"] -# intensities = np.array([1000.0, 2000.0]) - -# responses = generate_temperature_responses(filters, "2012-10-27T00:00:00") - -# x = XRTDEMIterative( -# observed_channel=filters, -# observed_intensities=intensities, -# temperature_responses=responses, -# minimum_bound_temperature=5.5, -# maximum_bound_temperature=8.0, -# logarithmic_temperature_step_size=0.1, -# ) - -# # ------------------------------- -# # Step 2: Create temperature grid -# # ------------------------------- -# x.create_logT_grid() - -# # 
------------------------------- -# # Step 3: Interpolate responses -# # ------------------------------- -# x._interpolate_responses_to_grid() - -# # ------------------------------- -# # TEST 1: Correct response matrix shape -# # ------------------------------- -# n_filters = len(filters) -# n_T = len(x.logT) - -# assert x.response_matrix.shape == (n_filters, n_T) -# # TEST 2: Response values must be non-negative -# assert np.all(x.response_matrix >= 0) - -# # TEST 3: Responses at the boundaries are very small but not negative -# assert np.all(x.response_matrix[:, 0] >= 0) -# assert np.all(x.response_matrix[:, -1] >= 0) - -# # TEST 4: Values near boundaries should be small -# assert np.all(x.response_matrix[:, 0] < 1e-27) -# assert np.all(x.response_matrix[:, -1] < 1e-27) - - -# # TEST 3: Interpolated values are finite - -# assert np.all(np.isfinite(x.response_matrix)) - -# # TEST 4: Boundary values should be significantly smaller than peak response -# for i in range(n_filters): -# peak = np.max(x.response_matrix[i]) -# left = x.response_matrix[i, 0] -# right = x.response_matrix[i, -1] - -# # Boundaries should be at least 100x smaller than peak -# assert left < peak / 100 -# assert right < peak / 100 - -# # ------------------------------- -# # TEST 5: Temperature ordering preserved -# # Response_matrix row i should roughly follow original shape: -# # no reversed ordering, no nan blocks. 
-# # ------------------------------- -# for i in range(n_filters): -# # Check no negative values -# assert np.all(x.response_matrix[i] >= 0) - -# # Should have at least 2 non-zero values inside range -# assert np.count_nonzero(x.response_matrix[i]) > 2 - -# # ------------------------------------------------------------------------- -# # 1) Valid configuration should pass validate_inputs -# # ------------------------------------------------------------------------- - -# def test_validate_inputs_valid_configuration(basic_responses, basic_intensities): -# x = XRTDEMIterative( -# observed_channel=["Filter-1", "Filter-2", "Filter-3"], -# observed_intensities=basic_intensities, -# temperature_responses=basic_responses, -# minimum_bound_temperature=5.5, -# maximum_bound_temperature=8.0, -# logarithmic_temperature_step_size=0.1, -# monte_carlo_runs=0, -# normalization_factor=1e21, -# ) - -# # Should not raise -# x.validate_inputs() - - -# # ------------------------------------------------------------------------- -# # 2) Empty observed_channel should raise -# # ------------------------------------------------------------------------- - -# def test_empty_observed_channel_raises(basic_responses, basic_intensities): -# with pytest.raises(ValueError, match="`observed_channel` is required"): -# XRTDEMIterative( -# observed_channel=[], -# observed_intensities=basic_intensities, -# temperature_responses=basic_responses, -# ) - - -# # ------------------------------------------------------------------------- -# # 3) Mismatched lengths of intensities / responses / channels -# # ------------------------------------------------------------------------- - -# def test_length_mismatch_raises(): -# responses = [DummyResponse("F1"), DummyResponse("F2")] -# intensities = np.array([1000.0]) # only one value - -# with pytest.raises(ValueError, match="Length mismatch"): -# XRTDEMIterative( -# observed_channel=["F1", "F2"], -# observed_intensities=intensities, -# 
temperature_responses=responses, -# ) - - -# # ------------------------------------------------------------------------- -# # 4) Temperature range outside response grid should raise -# # ------------------------------------------------------------------------- - -# def test_temperature_range_outside_responses_raises(basic_responses, basic_intensities): -# # min T too low -# with pytest.raises(ValueError, match="outside the bounds"): -# XRTDEMIterative( -# observed_channel=["Filter-1", "Filter-2", "Filter-3"], -# observed_intensities=basic_intensities, -# temperature_responses=basic_responses, -# minimum_bound_temperature=4.0, # below dummy response range -# maximum_bound_temperature=8.0, -# ) - -# # max T too high -# with pytest.raises(ValueError, match="outside the bounds"): -# XRTDEMIterative( -# observed_channel=["Filter-1", "Filter-2", "Filter-3"], -# observed_intensities=basic_intensities, -# temperature_responses=basic_responses, -# minimum_bound_temperature=5.5, -# maximum_bound_temperature=9.0, # above dummy response range -# ) - - -# # ------------------------------------------------------------------------- -# # 5) Negative or zero logarithmic_temperature_step_size should raise -# # ------------------------------------------------------------------------- - -# def test_negative_logarithmic_temperature_step_size_raises(basic_responses, basic_intensities): -# with pytest.raises(ValueError, match="logarithmic_temperature_step_size must be a positive"): -# XRTDEMIterative( -# observed_channel=["Filter-1", "Filter-2", "Filter-3"], -# observed_intensities=basic_intensities, -# temperature_responses=basic_responses, -# minimum_bound_temperature=5.5, -# maximum_bound_temperature=8.0, -# logarithmic_temperature_step_size=-0.1, -# ) - - -# def test_too_few_temperature_bins_raises(basic_responses, basic_intensities): -# # Choose a huge step so that fewer than 4 bins are produced -# with pytest.raises(ValueError, match="Temperature grid must have at least 4 points"): 
-# XRTDEMIterative( -# observed_channel=["Filter-1", "Filter-2", "Filter-3"], -# observed_intensities=basic_intensities, -# temperature_responses=basic_responses, -# minimum_bound_temperature=5.5, -# maximum_bound_temperature=5.8, -# logarithmic_temperature_step_size=0.5, -# ) - - -# # ------------------------------------------------------------------------- -# # 6) Monte Carlo runs validation -# # ------------------------------------------------------------------------- - -# def test_monte_carlo_runs_negative_raises(): -# with pytest.raises(ValueError, match="must be ≥ 0"): -# make_iterative(monte_carlo_runs=-1) - - -# def test_monte_carlo_runs_bool_raises(): -# with pytest.raises(ValueError, match="must be a non-negative whole number, not a boolean"): -# make_iterative(monte_carlo_runs=True) - + # STEP 4 — Assertions matching IDL behavior -DEM must be zero everywhere + assert np.all(dem == 0.0) -# def test_monte_carlo_runs_float_non_integer_raises(): -# with pytest.raises(ValueError, match="Decimal values are not allowed"): -# make_iterative(monte_carlo_runs=3.5) - - -# def test_monte_carlo_runs_zero_ok(): -# x = make_iterative(monte_carlo_runs=0) -# assert x.monte_carlo_runs == 0 - - -# def test_monte_carlo_runs_positive_integer_ok(): -# x = make_iterative(monte_carlo_runs=10) -# assert x.monte_carlo_runs == 10 + # modeled intensities must be all zero + assert np.all(modeled == 0.0) + # chi² must be zero + assert chi2 == 0.0 -# # ------------------------------------------------------------------------- -# # 7) Intensity errors validation in validate_inputs -# # ------------------------------------------------------------------------- - -# def test_intensity_errors_length_mismatch_raises(basic_responses, basic_intensities): -# x = XRTDEMIterative( -# observed_channel=["Filter-1", "Filter-2", "Filter-3"], -# observed_intensities=basic_intensities, -# temperature_responses=basic_responses, -# intensity_errors=np.array([1.0, 2.0]), # wrong length -# ) - -# with 
pytest.raises(ValueError, match="Length of intensity_errors must match"): -# x.validate_inputs() - - -# def test_intensity_errors_negative_raises(basic_responses, basic_intensities): -# x = XRTDEMIterative( -# observed_channel=["Filter-1", "Filter-2", "Filter-3"], -# observed_intensities=basic_intensities, -# temperature_responses=basic_responses, -# intensity_errors=np.array([1.0, -2.0, 3.0]), -# ) - -# with pytest.raises(ValueError, match="must be finite and >= 0"): -# x.validate_inputs() + # result object must be None (no lmfit minimization) + assert result is None + # Shape correctness + assert dem.shape == x.logT.shape + assert modeled.shape == intensities.shape + + +def test_monte_carlo_different_realizations(): + """ + Monte Carlo DEM runs should produce DEMs that differ from the base DEM + when observational noise causes perturbed intensities to change. + + This test verifies: + • Monte Carlo output arrays have correct shapes + • Each perturbed DEM differs from the base DEM + • Perturbed intensities differ from the base intensities + • No invalid values appear (no NaNs, no negatives) + """ + + filters = ["Al-poly", "Ti-poly", "Be-thin"] + intensities = np.array([3000.0, 1500.0, 800.0], dtype=float) + responses = generate_temperature_responses(filters, "2012-10-27T00:00:00") + N = 5 # small Monte Carlo batch for fast testing + + x = XRTDEMIterative( + observed_channel=filters, + observed_intensities=intensities, + temperature_responses=responses, + minimum_bound_temperature=5.5, + maximum_bound_temperature=7.5, + logarithmic_temperature_step_size=0.5, + monte_carlo_runs=N, + ) + + # STEP 2: Run full DEM + Monte Carlo solver + x.solve() # this computes base and MC DEMs + + # STEP 3: Basic shape checks + n_T = len(x.logT) + n_obs = len(filters) + + # mc_dem shape: (N+1, n_T) + assert x.mc_dem.shape == (N + 1, n_T) + + # mc_base_obs shape: (N+1, n_obs) + assert x.mc_base_obs.shape == (N + 1, n_obs) + + # mc_mod_obs shape: (N+1, n_obs) + assert 
x.mc_mod_obs.shape == (N + 1, n_obs) + + # mc_chisq shape: (N+1,) + assert x.mc_chisq.shape == (N + 1,) + + # STEP 4: DEMs should *not* all be identical + base_dem = x.mc_dem[0] + + # At least one MC DEM must differ from base DEM + diffs = [ + not np.allclose(base_dem, x.mc_dem[ii], rtol=1e-5, atol=1e-8) + for ii in range(1, N + 1) + ] + + assert any( + diffs + ), "Monte Carlo realizations did not change the DEM; noise not applied?" + + # STEP 5: Perturbed observed intensities should differ + base_obs = x.mc_base_obs[0] + + obs_diffs = [not np.allclose(base_obs, x.mc_base_obs[ii]) for ii in range(1, N + 1)] + assert any( + obs_diffs + ), "Monte Carlo observed intensities identical; noise not applied?" + + # STEP 6: No invalid numbers after fitting + assert np.all(np.isfinite(x.mc_dem)) + assert np.all(x.mc_dem >= 0.0) + assert np.all(np.isfinite(x.mc_mod_obs)) + assert np.all(np.isfinite(x.mc_chisq)) + + +def test_reconstruct_dem_from_knots(): + """ + Test whether DEM reconstruction from spline knots behaves predictably. 
+ + We construct: + • a synthetic temperature grid + • synthetic knots at logT endpoints + • synthetic log10(DEM) values at knots + And verify that: + • reconstructed logDEM matches knot values at endpoints + • DEM(T) is smooth in between (no NaNs or jumps) + • the DEM shape increases when knot values increase + """ -# #Test to add later -# #both should be True -# # np.allclose(x.intensities_scaled, -# # x.observed_intensities.value / x.normalization_factor) + filters = ["Dummy"] + intensities = np.array([1000.0]) + responses = generate_temperature_responses(["Al-poly"], "2012-10-27T00:00:00") + + x = XRTDEMIterative( + observed_channel=filters, + observed_intensities=intensities, + temperature_responses=responses, + ) + + x.create_logT_grid() + + # STEP 2 — Define synthetic spline knot positions - Use ONLY endpoints (simplest nontrivial spline) + x.spline_logT = np.array([x.logT[0], x.logT[-1]]) + x.n_spl = 2 + + # STEP 3 — Create synthetic lmfit Parameters for knots - Case: log10(DEM) goes from -2 at low T to +1 at high T + params = Parameters() + params.add("knot_0", value=-2.0) # DEM = 1e-2 + params.add("knot_1", value=+1.0) # DEM = 1e+1 + + # Also required by reconstruct: store initial logDEM values + x.spline_log_dem = np.array([-2.0, 1.0]) + + # STEP 4 — Reconstruct DEM + dem = x._reconstruct_dem_from_knots(params) + + # STEP 5 — Assertions + # # Shape matches temperature grid + assert dem.shape == x.logT.shape + + # No NaNs or negatives + assert np.all(np.isfinite(dem)) + assert np.all(dem >= 0.0) + + # Endpoint matches exactly 10^knot_value + assert np.isclose(dem[0], 10 ** (-2.0), rtol=1e-6) + assert np.isclose(dem[-1], 10 ** (1.0), rtol=1e-6) + + # The DEM should increase monotonically between these endpoints + # (Cubic spline + monotonic knots → smooth monotonic increase) + assert ( + dem[0] < dem[len(dem) // 2] < dem[-1] + ), "Reconstructed DEM is not increasing between knots" + + +def test_full_pipeline_end_to_end(): + """ + Full DEM solving pipeline 
using real XRT filter responses. + Verifies: + • solve() runs without errors + • base DEM exists, finite, and positive + • modeled intensities computed + • chi-square finite + • Monte Carlo arrays created correctly (when N>0) + • No NaNs, no negative DEM, no shape mismatches + """ + + filters = ["Al-poly", "Ti-poly", "Be-thin", "C-poly"] + intensities = np.array([2500.0, 1800.0, 900.0, 450.0], dtype=float) + responses = generate_temperature_responses(filters, "2012-10-27T00:00:00") + N = 3 + + x = XRTDEMIterative( + observed_channel=filters, + observed_intensities=intensities, + temperature_responses=responses, + monte_carlo_runs=N, + ) + + x.solve() + + # STEP 3: Base DEM checks + assert hasattr(x, "dem") + assert x.dem.shape == x.logT.shape + assert np.all(np.isfinite(x.dem)) + assert np.all(x.dem >= 0.0) + + # Peak must not be zero everywhere + assert np.max(x.dem) > 0.0 + + # STEP 4: Modeled intensities + assert hasattr(x, "modeled_intensities") + assert x.modeled_intensities.shape == intensities.shape + assert np.all(np.isfinite(x.modeled_intensities)) + + # STEP 5: Chi-square + assert hasattr(x, "chisq") + assert np.isfinite(x.chisq) + + # STEP 6: Monte Carlo arrays exist and are valid + assert x.mc_dem.shape == (N + 1, len(x.logT)) + assert x.mc_base_obs.shape == (N + 1, len(filters)) + assert x.mc_mod_obs.shape == (N + 1, len(filters)) + assert x.mc_chisq.shape == (N + 1,) + + # MC fields finite + assert np.all(np.isfinite(x.mc_dem)) + assert np.all(x.mc_dem >= 0.0) + assert np.all(np.isfinite(x.mc_base_obs)) + assert np.all(np.isfinite(x.mc_mod_obs)) + assert np.all(np.isfinite(x.mc_chisq)) + + # STEP 7: At least one MC DEM must differ from base DEM + base_dem = x.mc_dem[0] + different = [ + not np.allclose(base_dem, x.mc_dem[i], rtol=1e-5, atol=1e-8) + for i in range(1, N + 1) + ] + assert any(different), "Monte Carlo DEMs identical to base DEM — noise not applied?" 
+ + # STEP 8: Modes must differ for perturbed cases + base_mod = x.mc_mod_obs[0] + different_mod = [ + not np.allclose(base_mod, x.mc_mod_obs[i]) for i in range(1, N + 1) + ] + assert any(different_mod) + +# ----------------------------------- TEST Against IDL ------------------------------------------ + + +def test_compare_with_idl_dem(): + """ + Compare xrtpy DEM solver output with reference IDL DEM stored in a .sav file. + The comparison is tolerant (allclose), since Python spline fitting and + MPFIT/LMFit differences mean we cannot expect bit-exact equality. + """ + + TEST_DIR = Path(__file__).parent + data_path = ( + TEST_DIR / "IDL_DEM_testing_sav_files" / "xrt_IDL_DEM_2012_10_27_MC100.sav" + ) + data = readsav(data_path) + logT_idl = data["logt"] + + # XRTpy + filters = ["Ti-poly", "Be-thin", "Al-poly", "C-poly"] + intensities = np.array([311680.2, 135815.0, 2351258.9, 2352987.7]) + date = "2012-10-27T16:27:46" + + responses = generate_temperature_responses(filters, date) + + x = XRTDEMIterative( + observed_channel=filters, + observed_intensities=intensities, + temperature_responses=responses, + monte_carlo_runs=0, # IDL comparison → no MC + ) -# # np.allclose(x.sigma_scaled_intensity_errors, -# # x.intensity_errors.to_value(u.DN/u.s) / x.normalization_factor) + # STEP 3 — Run Python DEM + x.solve() # linear DEM + logT_python = x.logT + + # STEP 4 — Compare logT grids - IDL usually uses exactly the same grid, but check with tolerance + assert np.allclose( + logT_python, logT_idl, atol=1e-6 + ), "Temperature grids differ significantly between IDL and Python." 
+ + # #log_dem_python = np.log10(np.maximum(dem_python, 1e-99)) + # #dem_idl = data["dem"] + # #log_dem_idl = np.log10(np.maximum(dem_idl, 1e-99)) + # # STEP 5 — Compare DEM values + # # Numerical tolerances: + # # - 0.3 dex tolerance → factor of ~2 + # # - Acceptable because: + # # • Spline fits differ slightly between MPFIT and LMFit + # # • IDL interpolation behavior differs from CubicSpline + # # • Floating-point roundoff differences + # tol_dex = 0.9 + + # diff = np.abs(log_dem_python - log_dem_idl) + + # assert np.all(diff < tol_dex), ( + # "DEM shape diverges too far from IDL reference.\n" + # f"Max difference {diff.max():.3f} dex (allowed {tol_dex})." + # ) + + # # Ensure the overall trends match: peak in same region + # peak_idl = np.argmax(log_dem_idl) + # peak_python = np.argmax(log_dem_python) + # assert abs(peak_idl - peak_python) <= 1, \ + # "DEM peak location differs more than 1 temperature bin." From 77e651f67bc5e0f470892e01ba4d05a78b2504ca Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Thu, 15 Jan 2026 10:09:57 -0500 Subject: [PATCH 120/121] Updating testing values to real values --- .../test/test_dem_input_validation.py | 24 +++++++++++-------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py b/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py index 5c999fcc6..23de3ef5b 100644 --- a/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py +++ b/xrtpy/xrt_dem_iterative/test/test_dem_input_validation.py @@ -78,7 +78,7 @@ def test_dem_temperature_grid(): def test_validate_inputs_good_case(): filters = ["Be-thin", "Be-med"] - i_obs = [10000.0, 20000.0] + i_obs = [100.0, 200.0] resp = generate_temperature_responses(filters, "2007-07-10") dem = XRTDEMIterative(filters, i_obs, resp) dem.validate_inputs() # Should NOT raise @@ -86,7 +86,7 @@ def test_validate_inputs_good_case(): def test_validate_inputs_mismatched_errors(): filters = ["Be-thin", "Be-med"] - i_obs = 
[10000.0, 20000.0] + i_obs = [100.0, 200.0] i_err = [100.0] # Wrong length - should be two error/ uncertainties resp = generate_temperature_responses(filters, "2007-07-10") dem = XRTDEMIterative(filters, i_obs, resp, intensity_errors=i_err) @@ -97,7 +97,7 @@ def test_validate_inputs_mismatched_errors(): def test_create_logT_grid(): filters = ["Al-poly"] - intensities = np.array([1500.0]) + intensities = np.array([150.0]) responses = generate_temperature_responses(filters, "2018-10-27T00:00:00") x = XRTDEMIterative( @@ -136,7 +136,7 @@ def test_create_logT_grid(): def test_estimate_initial_dem(): filters = ["Al-poly", "Ti-poly"] - intensities = np.array([1500.0, 2300.0]) + intensities = np.array([150.0, 230.0]) responses = generate_temperature_responses(filters, "2012-10-27T00:00:00") x = XRTDEMIterative( @@ -537,17 +537,21 @@ def test_compare_with_idl_dem(): TEST_DIR = Path(__file__).parent data_path = ( - TEST_DIR / "IDL_DEM_testing_sav_files" / "xrt_IDL_DEM_2012_10_27_MC100.sav" + TEST_DIR / "IDL_DEM_testing_sav_files" / "obs_20090730_DEM_MC100_IDL_2026.sav"# "xrt_IDL_DEM_2012_10_27_MC100.sav" ) data = readsav(data_path) logT_idl = data["logt"] - # XRTpy - filters = ["Ti-poly", "Be-thin", "Al-poly", "C-poly"] - intensities = np.array([311680.2, 135815.0, 2351258.9, 2352987.7]) - date = "2012-10-27T16:27:46" + # XRTpy - 2012-10-27 Data Set + # filters = ["Ti-poly", "Be-thin", "Al-poly", "C-poly"] + # intensities = np.array([311680.2, 135815.0, 2351258.9, 2352987.7]) + # date = "2012-10-27T16:27:46" + + filters = ["Al-mesh", "Ti-poly", "Al-poly", "Be-thin"] + intensities = [178.482 ,44.919,132.193,3.149] # DN/s + observation_date="2009-07-30T00:38" - responses = generate_temperature_responses(filters, date) + responses = generate_temperature_responses(filters, observation_date) x = XRTDEMIterative( observed_channel=filters, From 249a5e70a0dedbe998c616009dbe6a80b5690b52 Mon Sep 17 00:00:00 2001 From: joyvelasquez Date: Thu, 15 Jan 2026 10:10:21 -0500 Subject: [PATCH 
121/121] Adding more IDL sav DEM testing files --- .../obs_20090730_DEM_MC100_IDL_2026.sav | Bin 0 -> 22480 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 xrtpy/xrt_dem_iterative/test/IDL_DEM_testing_sav_files/obs_20090730_DEM_MC100_IDL_2026.sav diff --git a/xrtpy/xrt_dem_iterative/test/IDL_DEM_testing_sav_files/obs_20090730_DEM_MC100_IDL_2026.sav b/xrtpy/xrt_dem_iterative/test/IDL_DEM_testing_sav_files/obs_20090730_DEM_MC100_IDL_2026.sav new file mode 100644 index 0000000000000000000000000000000000000000..0115d0ed6e5e38cd4e59c5726e8584e55d01bbb0 GIT binary patch literal 22480 zcmeFY`9D_C*9J_P2^o_}88gqB;y!0DWS(v^&$G%HQld!aOi9L)DMKY>YBD5K#w10O zR7l7?RPX(Lp7-1P%ljuh=kwX;-md$6aP77C+IwAV9Wzrh3NkV>Ix;eflcdl8yZ&zk z{%-{SZv+mUa&zU=bvnx@BhM!ztsEh+uK5APxU8vXz6{?DC4 z-$ix$rv5^|h|J(N@eI6t=a#H*6^`Gzli~R@x|7+BQhh$`g8aX+_V|RB# zVOu0@xA!$h`s1XF#O2hB|LOhjZ7*mV{HI4rYX9Xz`kzgiNiG*BVWj8(bDjAQ*Z*>H zBlZ9Pc>D*W1_swba=MmDP@AS3GITHk)tYr#n z>Vl?Y8&N4s8;Iu?S+wYXfTQcaoSU4Z;Lh3Qq;-uJyoA0i92B+#Pt5;qRvss~jP-sv zE0zZ~S!aoqnW12;qZ;_bBpnF)+wAO(mq9GMVV`6<4Y1ZGtFoCdqm5kO6xSe5G@4hN z`~B%YR8v3lUgVDpN+|TM3?f@Wj%KFcG}4xlw22otIDEo)?iuh)E#%;B--YW+yRPD~ z=8q|DdlPUS^<0{{>~x$;N;SXdF0mI!J6t_2XRmwoLbv{Tg5R_-W9u**+m3RYIB*h#n~WTp4rB_Xaj8iICo+2WwfGck#)l46zZv=etYroJ(O>q zD;TF=g}hR3-26+YjHK_0ejv}Zz?Z(TJhU5|$IF{5BmI>pamR!fv_IDsXQuK`QPH5n z8t`L9n`d<~+x)Jt&Q6AeZ8>dw;_gur#HGThrua~+GX!WZg75Ms%`k=3OLlCpqIF^2Z$g4 zmY7-Pf~L>JM4oCH$gtnBkdluBcIH1OxR9PA#F)tMM7HHEr+ajq5twhzYPjDI&bR1p_Suj;;;LH!kCeG`4v~t zanX98n-~BV?>egMH@3lH*TwDjr$^uva*QV!{|I*788;QZF)$lx+V|p+EkOFB@-5*= zkWOo>xppZ5*z#%5l$KPZl}eRVN_&0Oo5IJ`HLHLMk_^RuI?f>P=C+=s&+$m+5)!q# zsDLlO98LZc?|@exefK5NGYNNc{%*>6Ukhh`HE}z+ZU<`wy<{$~e#|b5a7BZ;BeVYYa#g!5dVsE=1{wVW{+3ls6_(EBz;yQsM!Gf8;1F> zSAEcG|KVz?!dcWO=5_X02N}A3Ct}aJa{}lz4^z?GYD*-0sz$!ep%ed=mdPwtFpF2| z{Qj{UPlh{F>MW0aOvU#bnPu2jKgJpt%sH=pPQ&bF=Cp>Qn+Q8fj?e!lhY`}IiuQ!b zC;gWXkdwdaP>{3*z6eV~{}D@2I=}Llss9S-?H-!$Zng!>KF$%Y5pQtfeQHMTX9muJ zFZf#;Z-B!AZtt&oO+XC&HN5w+F=$dLJrZg<3evAk_=F<4fUQ<3BycMmt>leW_4ShG 
zo6#3{WldvLaM(M_=Rpqg)}(Jdp*DkL1e`oaUBBbY&JX1sXWj6M$QXl4;d8j-->l}V z_C%cdjBuDyz&os=t9_z%|3%D>@5zP1f;WWi*wQp?ew&bX``WpV*_Z$LFm&dXKdFd? zV`aZQlmZU`b`#HD{OJy+51u;OQn-QrQlHSC$SH7T2yc+9rv_Ko6CL<%CUDeio^wm! z24c|i97n@Ez>#S7yCzE@xuS8u;vpljguf#Wstu!MCgJKJ^=Z_}{a8zTa{*-^$UIj# ztd89JcH{D{zeeK3gco{Y>iCc650BA|pTJ8#p6d>q{(#%2XSv9WoeOU3(9mu);Yx6M34le-3(Ui_9W;9A7Kj zB6Y`wBR~HBMMAqyGK$@^i22jGk@{P$Kzq+&>fLi%5Vo6-+H2qf1dFtX$?Te7Qe_c0 z>U|pQUNVMq`>KG;%^zl3=peW%u6(N_%z&ev+l8lfkAT=2`qp>*Ex^4(=L@;UAWfBe zcFgDjuuEI)M}PFX8!3S1}$rziSDIww=X-6+bG-C7Vl zOo|g@YNfzccZgE1UKH%k{2Y?+`V1zo__O?nngN^m>i3n_A0&D_Uq0>i2DaEE@j=ud z&;|@`Un9fONb+HWCqp~vp{a>}N$q_UZKg!#OQb@?`J<_H%$w-Qcy?Y{d>cONkh@K1 zMT_63`diTQAO=5^doEKd&k+|fs*{T7EWtkAc*wXf=M0wiRH3-NY6rv45j4+m+Ysg| zgzj8z8v!O3wLg-)cR|ek&b#+A#eioC1zj+<1+$ci?As4Az>!gdP4cb~xZdTFGdL~= zu5Sh|gp4bJW9(5W#msCV7N`i38D0dCNE0YsB(0x`S{2Cv3ShC7^rRYkfR-k|o$=9c zN3S~U$C*S@(5=b_KA-1++=bmT6D@ z&)+9-`pk_ci`7c3&S3C;%%1(2Rr|v!d+%?AO+zfPRnnS}+F>_u_$`efK<*$i$o&Fj zCxdubPV|A+KHt5`W@2DYEBxA%?JYQ-Uy_Tbs0BA&*5Xcy2yk!vDD~a{6}U7@NevE*}3cdc)4fT{sn_x+>7;q z_*>mpoc~#@zyTRvY^?c}TUq)QEaBewR8NU9Ov&ez&Lyb`!c15>Hf9K-umqAE9!=z#>ViGY@(OS8Ou;F8#^zXSCJn_QVdYn0@0Qb-f zdcpF74nHQTk-}2;4Ets|z7Q2phZROz?n{=xfLZ=xw3t5E1vGjC<4b~HKuAROUTa<# zXwcmIBCdWFj0a{U5Bj_WTl+&#Gzi+_o$7Q zS+`qA!tAo38c_jX=xw#V8+jZrt!-DjMnlByGDV+Gw?4%gw#E!)O8V#*%)SB zoxpWsIh(Ngv&t|FO%u{6UdMUGYX0*F(vriqBXi#Xh-$$fi7P<7ruQl6$S-hgzgI(H zQUUIuQL`|`2A;gN^6KY>z|A(D(}KDZ>^JsjtaP@3sjWH6`1}m8&c)|@n%Y2|De4PX zk|FG0S)a0?Y)8MY?U8m*%Rqg1{XbJ$`=BCyfhJ93Pjv2Ka&?e>4#ImGzMn0WM0Bye z-h*TucwZKy6fHFcp7`O@RGd{SuB9zd%;h7G?eflAzNZ+%DpzNIR*U;#mTG}PV)^12 znP-uO=<096opEEgy>phZr>*9o_TU&CnUmk1p}PwzQr`wIsdj_GHTU8Z+2_IP?Xr?l zWG^@s+@fsa{|L@q?{9=3;sXcEt%&@~h_s(t4oLC~0=&JcS|#BR(osR*aMN;N!vrty z7~DiF2TryUx_MAf;Ca`sSt^vz@ki48Nk8(knr2JUh)2@bJQ=PXT*Q~!kJhr>Jd2kn znRJSxG~6+O&M)SN4bC)`;3K(&um+)l{Y%zoG25$YfyzuQgl&G6ru$oa329-*o0kHa z|7#z#JEEt<;(i$p)e1(qq%VTPKy{9G);rLNI$&91Ck3ZC2hykaJ^)98w8!ir4sd$& 
z*k9=bIoQX?`KWw$0-`6nclh>q&{VT%*GJnRBgp?KuTBBj?X7QUXTLP{t8Werk{xe?1FrFH>p@Tbpb;uM9 zP{a2huiRa#4!|0RbQg5haLk_jyjHsIBwV3EheVCkl(?er#ts`R zCD~|1{)<;nOgd`0QB0S0ss>fOXwF>}`HeC@=*<46kw)PYDgN@=O~}*0I7eSA{O!bFAL`4VaM3oRFI;05LahoETasoW z@LT6-%$PU$Ql~zV-3kQ{_SY!W*$f;#4}`iN5&#RG878Xh&7lA5xSWwO2Vff+A|7Kp zpkOR-4LAdc2$UVxJ$n}pa(X4PSXBVs#9D{NhB4YUwF~l$UPj+zR}&w(u%eOfbKl6* zG*D}B%T%N%6{?yN6lkZy(9MJU`bu);&}EjdrgNpLW}4HaWDD8tFJ8&Jfmt+~L0{gT*sscj0V0wMfo3XJE@ItQd97X0J zIm(pPdp!-o(B_df|JRyBfXO(52N>I)m<45eHAG`lBo6oZKHrMUlzx&U0kw@GT#d*o4#J@?YVMD!wb~SwLSd^13 z&(r}IOxm72@uuk};cXQ8g=7g~U@&+c5f0^WOnc|AWFrF1c7m)&>?D|;AL6^46b$x7 zd?HH?Ea37vwtu6L1zZWQZ+F>#0Y~{y?jZ~VK(s&q<9ozCfc?ycf8AC=a_Zw*HpW-L z67pzo+p8|L%rKe!F?|ko9x5*H=OF1JJnmh#TU5wxBuXAaYmm6ZwSAAg$?zY&_e8}Q zCh(Hkq)=MdG~D*iv&E}D6ga~nuI_IP7* z*x*}j#3f|ncl!(;VG~tuo8710W3f`)_K#}XFsaZchB3WIgaP#7h-t7Q!8@kie}=XP z_+H_%7U!P9@!PS(7w8f}H)=WOp^XBZ`gnC}E`$=COj`;w-hKdA%ekBPo@axLa~zd_ z>N?o{7O^;RbOcO?N7POzO8^q?*@*Fv0I6`^i!sSZfNhX-TF>(+THh1r{qSx$8oH1c zlb~jb%1V8?g5Ui`Vfvo?zR=eotrTIVxf*fAq_q^A|I-m4OnK%|lhKc-?2Sxfd##9T z*Nz;sU@XLTgMa@W6N$qr%A#5v8uw%7OlF z6A6&zja2zylM33>DkFg_$KaH}Mr@u)9ys&fb$W7C1Kcf#b6<#VL4}rR3O#BtBI>C5@YvSG7iD$V z+PShRA~(9aM{*8^NNo9d?ujNjd_nBBxc?CWyo_@B?0eNk+(EPHDrH74&dk{=V-_2Y zH65c&**q|SIX=%Hk{PYR$c7{ZFTd&{WLxix6Y+RVkXl#0M7Eb1#3O|=#o84CdmEL| z+?NHW`&$i9ZJ2`H>)EXp?ICdGw&7iinkAhRx&pJu_`y}@r9@zVRnH?WZ0n^?S zdj5i4fY>r}!>4pW>RCh15QQJGCoQ(626Li~Hf7&gRdqCSq@`9viyqxK3D^JhQyE<@ z<4iT@4@JgZVt-7^`q4oV={>BR&+t#r-$n(|D&u#`RM<0;cyRkthpWS-^f>!;h{i?k z1gxuUDS~LTi-qQ}TbOeUWBdxE(abT`gifvfpM3)vlrbeQK<)yHM*|ZXAi*n?_5aoL^xQ}d8kxJh=bvXx$4xLWYCDr zHF5o731W}mRn=>60NYRMD+)OW&<@oi`S=M6H1UYqbvyhOYHnSO3F0h7nbLMC{sFV- zY(Pg~iHQipe($d(J8gzoP6@ftU2wub31969y^rH%H~CoB&$Q!y$0VmvZ6Gddyx&Jw zl!z^l7WF+{d5cwbbJOK2-p1S%x}CPzU%-CJwuO>2o**GpZua%72%zJlzNcn$!R#5$ z32B?d;4oSrJ@w}-xSq&sR9gxKSM#5#H!Z@!QS@LcPwXlXO+Srq70UsLeYt3Pz7(V) z<#Z(Z?*S|3?lqf|hJG2HO!A4_mhX3g z{XJdx(B*!#lQdc8(X)&u3@W7OEj3Zo$pq^!FP%~P&frzTJwD|1F@Eh!M-WoG_5I1} 
zK_*1MU-DQ+-7$P*m9lzvD;>{|*-i4#-N2nCy>}-yKI2EPvINH&U&cOh=*+C&(#38* zE4j*(Xo49O>YOGWReOr`23lu%GDOYi(lu? zHhmYyZL}sXj4J)c>Bb-MYiPD%wY^1G`?#$zD;$Nfs8rgYRWCp<3=+quofFeLfUSBxV?Tok+Kl-ned;x!cTx*P zN@G1#^R6vMlz|N;XiT}?-;zTP0Wu=L^Td&4PNE8r&@8?s%}{PVe9 z92#jzy7;{v6pGRc_38~lhfSC`l@SaUDg6btmN> zW=cT6A!@r%a6Gz<7TxLaun2jh=)6KRM2#c_}5zpm@^+Bj#`<8oSETCBAw zeA>X18FQaGxZZczIbe^?{~&-0?zjZ`0LZIMb|VImO*PQv6e2-_c@^ z*34J=|RRGQN)Xz59scJodVdR2I+1)Us6w=&pUB5ss{h65$Brm`YKz6{5-ZC8Dm}!kd1ux88{QH&9~Qj* zq+>)#J3zj0nR$mG;3=?L=5_&XnqAoYdaDc$JW}z~dUqM*cys(ryC*>F#g%PWZ+aju zQdPOkF@W9Nt@S2#18^)}NaD0DaO+NWKE38S&FHW<%60=VVJ;;2Iq{oLmRpNd(Pe|L2 zue=au_Rl_1u4`dT@5bO#bybe-v3Llvs*VkgNCy7{tw!^ix4@HUtC-)L4;(fl=#i;xfw@;(bfoV}LEljGn3w^ZNw<1avKpdHL{ z0imsE>J=Awj=ne2$90NDpi#PAuD?npsQtd&VBf27RQ1n`(uI;*w z84P=)fb95|J*|1j=FL7oRXIik%*>wcLODom;*G*({a(a!+hRO9_aeTrB%S9SJPId` z84f+K>IDmtv1@K;h~UI|_~qT*Kydf!l1i#o05A3@V<%Ke`9>-Se{wQX+|Xzz={MdF zHl5m<2?=|^_|=%CplB0dcETIt`_e)3aljyBpd@hecSzjj??k^d)Q9`-Pov4%cLgJk zwdg7HUg6QkQIr{&J6SYtgU%$K6>3-{AVR^VNu#25#7xnUUUl>b{yvP#NbJKTUii`| zCoNPMcOOXpq|0>x7jAn0kw!)Un_=evQ=uG+6?8uhdNkpM5e=M$mDLtN{&O3>;*K$B zzce?#pVi3Q;JJG!UIO%mL$@3p7WBl+(~8HY_Xh~V8`%D;jH+uD z0FwkM!Rw7T0V@~m3LMo1iO`>a{N53OO*d~LYEBZZFWcl72sWb;ak|E+#+n7sGuNcZI~Kop7PO z+UfS3`>=^bgI{e;9$1Qd3Okd5DRyFqysdv^h4A%kr}yC4C>+zLmDcm5212N4-o@B@ zFi}0$+RCX3cEY+-{M}^WO1nJd^ox`q4vxu)zfAzw41Km{?ozOSm-~J_oD59Ay(Z17 zAi(E`cu%$@f#itU3oqk7V6%IUKT7CDYY%UUzn`c>LlHAszh^knJuQmjmV+dF z<%I)?N$WST4jThw{g;Gq>-K1#K|X$JX)kK^(c26ls-tudvgQ+4MUle?34?am4@e-& z7=QIP2LCctaQDyH9Dch#RJgOV7dMB{>!0KiP8neNxo$uSt3G`5MRUp?%>3rg@0Rro zgf(w@RlTPqSvIFeNkymYpFcQ8oBBCELjed`g3>xtr1^6*(L-d16lYV$Fdd~*0@sG; zcQSo#z(ZMSz2u-exJhi#pSTtb4k5P6&)=GY8O^n!=HE7e*Ds_g$Bcr+sOu8BWiqhJ z`lPSm=4fSbz<2RgAsR3;DBZhw1QolP4`tPiq2Q})0%jYl2px@c^Y0)>bk>KD*F5-w zzmAN1uz4&5kDp<>JbQ&7N1IWbI?I*V=25Ls7jp-!wCYLUk)w^6X?M4@%HD^B%{{^y z$*k0btZQ#Og`dU()xNrSZ!McZpuoU~_b&z~t`9~Sn*Rc0J9`$Fqf1~b9LyHm`wg6D zz8Pv(5=i^$cGiG}8aP^zKYcpK z*W8Yq>bwT(p$vSUtfhnULZmD-eE@kj9gB|5Za~tkQ#fB_0lsw7&LsMK6n?KG#!6zh 
z4|iDU%Uc&2#F?lsRq3)gV)g0E6Ss5ZF`IRciC1$LgsnE&kLHS2gfw##D)T;*fA%3e zpjh^K_yZiaI~-zc8vw`c!le@4H4|Exd-l1GgW0Bnr$zZFKxzR!b>b6{KDR@!MYu>>Kc=IHr3q-|lv|(gofoKQdvDou zW(|}-@a|)9;%Vf~I4Cx90z)!1oHYr{I{31_>-j^c$ngsI!!pNthH=N0EpArMA)MK* z{!`ovU=3~jO{4dpVs<>_)cv$DauStD^fC_MoUykCEsYbu~jm}q{#L%Rf5Aj;2A9~^$uD&;h58W6g z5?{JXq0?DhBc-eINMq}V&1Hp;h%vXqfzrngALGp!&&$B^JC#fh%7iN1&ET|gM+-YH zxPHR;kW4l+MmNgMiqL^(Uqgpea3;tsUWd!SM1bQl z9j|x$S+uq@-*MuFKN>h1Ug8wMgYK-Z3l}J-ptH%M!)|GXNPf?jO110@eD&mj`wbUW z{J|29otkSP?vl!){FZd?VJUYA7jLk@njbDwk5hle96pFil}GO6GsslR!C~(Cgyy|fa7~p;@`N#P z?SH~5{j(MvlfqBGjH?9VL%T}LZdU+p+oU^xagf^2d1>?b5U@n{dhf~JMaz`iYWmSx zsFSSp=s_bnD6noKr1pJ8HC+Dx_%LK2i_;M6 zfMek;9|!M})$)@w#5%^35)j+l-XoRzG%gZQOH6`x>E|0r}%`nP8^BoF=)KO8+ow?0_d=B;>^NCt|=Llcr_$x)=?kI4D+wTRq-6X#tyBiFSg<+xVVGTeGRw~mmG6G;^ zE!$q!1u3PanAnq5z^Zr@R11vIuQWG8OYASy^Y>0&7V{3e9rWk+u#^ot>&E`xnrjbI zlp6K6m~_Rr8Vk1L4$tEC?K1fy@x8cz%#WLqQSG=uQO+t$RW>&4TJeS9MhJHC4|dA$ z12uL;{OCL{Hssxd0_sg zpyG1q8*obCnf9Y-2X~$}KW}pda9cRLs3*fos((3Lk>Py~%xP9Ftw??W3{58X(-J}Q z;g`J@Vy3{tJRN5wql}i$F+^nQn4nh_gC7|mCZVkQ@kX=k0puF@Ug6<6e-ZntuHBkm`+p_R)PiPS{oygHt~rJWDv756-MImny03z(Y;&>CNs7;1#8Vq%41v z^y{#LUKdDy&ennK@o+p?4_{Y}Tb~1??~>M^uWADUZaQ-hm4mobmE3jy7+^mXO2_xu z2yK_o)iMnxf90X2sU>fSB=|kg7ek2keBZy z!Ns?UV0+pa9M*oTskmJNV#bC}cH#y=!=&5690`yngxt%mBL~(N>uvqXTJ-CpNT31P z9P0L#3i?iIi*kpSLzV+=kf*}7#RgvklDf*cEVzi_iyM2hlftjz_fD~aRqT7*A!IYM z|AZFKnE$!r>*^#{Z<3c&d`|_lX?XaM!Ei5OD`#f^+#Lf#npV4P3>^PwpP2F2W22A0 z!sRH@kjY$rI6s{^(=I&-0T+c@@@q);i5@<(TagS-w9}@UPKUvQ!rpr8sSN1Fw*Foe zSOvm8pUpgrG?2RzQ=u!+Op51XI%Uy5I3QkE{krfL(8kBwkNWYVZK}J5+HP8CHvQxG zRx&R1PLs1&`>8+bz>YuOJ;#G;o=^;SzWIS}|2(M>vsHx>c{Gc>Z}_3$n`TAQHlD~S zWq9kz5(Cmb?P+PdaSq8T1|9Ot6-5VsAAG=HKZ5q?_8xG?_QFvMsz|pr22fQo6igCg z2cyS*3-e<%U?*|9sIqSb+)TnxcU<`no^Rt){!|2l$B{hd5$9HLQmd!yylma!W26`TtBn-n4Mj zq?r=QK09`EKy?jY3lp5=e*3%Z1+Bc{ubT4o4lG9gd*+>(X8&b*5fpFYD;|GD4zTX|6V^xj=& zXdVoeoS~d^#5U>E(ezMEPi+5EWkPH zWp_};A#ksF=S4g81>E`S7HM=lz)5w$XpdYmn0qCpEWbDinv&0jd&n0^e1cz~<_ciD 
zZO&LbXoptWf{Rsh3{hV&rNp;sPE<%1Tq;?868Y7HJvGzgMoM99IX*|}@!e2)RZ8D=bm?P=}Kjp;7cbsUzFyx+B^?IX%4XzMK7-K)5X$nSJjLNJwe2p7FE>_QCsY!SnCYPQ~g}VRtf` z$m=#F$flyE7c@fdx`im?l4_COVIuN=wsP~YH!nKz#_XVOcMM{<#Ju__{4PFf;cxoH z>mz>KP4gNXsfNep+3o(c&_}p{rQwQd-2-fDns?=ImkgHu^MBQof>MXb0Edo1P|?Twwd-XN~2J zOfaFnQd+700~`Z9L^kNrwhq^p`#2-|Aln>C-d>LCD=MPa zuUto|-ihNjN0N|R>n^_#&Ou5$u?o;kDj>>zMX!UJ_TU3H9li@W8u*Q;dq4G*NaB|F zcOFw}rQ)359n0yXJh9@t>p!u>0boWT;9eW-L>Q7`IhNc_=nYP9Xvv^Yw zU>hB+Q1}EK(yF+}&@Z6=_0y>ycSc*P$;Ej$U!&QTE0G1N@6mgf*a4>EPSlkh7$Cl9 z4K-w{6dGA{q0$4%J{q0P=!Wvb9Kq=hisDdEpK>)re%;o69m z>aQ;aFyKkJ+&k_7M2U3EPoW>^qANgtXj+ zD)Aeu|Lns~X0LZzb_rx!L*pJ1eL!10&^yg41x`hNYL0%k7n}?G;*5%{z(cMnP0=wN z+@HF+QWTQ(x0K7`+usVo+!4<>pOXp@lp=k&L>MHipi$H2J?vjJc09O|gqGALqG=e} zQAe)od9{^TlqG#Et!H@_xzY_M+mPbcV>5v%4GF#YoYIl_0hvy`Xfkvv_hTb&EgAbp zZCMhhoqGREYo8)kE5P2TM$p47bA8nl&I=MYuEeyX(FsCoQ2&(#F1LStltk%vy`Bkz zPJXo72M(N+zv52~lnVvNEKSDm8n3}Mlz6#G`H%12%HiH`|W}q#R=8aC>s#Jdo?g^C}kYnhnZG`zWLFSIs|LCw`&^r5;V~ z23eG{quDNz?T%a)UeLXXUO;k1zj2}SzKG0Fa`?2BEZ#Bn(%09q36EcH>HL_+fa~^! zvRv!Qz^Tr*iWM9(!Jc1eIxt&VhXn+l_#Ur(7(23_@0HF)2UgjyC#F66!1eylPg`si zPG2}O=701j_;m|XdLCv2|2?i>xu4L$nTNX>C*G6nV>`yGp3MPv9Ft)eUiX8sc|v#X zRXWhP%lJyf))++X)WiY~G6BoG4>T0r@6iTj?WCTPG8&!Ly2dN<9M$nu+S0ccqw5po ztTXq_kZVD-h_9duQrsM*7wZp0RLo6B4qhAku4LRtsv(T`MD5xlx4WAaApKQ zqTW;zN6f$`lg}Ky{h<=eYRDiHUm3)VFDE?xez6d!5-E8V`#*yqgWamIdMBtKZm~G9 zYYE0BS5SRgCD>*MQV7SJfQz`4rBEA5-#IijkV{F@FFKPqNSo?Z~!{&}Gyk=0(BB`2*!;o7>C(8b_W(vu8Q#G?Da? 
zW3)akZTOOzddL>T5`OOuPmz6!1@1^@dW7st0?x#oy>oi^0al+?j&hr|F`KPiQ^nlh zgst{3rBCBX>&N`1c!tmar|!VEkcC6l=qpMYYVwLt^bP0;1Ca;A&SfKy&^ z{;#x1@wLVF4owCdIQ3|kX=L_+y=LB`bc{O?5B_-QBJKmwfAw(rUq+B_Y`K0%;vKNH zTf4~J)FAl-im!`7yr{QIy*F^C1{D+*c9LcAA@7&JKdJ_}BALii!4!>Ue7U_*c<5RW zUU{fH_tZy5+{x}F1KGDZoVmrx-%wu~YgFNNSIe@;?2;5U2&)W)?E#ym+lwT_09Q8$CzMo4!0{{-UUt zLpn9YAralNe0yw{cMAFMo6$`%oJA_{a!aOWs1ey8_E-zsBD}?#cYvkW3BOo4p0$?H zfXi`Z(K)~givSh=XR;7AhX=0 zBiF49&`c+nd9M)6?(XVJZYqL(b>NL|gG5rkheOZg!e4N66iv8YO$9E(N2zC<5ZLB4 zXiep=g30R!we_Ymfcr6z4=>t-RQ1uYE-eAzh}IOY9hF1dCK~6<*c;H8kf}t3+#0GW z$>HyNR*K?qHL;{;bGBv z8C=%kAjLq{Lu_{b`ohfCB9^bGrz%R=gBi#r?Q|332^;LrXES|Cd3EZbu~AZeyL7y< z=veRx0Ds1k460->+m+(wofHNKw(C;Q>a4*yM_ zTD*y5(!UWwdOMs*9)c*2X02I&+Qi!_3-)Vl-oztj3RA?2f8z4C%)E1{t=N}7YSYmS z2`pvno7|~s0Zfsm?93(7`;Bi4N~VUNKOzKZY?v!O>V+dEs?U!vDuB{vU~AHYL!i&W z^K+Yi0Iak}rflcgz}evT@{RcfaB~}Eo|2y>>2KGQB=hcp1C8uw&InpC!+Ur~`Jw<3 zFP#~SH3KQlOe4;TIAGJq!@482(Hf|U}zgZFOM!}c92lRNaD0l5t7c(CFlr{eOzN0vnZ8Uw9!5mxF91TeR8jKZ=y*}wo z?~duq#=I0UJWg2pbarJrLY9!i6dOvO76bckX@;Kh&<0V-0I`!DkQ*X{kjP9rxVm(sN_qDU-!)Ryh? 
zS$zJdz`{eyb9f1MZnKr;K5jF48k{}e;q?9|e0Atk@wDi@}65;n1- zcD{^vgwz&?*s1R&|NMc3Yhq#ZOd8-tb=@yrfRvy5R@Ape0S;k7Z>6$Xz)kYZuj9_# z;Gvz)${(x=uFI0qKbOhC{$u&$=RpEsTIeTRC+-ipTQt`})n6cyH?2o6QVy&(XZDw4 zQE2sSEW@7FQ)p0sYUx(qBUBpMv5!9NB0Aq`@k_+-0Mc5Y@2z9FjF_uZqo(uS@ZtXb z?v6MGp00W5FDFeMZdmU)a{QMtPC=VnJ+HBj)minDccW9^Ps(0v7W6`#8Q5z`@Uv|7PYj zaI#dVpv<@jcH_Ul_51w=BE#FVfGj?M)w*5Vf*g?E&Fa4XSp?WMq@HEALKLsh^vfj6v?VZ?Y#JKAO96~S4#m4!K?73o`-2FxHFBD z#E-om`2JI&l&RaK_Zxq&Lh^4F%w8*^LX=2G*f|;8zr%QlkoG->p*!jQKl{*$GhY6X zVh+MZx=+4}L;}I{!A_%&DVP+V_-^r080?~+SZWN{fr|+#tEO5HE^jK%sOo+IhmA9u zZaD-XDqR~1kB|qT9qWpt(FZ9qG0Rl-d0;7ssOOL+y+4QFe);)W4(hy!lpokPpzIqa z7v}8JkUNUh{9WUZ#Me1WY2T>g3!XCX@3+(Ar7p>Fy(Ve6ot*gJ>%Jv8LjjYG{;L)2 zF_~cTgL}T1br6Tqo-KdEW_!QpJ~%;0T|JdZ5E1yths?yhnXVh)@Vhk;`r1M`-txQl zZmuxs8ETA$?G%BfscvounKL+!H=vxO%HT|(pUmbd0{cnn!#pp$fasQydxnFQFWJi{ zJ@`Qxqz769i8`9b39cr?2BH}+>()={L>!tpuTz!R~g=!t)< z=fOuhgKpLQ7{N0}j}mex({W>bormLBCQebMJ^%ceAy(&*_IP{540D>Hty!QRz-WZB zUUNM`gz|$oc#IEMf{;I%HSvWrsNLNa)p5B726^=Q%~?8Nxw?K%VUVP|D<7ZhUM1yA zVn5K+1fjR2FO5W`e4XTY;>cZ^pnfYd~5!D9V$;Lz_2$SFRA zw(P#f?x|)&qXrf7JV!@Rwa$*``|=$WJyy7&_qhuZ6JEK|AbZ4@>t?txQigv$u+w93 zkyJOTtjc!$em3qltx8sRegWsx(U!jv{~Q~hOCMcxn#Q7*>rG^jEn?Ez_93MwSqUT8 zE>A`@=7E^m(;wMXp8yL=Sc~o=op-hqo-o%7f<3+6rTMF*{U$h}yyS>GxW7LBu$>ks zy+3#6rsb*4aGM8rPNKhl0FAAi63^_o4K7@nh& zw((Y~8n;Nk@QbgS24}qM=29?e|Nr!G=HFPo-y6@E%o#I<%tHz(B|PWc+dR)QJ$Pgc z84{_`L}bhyQ52aHg^-LPDn2DbL?lJV2)+59U%s8S&L42E`<%V@zV_?7uok*2+FH*O zFuwtjsHBpc7*7$|Z%g7Xss7{Chl_V*z~r#XJ*HN||If)6z~Hk9lucQxJxn*iOa4iz zj*~X{95x8-=TiqSwX_#S7n#6KT53#zr3~y}ShtUKjT8Ln)`ilwM?h)`pG|(X2M(Hb z3*JMIfO|ReyHuhf+S=Ja_)<9>jp8E#?9^xa5=Y>f*@_6AnnMe(~Y}~6mwDAc`3@-QUiTQulQP}J+!OlnB zeWqMs;L3zh#V{XS%TkLJpx|Jb70Y#w)nBo7huPHh*c>U6tQGQAHz7{9&9Kz z8a#`BO9*BL=Hcia%j3r@eM;z_!BVx_seQ;d^vA8;#NI^Nnkd1gmiS6xk8V=DI{uj2 zo#7ot52vv5j~3JS;2i22`_I~(!Wv|}nGOhUVr1vBYtuH3q^<3w=C(8{(rvk~g?sWG z|J4KaWIi5SP8Kk3WH=qM*amhH)tq@kxkBH zjswA27{|a{AxALd?)l~&zX6!dErv7mgq|4fb^6bcA#hXIezS4#LVw26=sdRb(U9%1 
zj@?u~s;-%MLmiibViQh!u1i%Q$7yi~>Kauff8w0k*UXdnnpe_V2jfNj#rn;MxgG3y z&}MNSkk7K;jIK2!qdEOlYOuAep{N2KDta`O(+vq9WQfrP1))C@iz0k3Ho& z(mBq5J0ji+?Zvx)znl`q2e|AY_Qf2YrjYh9CvyTfyS8Gj@KzGv{o&dvYfVqAK67x# zeb-&gQ|o!^n|vmWgMM}?hc<=utau^NGuH^HG8;3tvxvTb`6XUTUNC(Es9h-1%G5{C%VMIJ>nkKmJk|t1CU>uXeo%bEKhuVYe1c+6?|e zEjAiKO3B=~{kv7<-~G_)n}wHLI{;#`bZ_VVB|)tL_k3{D35+*pE*+sf2D>r2QxUo> z;41%dXyVv3P)=+}hbf8zc{K0bo^WE`E>6|Egs1@K);4;pF2Lc4%!cl3vA`u^p>|ll zA1(jPa@@$fhkArAq=gM6q1=Z8F;m$y$aBHg)1l8E9afhrSM$@v7ejJGd?Q8h5>72w znaUR2S;nQhveN-)Bb(;)vjA56ZkvH|k>Gn36G!Fh8c7>JgNi>H5&ePN->$*;ivP~z zN4ZIRr)dHF^bW*!aah9{HP6ATDI?(d=-Y&OM-{lKE+@Lin*&+;e2VV`6IdtTlV<-= z3dS$Lj2fJ~0;rn#`R#j^aO`;;ZEt-N$PTFAOMA=(f*~*RHcAwLS<4{ldI>S7p6uMm zcO(#fYo7C`Z{k7!3u}&7mK8>g;#A_7;+0ULh~}#i{Sg$$#=pN}P#y&cC{5k3S4EaK z;T~3%N$6;khML@|1;jgLCY~pI5MNIqhgki3ix0F@#YY<_;Z^UdYgT79L7ZOiQ}xF< zP`~nq^?h~;n9;4z{2dk{{Ia%7t=d!Lq>+Y^|t{|5{8Xj`L0_*~2?}nIK(0uyOBEw)2)S)yim^j^sGAbvn zTvjKL+nbVZdgC}GKe<&V{b3Vd_$>a};|&vDM#DDtgMS^TJi43})j^N*F!f5UF-Br- zb=|C?`j@aX+?>s09f=s*lkZ_C$NNcT%7xVO;fW+vIHKhn{U111W!v?f_zZ5VKlJgV z0A7Ell+$ZO;EYr6PwHP*;N#Y5uu~fh9)8AZEPr2u%d1~%5e8*ov-s<;(Nk_P%(0ca z>KOzYCO0<%baFu6hbxq0Q51NuE5$r5s{tyJ67KnuX*4|=|80)T5_JW~(9h((L&bXs zCem)Pplh=gzYIglku%p7#djyKAeEmR{oI^Pi1FI?+HznaJ{l&TB6`jnFBO_8s7R2& zL%!S)sSZ7lV{D$6UYcg%^uFfm2|eyu_ty+x{YSLGNH?2Fb8#J{iiRhb#0X#c)u=;( zvX~Y!*dJGr@ZTd}mR&M;iEl(Lr}oviw?N=aC1;Glg=V4s>wC zByfVU0srN~{_MEj9=uq9V@9F85qF}Fz8ovPhO?N3Re$^_h}8r+H*5%}Vs;aBu0tu`DczyD`fdymtZ)JzZ|cX`a*WB}CwCdMykO~8)THTPRS{@|!LD@G8b*Hg+)Z~h&<~i)TuzT)E=KnC&lLr`FkCcIS zswSCDg99vwYoC0-wF%DfqDIU+8cxUlkKz%U44y{fM+yYxz{5xG%gR;moCXse9&+F=bfEaJYP3E z&-TI%oZ{GOrQM#4i%Ex9w)i`~U|? 
z99FqWHq6#FhIQAMr;g3>mJDN#bPF%kgR7lyTPc4-@n|4*JtJ2+>t?=~3y1y(1nHq2nIgug!A z{Xv2!98P$uca%{Ixabh;Z#hY{{NU)?!iqI|eUqAxONG$Xhs%{TOG1#RY_!`jR{%Oh zxw^m*dkkNARj8Pwy^24Q{&J-uItV8pceq~3{uyU=5r2P3@dj2aq4euk!8Oc2b}2{M zg~&?_A1gRD`k9n+{AX2Rl-|GdTr7&-psTwDq0t)AJjP)Vv}3teN&W!-d~mg5mK!{H z(!w=b?t!xl>)qQ$C&2nx(ez0JIxy_FiIT^Z02}Lw*1XIHDu?(S4RiN_)Zf;NHB!-Q z?0%R#P7iK0(z`{B+Q6YR!l^XA2h2h(X68nU0Gnt%v;r!i(49trd-1??lsRxaO%QE1 zxQw{a??c0nw|Hz{okBHy&HFgIqEJGN)i2R#UgT2N*C*|bkfO>`r^35h{4e&q#qqcf z-j0l}(Zd#KrN%*oZNiI|B;Y5s_On+Jh9DK?9%#@*!OvLGq-Ikk?YTPC?am#X8y1Wzh1Rt9C{>`8$JIkKNuN+qzX?mC?^k4@F45K$ zKcSS37l6c~T242C8zrRT4v8A$ek-yv0zC z?>9TE+tSE;m+iT&*aD>VVWZ6bP!|6CsSxSsQV?Ff&Rft+XNdcfUA9kacHyGBm&M}i zjIch^wN&3WFD%sIgI7YC5hl2;E}+gAOlr_59!!$cCs}eHwg}492999v3w}q%;n01X z{=6q20A^y&3$qe?-gCD&t}YjYi^}sL156rD7fT##!I|Ln*$9QIi%sBMOS1Q=@(0W7 z+8e371TWTe-te3zaX&NH1nBCVVQ*F>)2wL*;omM|H%~|CmA^ym>oYPaQy_%uD%lIU zG1ti|N;)H%t#z-GuY2(yPWQLNV!QDNySelk9y8;1?o;$k3RO6x!2RyY>3r;|33mR? p;uL24`reXYtQ2Xz5Pt06wjw28S*lICp!)yyfXIZTC*FV0{{hIf2rmEt literal 0 HcmV?d00001