From 68efad6037c2d975a95f73eda1e8be1d951b8ee4 Mon Sep 17 00:00:00 2001 From: corentinravoux Date: Fri, 19 Jul 2024 16:17:03 +0200 Subject: [PATCH 1/8] addition of global minuit fitting functions with different variations --- flip/fit_utils.py | 882 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 882 insertions(+) create mode 100644 flip/fit_utils.py diff --git a/flip/fit_utils.py b/flip/fit_utils.py new file mode 100644 index 0000000..00cb420 --- /dev/null +++ b/flip/fit_utils.py @@ -0,0 +1,882 @@ +import os +import pickle + +import numpy as np +import pandas as pd +import snsim +import snutils +from astropy.cosmology import FlatLambdaCDM + +from flip import fitter +from flip.covariance import covariance +from flip.utils import create_log + +log = create_log() + + +def fit_density_minuit( + parameter_dict, + model_name, + likelihood_type, + likelihood_properties, + parameter_fit, + overwrite=False, + size_batch=10_000, + number_worker=32, + additional_parameters_values=(), + maximum_number_coordinates=None, +): + grid_name = parameter_fit[0] + power_spectrum_dict = parameter_fit[1] + name_out_fit = parameter_fit[2] + str_fit = parameter_fit[3] + + if (os.path.isfile(name_out_fit)) & (overwrite is False): + log.add("Fit already performed") + return + + grid = pd.read_parquet(grid_name) + if (maximum_number_coordinates is not None) & ( + grid["ra"].size > maximum_number_coordinates + ): + log.add("Maximum number coordinate exceeded") + return + + for i in range(len(str_fit)): + log.add(str_fit[i]) + + coordinates_density = np.array([grid["ra"], grid["dec"], grid["rcom"]]) + data_density = { + "density": np.array(grid["density"]), + "density_error": np.array(grid["density_err"]), + } + + covariance_fit = covariance.CovMatrix.init_from_flip( + model_name, + "density", + power_spectrum_dict, + coordinates_density=coordinates_density, + size_batch=size_batch, + number_worker=number_worker, + additional_parameters_values=additional_parameters_values, + ) + + minuit_fitter = fitter.FitMinuit.init_from_covariance( + covariance_fit, + data_density, + parameter_dict, + likelihood_type=likelihood_type, + likelihood_properties=likelihood_properties, + ) + + minuit_fitter.run() + + params = [ + minuit_fitter.minuit.values.to_dict(), + minuit_fitter.minuit.limits.to_dict(), + minuit_fitter.minuit.errors.to_dict(), + minuit_fitter.minuit.valid, + minuit_fitter.minuit.accurate, + minuit_fitter.minuit.fval, + ] + + pickle.dump(params, open(name_out_fit, "wb")) + + +def fit_density_interp_sigg_minuit( + parameter_dict, + model_name, + likelihood_type, + likelihood_properties, + interpolation_value_name, + interpolation_value_range, + parameter_fit, + overwrite=False, + size_batch=10_000, + number_worker=32, + maximum_number_coordinates=None, +): + grid_name = parameter_fit[0] + power_spectrum_dict = parameter_fit[1] + name_out_fit = parameter_fit[2] + str_fit = parameter_fit[3] + + if (os.path.isfile(name_out_fit)) & (overwrite is False): + log.add("Fit already performed") + return + + grid = pd.read_parquet(grid_name) + if (maximum_number_coordinates is not None) & ( + grid["ra"].size > maximum_number_coordinates + ): + log.add("Maximum number coordinate exceeded") + return + + for i in range(len(str_fit)): + log.add(str_fit[i]) + + coordinates_density = np.array([grid["ra"], grid["dec"], grid["rcom"]]) + data_density = { + "density": np.array(grid["density"]), + "density_error": np.array(grid["density_err"]), + } + + covariance_list = [] + for sigg in interpolation_value_range: + 
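+        # Build one covariance matrix per sigma_g in interpolation_value_range;
+        # the likelihood later interpolates between these precomputed matrices
+        # instead of recomputing the covariance at every minimizer step.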
covariance_list.append( + covariance.CovMatrix.init_from_flip( + model_name, + "density", + power_spectrum_dict, + coordinates_density=coordinates_density, + size_batch=size_batch, + number_worker=number_worker, + additional_parameters_values=(sigg,), + ) + ) + + minuit_fitter = fitter.FitMinuit.init_from_covariance( + covariance_list, + data_density, + parameter_dict, + likelihood_type=likelihood_type, + likelihood_properties=likelihood_properties, + interpolation_value_name=interpolation_value_name, + interpolation_value_range=interpolation_value_range, + ) + + minuit_fitter.run() + + params = [ + minuit_fitter.minuit.values.to_dict(), + minuit_fitter.minuit.limits.to_dict(), + minuit_fitter.minuit.errors.to_dict(), + minuit_fitter.minuit.valid, + minuit_fitter.minuit.accurate, + minuit_fitter.minuit.fval, + ] + + pickle.dump(params, open(name_out_fit, "wb")) + + +def fit_velocity_true_minuit( + parameter_dict, + likelihood_type, + likelihood_properties, + zmin, + z_simu, + photo_type, + completeness_file, + parameter_fit, + overwrite=False, + size_batch=10_000, + number_worker=32, +): + + sim_name = parameter_fit[0] + fit_name = parameter_fit[1] + zmax = parameter_fit[2] + power_spectrum_dict = parameter_fit[3] + name_out_fit = parameter_fit[4] + str_fit = parameter_fit[5] + + if (os.path.isfile(name_out_fit)) & (overwrite is False): + log.add("Fit already performed") + return + for i in range(len(str_fit)): + log.add(str_fit[i]) + + fit = snsim.io_utils.open_fit(fit_name) + sim = snsim.SimSample.fromFile(sim_name) + _, sim_lcs = snutils.get_fitted_light_curves(sim, fit) + + mag_m, mag_M, completeness, _, _, _, _ = np.loadtxt( + completeness_file, + skiprows=1, + ).T + mag_x = (mag_m + mag_M) / 2 + + detection_mask = snutils.return_detection_mask(sim_lcs) + typing_mask = snutils.return_typing_mask( + sim_lcs, mag_x, completeness, photo_type=photo_type + ) + phase_mask = snutils.give_phasemask(sim_lcs, fit) + light_curve_mask = snutils.give_mask( + fit, phasemask=phase_mask, verbose=False, zrange=[zmin, zmax] + ) + + data_velocity = fit[typing_mask & detection_mask & light_curve_mask] + + cosmo = FlatLambdaCDM( + H0=data_velocity.attrs["cosmo"]["H0"], Om0=data_velocity.attrs["cosmo"]["Om0"] + ) + data_velocity["rcom_zobs"] = ( + cosmo.comoving_distance(data_velocity["zobs"]).value * cosmo.h + ) + data_velocity["hubble_norm"] = cosmo.H(data_velocity["zobs"]) / cosmo.h + + samepos_list = snutils.find_samepos(data_velocity) + same_pos_mask = np.ones(len(data_velocity), dtype="bool") + if len(samepos_list) > 0: + for same_pos in samepos_list: + same_pos_mask[same_pos[1:]] = False + + data_velocity = data_velocity[same_pos_mask] + + coordinates_velocity = [ + data_velocity["ra"].values, + data_velocity["dec"].values, + data_velocity["rcom_zobs"].values, + ] + data_velocity_true = { + "velocity": data_velocity["vpec"].values, + "velocity_error": np.zeros(len(data_velocity["vpec"].values)), + } + + covariance_scaling = float(((cosmo.H(z_simu) / (1 + z_simu)) / cosmo.H0) ** 2) + + cov = covariance.CovMatrix.init_from_flip( + "carreres23", + "velocity", + power_spectrum_dict, + coordinates_velocity=coordinates_velocity, + size_batch=size_batch, + number_worker=number_worker, + ) + cov.covariance_dict["vv"] = covariance_scaling * cov.covariance_dict["vv"] + + minuit_fitter = fitter.FitMinuit.init_from_covariance( + cov, + data_velocity_true, + parameter_dict, + likelihood_type=likelihood_type, + likelihood_properties=likelihood_properties, + ) + + minuit_fitter.run() + + params = [ + 
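+        # Positional result layout, read back by index in plot_utils:
+        # [values, limits, errors, valid, accurate, fval, n_sn]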
minuit_fitter.minuit.values.to_dict(), + minuit_fitter.minuit.limits.to_dict(), + minuit_fitter.minuit.errors.to_dict(), + minuit_fitter.minuit.valid, + minuit_fitter.minuit.accurate, + minuit_fitter.minuit.fval, + data_velocity.index.size, + ] + + pickle.dump(params, open(name_out_fit, "wb")) + + +def fit_velocity_true_interp_sigu_minuit( + parameter_dict, + likelihood_type, + likelihood_properties, + interpolation_value_name, + interpolation_value_range, + zmin, + z_simu, + photo_type, + completeness_file, + parameter_fit, + overwrite=False, + size_batch=10_000, + number_worker=32, +): + + sim_name = parameter_fit[0] + fit_name = parameter_fit[1] + zmax = parameter_fit[2] + power_spectrum_dict_list = parameter_fit[3] + name_out_fit = parameter_fit[4] + str_fit = parameter_fit[5] + + if (os.path.isfile(name_out_fit)) & (overwrite is False): + log.add("Fit already performed") + return + for i in range(len(str_fit)): + log.add(str_fit[i]) + + fit = snsim.io_utils.open_fit(fit_name) + sim = snsim.SimSample.fromFile(sim_name) + _, sim_lcs = snutils.get_fitted_light_curves(sim, fit) + + mag_m, mag_M, completeness, _, _, _, _ = np.loadtxt( + completeness_file, + skiprows=1, + ).T + mag_x = (mag_m + mag_M) / 2 + + detection_mask = snutils.return_detection_mask(sim_lcs) + typing_mask = snutils.return_typing_mask( + sim_lcs, mag_x, completeness, photo_type=photo_type + ) + phase_mask = snutils.give_phasemask(sim_lcs, fit) + light_curve_mask = snutils.give_mask( + fit, phasemask=phase_mask, verbose=False, zrange=[zmin, zmax] + ) + + data_velocity = fit[typing_mask & detection_mask & light_curve_mask] + + cosmo = FlatLambdaCDM( + H0=data_velocity.attrs["cosmo"]["H0"], Om0=data_velocity.attrs["cosmo"]["Om0"] + ) + data_velocity["rcom_zobs"] = ( + cosmo.comoving_distance(data_velocity["zobs"]).value * cosmo.h + ) + data_velocity["hubble_norm"] = cosmo.H(data_velocity["zobs"]) / cosmo.h + + samepos_list = snutils.find_samepos(data_velocity) + same_pos_mask = np.ones(len(data_velocity), dtype="bool") + if len(samepos_list) > 0: + for same_pos in samepos_list: + same_pos_mask[same_pos[1:]] = False + + data_velocity = data_velocity[same_pos_mask] + + coordinates_velocity = [ + data_velocity["ra"].values, + data_velocity["dec"].values, + data_velocity["rcom_zobs"].values, + ] + data_velocity_true = { + "velocity": data_velocity["vpec"].values, + "velocity_error": np.zeros(len(data_velocity["vpec"].values)), + } + + covariance_fit_list = [] + + covariance_scaling = float(((cosmo.H(z_simu) / (1 + z_simu)) / cosmo.H0) ** 2) + for i in range(len(interpolation_value_range)): + + cov_sigu = covariance.CovMatrix.init_from_flip( + "carreres23", + "velocity", + power_spectrum_dict_list[i], + coordinates_velocity=coordinates_velocity, + size_batch=size_batch, + number_worker=number_worker, + ) + cov_sigu.covariance_dict["vv"] = ( + covariance_scaling * cov_sigu.covariance_dict["vv"] + ) + covariance_fit_list.append(cov_sigu) + + minuit_fitter = fitter.FitMinuit.init_from_covariance( + covariance_fit_list, + data_velocity_true, + parameter_dict, + likelihood_type=likelihood_type, + likelihood_properties=likelihood_properties, + interpolation_value_name=interpolation_value_name, + interpolation_value_range=interpolation_value_range, + ) + + minuit_fitter.run() + + params = [ + minuit_fitter.minuit.values.to_dict(), + minuit_fitter.minuit.limits.to_dict(), + minuit_fitter.minuit.errors.to_dict(), + minuit_fitter.minuit.valid, + minuit_fitter.minuit.accurate, + minuit_fitter.minuit.fval, + data_velocity.index.size, + ] 
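+    # Reload later with pickle.load(open(name_out_fit, "rb")).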
+ + pickle.dump(params, open(name_out_fit, "wb")) + + +def fit_velocity_estimated_minuit( + parameter_dict, + likelihood_type, + likelihood_properties, + zmin, + z_simu, + photo_type, + completeness_file, + parameter_fit, + overwrite=False, + size_batch=10_000, + number_worker=32, +): + + sim_name = parameter_fit[0] + fit_name = parameter_fit[1] + zmax = parameter_fit[2] + power_spectrum_dict = parameter_fit[3] + name_out_fit = parameter_fit[4] + str_fit = parameter_fit[5] + + if (os.path.isfile(name_out_fit)) & (overwrite is False): + log.add("Fit already performed") + return + for i in range(len(str_fit)): + log.add(str_fit[i]) + + fit = snsim.io_utils.open_fit(fit_name) + sim = snsim.SimSample.fromFile(sim_name) + _, sim_lcs = snutils.get_fitted_light_curves(sim, fit) + + mag_m, mag_M, completeness, _, _, _, _ = np.loadtxt( + completeness_file, + skiprows=1, + ).T + mag_x = (mag_m + mag_M) / 2 + + detection_mask = snutils.return_detection_mask(sim_lcs) + typing_mask = snutils.return_typing_mask( + sim_lcs, mag_x, completeness, photo_type=photo_type + ) + phase_mask = snutils.give_phasemask(sim_lcs, fit) + light_curve_mask = snutils.give_mask( + fit, phasemask=phase_mask, verbose=False, zrange=[zmin, zmax] + ) + + data_velocity = fit[typing_mask & detection_mask & light_curve_mask] + + cosmo = FlatLambdaCDM( + H0=data_velocity.attrs["cosmo"]["H0"], Om0=data_velocity.attrs["cosmo"]["Om0"] + ) + data_velocity["rcom_zobs"] = ( + cosmo.comoving_distance(data_velocity["zobs"]).value * cosmo.h + ) + data_velocity["hubble_norm"] = cosmo.H(data_velocity["zobs"]) / cosmo.h + + samepos_list = snutils.find_samepos(data_velocity) + same_pos_mask = np.ones(len(data_velocity), dtype="bool") + if len(samepos_list) > 0: + for same_pos in samepos_list: + same_pos_mask[same_pos[1:]] = False + + data_velocity = data_velocity[same_pos_mask] + + coordinates_velocity = [ + data_velocity["ra"].values, + data_velocity["dec"].values, + data_velocity["rcom_zobs"].values, + ] + + covariance_scaling = float(((cosmo.H(z_simu) / (1 + z_simu)) / cosmo.H0) ** 2) + + cov = covariance.CovMatrix.init_from_flip( + "carreres23", + "velocity", + power_spectrum_dict, + coordinates_velocity=coordinates_velocity, + size_batch=size_batch, + number_worker=number_worker, + ) + cov.covariance_dict["vv"] = covariance_scaling * cov.covariance_dict["vv"] + + minuit_fitter = fitter.FitMinuit.init_from_covariance( + cov, + data_velocity, + parameter_dict, + likelihood_type=likelihood_type, + likelihood_properties=likelihood_properties, + ) + + minuit_fitter.run() + + params = [ + minuit_fitter.minuit.values.to_dict(), + minuit_fitter.minuit.limits.to_dict(), + minuit_fitter.minuit.errors.to_dict(), + minuit_fitter.minuit.valid, + minuit_fitter.minuit.accurate, + minuit_fitter.minuit.fval, + data_velocity.index.size, + ] + + pickle.dump(params, open(name_out_fit, "wb")) + + +def fit_velocity_estimated_interp_sigu_minuit( + parameter_dict, + likelihood_type, + likelihood_properties, + interpolation_value_name, + interpolation_value_range, + zmin, + z_simu, + photo_type, + completeness_file, + parameter_fit, + overwrite=False, + size_batch=10_000, + number_worker=32, +): + + sim_name = parameter_fit[0] + fit_name = parameter_fit[1] + zmax = parameter_fit[2] + power_spectrum_dict_list = parameter_fit[3] + name_out_fit = parameter_fit[4] + str_fit = parameter_fit[5] + + if (os.path.isfile(name_out_fit)) & (overwrite is False): + log.add("Fit already performed") + return + for i in range(len(str_fit)): + log.add(str_fit[i]) + + fit = 
snsim.io_utils.open_fit(fit_name) + sim = snsim.SimSample.fromFile(sim_name) + _, sim_lcs = snutils.get_fitted_light_curves(sim, fit) + + mag_m, mag_M, completeness, _, _, _, _ = np.loadtxt( + completeness_file, + skiprows=1, + ).T + mag_x = (mag_m + mag_M) / 2 + + detection_mask = snutils.return_detection_mask(sim_lcs) + typing_mask = snutils.return_typing_mask( + sim_lcs, mag_x, completeness, photo_type=photo_type + ) + phase_mask = snutils.give_phasemask(sim_lcs, fit) + light_curve_mask = snutils.give_mask( + fit, phasemask=phase_mask, verbose=False, zrange=[zmin, zmax] + ) + + data_velocity = fit[typing_mask & detection_mask & light_curve_mask] + + cosmo = FlatLambdaCDM( + H0=data_velocity.attrs["cosmo"]["H0"], Om0=data_velocity.attrs["cosmo"]["Om0"] + ) + data_velocity["rcom_zobs"] = ( + cosmo.comoving_distance(data_velocity["zobs"]).value * cosmo.h + ) + data_velocity["hubble_norm"] = cosmo.H(data_velocity["zobs"]) / cosmo.h + + samepos_list = snutils.find_samepos(data_velocity) + same_pos_mask = np.ones(len(data_velocity), dtype="bool") + if len(samepos_list) > 0: + for same_pos in samepos_list: + same_pos_mask[same_pos[1:]] = False + + data_velocity = data_velocity[same_pos_mask] + + coordinates_velocity = [ + data_velocity["ra"].values, + data_velocity["dec"].values, + data_velocity["rcom_zobs"].values, + ] + + covariance_fit_list = [] + + covariance_scaling = float(((cosmo.H(z_simu) / (1 + z_simu)) / cosmo.H0) ** 2) + for i in range(len(interpolation_value_range)): + + cov_sigu = covariance.CovMatrix.init_from_flip( + "carreres23", + "velocity", + power_spectrum_dict_list[i], + coordinates_velocity=coordinates_velocity, + size_batch=size_batch, + number_worker=number_worker, + ) + cov_sigu.covariance_dict["vv"] = ( + covariance_scaling * cov_sigu.covariance_dict["vv"] + ) + covariance_fit_list.append(cov_sigu) + + minuit_fitter = fitter.FitMinuit.init_from_covariance( + covariance_fit_list, + data_velocity, + parameter_dict, + likelihood_type=likelihood_type, + likelihood_properties=likelihood_properties, + interpolation_value_name=interpolation_value_name, + interpolation_value_range=interpolation_value_range, + ) + + minuit_fitter.run() + + params = [ + minuit_fitter.minuit.values.to_dict(), + minuit_fitter.minuit.limits.to_dict(), + minuit_fitter.minuit.errors.to_dict(), + minuit_fitter.minuit.valid, + minuit_fitter.minuit.accurate, + minuit_fitter.minuit.fval, + data_velocity.index.size, + ] + + pickle.dump(params, open(name_out_fit, "wb")) + + +def fit_full_velocity_estimated_minuit( + parameter_dict, + model_name, + likelihood_type, + likelihood_properties, + zmin, + z_simu, + photo_type, + completeness_file, + parameter_fit, + overwrite=False, + size_batch=10_000, + number_worker=32, + additional_parameters_values=(), + maximum_number_coordinates=None, +): + + sim_name = parameter_fit[0] + fit_name = parameter_fit[1] + grid_name = parameter_fit[2] + zmax = parameter_fit[3] + power_spectrum_dict = parameter_fit[4] + name_out_fit = parameter_fit[5] + str_fit = parameter_fit[6] + + if (os.path.isfile(name_out_fit)) & (overwrite is False): + log.add("Fit already performed") + return + + grid = pd.read_parquet(grid_name) + if (maximum_number_coordinates is not None) & ( + grid["ra"].size > maximum_number_coordinates + ): + log.add("Maximum number coordinate exceeded") + return + + for i in range(len(str_fit)): + log.add(str_fit[i]) + + coordinates_density = np.array([grid["ra"], grid["dec"], grid["rcom"]]) + data_density = { + "density": np.array(grid["density"]), + 
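+        # Per-cell uncertainty on the density field, as stored in the grid file.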
"density_error": np.array(grid["density_err"]), + } + + fit = snsim.io_utils.open_fit(fit_name) + sim = snsim.SimSample.fromFile(sim_name) + _, sim_lcs = snutils.get_fitted_light_curves(sim, fit) + + mag_m, mag_M, completeness, _, _, _, _ = np.loadtxt( + completeness_file, + skiprows=1, + ).T + mag_x = (mag_m + mag_M) / 2 + + detection_mask = snutils.return_detection_mask(sim_lcs) + typing_mask = snutils.return_typing_mask( + sim_lcs, mag_x, completeness, photo_type=photo_type + ) + phase_mask = snutils.give_phasemask(sim_lcs, fit) + light_curve_mask = snutils.give_mask( + fit, phasemask=phase_mask, verbose=False, zrange=[zmin, zmax] + ) + + data_velocity = fit[typing_mask & detection_mask & light_curve_mask] + + cosmo = FlatLambdaCDM( + H0=data_velocity.attrs["cosmo"]["H0"], Om0=data_velocity.attrs["cosmo"]["Om0"] + ) + data_velocity["rcom_zobs"] = ( + cosmo.comoving_distance(data_velocity["zobs"]).value * cosmo.h + ) + data_velocity["hubble_norm"] = cosmo.H(data_velocity["zobs"]) / cosmo.h + + samepos_list = snutils.find_samepos(data_velocity) + same_pos_mask = np.ones(len(data_velocity), dtype="bool") + if len(samepos_list) > 0: + for same_pos in samepos_list: + same_pos_mask[same_pos[1:]] = False + + data_velocity = data_velocity[same_pos_mask] + + data_full = {} + data_full.update(data_density) + for key in data_velocity.columns: + data_full[key] = np.array(data_velocity[key]) + + coordinates_velocity = [ + data_velocity["ra"].values, + data_velocity["dec"].values, + data_velocity["rcom_zobs"].values, + ] + + covariance_scaling = float(((cosmo.H(z_simu) / (1 + z_simu)) / cosmo.H0)) + + cov = covariance.CovMatrix.init_from_flip( + model_name, + "full", + power_spectrum_dict, + coordinates_density=coordinates_density, + coordinates_velocity=coordinates_velocity, + size_batch=size_batch, + number_worker=number_worker, + additional_parameters_values=additional_parameters_values, + ) + + cov.covariance_dict["vv"] = covariance_scaling**2 * cov.covariance_dict["vv"] + cov.covariance_dict["gv"] = covariance_scaling * cov.covariance_dict["gv"] + + minuit_fitter = fitter.FitMinuit.init_from_covariance( + cov, + data_full, + parameter_dict, + likelihood_type=likelihood_type, + likelihood_properties=likelihood_properties, + ) + + minuit_fitter.run() + + params = [ + minuit_fitter.minuit.values.to_dict(), + minuit_fitter.minuit.limits.to_dict(), + minuit_fitter.minuit.errors.to_dict(), + minuit_fitter.minuit.valid, + minuit_fitter.minuit.accurate, + minuit_fitter.minuit.fval, + data_velocity.index.size, + ] + + pickle.dump(params, open(name_out_fit, "wb")) + + +def fit_full_velocity_estimated_interp_sigu_minuit( + parameter_dict, + model_name, + likelihood_type, + likelihood_properties, + interpolation_value_name, + interpolation_value_range, + zmin, + z_simu, + photo_type, + completeness_file, + parameter_fit, + overwrite=False, + size_batch=10_000, + number_worker=32, + additional_parameters_values=(), + maximum_number_coordinates=None, +): + + sim_name = parameter_fit[0] + fit_name = parameter_fit[1] + grid_name = parameter_fit[2] + zmax = parameter_fit[3] + power_spectrum_dict_list = parameter_fit[4] + name_out_fit = parameter_fit[5] + str_fit = parameter_fit[6] + + if (os.path.isfile(name_out_fit)) & (overwrite is False): + log.add("Fit already performed") + return + + grid = pd.read_parquet(grid_name) + if (maximum_number_coordinates is not None) & ( + grid["ra"].size > maximum_number_coordinates + ): + log.add("Maximum number coordinate exceeded") + return + + for i in range(len(str_fit)): 
+ log.add(str_fit[i]) + + coordinates_density = np.array([grid["ra"], grid["dec"], grid["rcom"]]) + data_density = { + "density": np.array(grid["density"]), + "density_error": np.array(grid["density_err"]), + } + + fit = snsim.io_utils.open_fit(fit_name) + sim = snsim.SimSample.fromFile(sim_name) + _, sim_lcs = snutils.get_fitted_light_curves(sim, fit) + + mag_m, mag_M, completeness, _, _, _, _ = np.loadtxt( + completeness_file, + skiprows=1, + ).T + mag_x = (mag_m + mag_M) / 2 + + detection_mask = snutils.return_detection_mask(sim_lcs) + typing_mask = snutils.return_typing_mask( + sim_lcs, mag_x, completeness, photo_type=photo_type + ) + phase_mask = snutils.give_phasemask(sim_lcs, fit) + light_curve_mask = snutils.give_mask( + fit, phasemask=phase_mask, verbose=False, zrange=[zmin, zmax] + ) + + data_velocity = fit[typing_mask & detection_mask & light_curve_mask] + + cosmo = FlatLambdaCDM( + H0=data_velocity.attrs["cosmo"]["H0"], Om0=data_velocity.attrs["cosmo"]["Om0"] + ) + data_velocity["rcom_zobs"] = ( + cosmo.comoving_distance(data_velocity["zobs"]).value * cosmo.h + ) + data_velocity["hubble_norm"] = cosmo.H(data_velocity["zobs"]) / cosmo.h + + samepos_list = snutils.find_samepos(data_velocity) + same_pos_mask = np.ones(len(data_velocity), dtype="bool") + if len(samepos_list) > 0: + for same_pos in samepos_list: + same_pos_mask[same_pos[1:]] = False + + data_velocity = data_velocity[same_pos_mask] + + data_full = {} + data_full.update(data_density) + for key in data_velocity.columns: + data_full[key] = np.array(data_velocity[key]) + + coordinates_velocity = [ + data_velocity["ra"].values, + data_velocity["dec"].values, + data_velocity["rcom_zobs"].values, + ] + + covariance_fit_list = [] + + covariance_scaling = float(((cosmo.H(z_simu) / (1 + z_simu)) / cosmo.H0)) + + for i in range(len(interpolation_value_range)): + cov_sigu = covariance.CovMatrix.init_from_flip( + model_name, + "full", + power_spectrum_dict_list[i], + coordinates_density=coordinates_density, + coordinates_velocity=coordinates_velocity, + size_batch=size_batch, + number_worker=number_worker, + additional_parameters_values=additional_parameters_values, + ) + + cov_sigu.covariance_dict["vv"] = ( + covariance_scaling**2 * cov_sigu.covariance_dict["vv"] + ) + cov_sigu.covariance_dict["gv"] = ( + covariance_scaling * cov_sigu.covariance_dict["gv"] + ) + covariance_fit_list.append(cov_sigu) + + minuit_fitter = fitter.FitMinuit.init_from_covariance( + covariance_fit_list, + data_full, + parameter_dict, + likelihood_type=likelihood_type, + likelihood_properties=likelihood_properties, + interpolation_value_name=interpolation_value_name, + interpolation_value_range=interpolation_value_range, + ) + minuit_fitter.run() + + params = [ + minuit_fitter.minuit.values.to_dict(), + minuit_fitter.minuit.limits.to_dict(), + minuit_fitter.minuit.errors.to_dict(), + minuit_fitter.minuit.valid, + minuit_fitter.minuit.accurate, + minuit_fitter.minuit.fval, + data_velocity.index.size, + ] + + pickle.dump(params, open(name_out_fit, "wb")) From 9322f9e8370bd6ddcc281c66fba9a1dd258fe461 Mon Sep 17 00:00:00 2001 From: corentinravoux Date: Mon, 22 Jul 2024 14:08:49 +0200 Subject: [PATCH 2/8] add plot utils --- flip/plot_utils.py | 147 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 147 insertions(+) diff --git a/flip/plot_utils.py b/flip/plot_utils.py index 09e5178..cef3c53 100644 --- a/flip/plot_utils.py +++ b/flip/plot_utils.py @@ -1,3 +1,7 @@ +import glob +import os +import pickle + import matplotlib.pyplot as plt import numpy 
as np @@ -173,3 +177,146 @@ def plot_correlation_from_likelihood( correlation_sum = cov_utils.return_correlation_matrix(covariance_sum) plt.imshow(correlation_sum, vmin=vmin, vmax=vmax) plt.colorbar() + + +def plot_all_fits( + fit_output, + parameters, + fiducials=None, + **kwargs, +): + figsize = utils.return_key(kwargs, "figsize", (10, 10)) + + all_fit = glob.glob(os.path.join(fit_output, "*")) + fig, ax = plt.subplots(len(parameters), 1, figsize=figsize, sharex=True) + + for i, f in enumerate(all_fit): + fit = pickle.load(open(f, "rb")) + if fit[3] is False: + continue + elif fit[4] is False: + continue + for j, param in enumerate(parameters): + ax[j].errorbar( + i, fit[0][param], fit[2][param], marker=".", ls="None", color="C1" + ) + + ax[j].set_ylabel(param, fontsize=18) + + if fiducials is not None: + if fiducials[j] is not None: + ax[j].axhline(fiducials[j], ls=":", color="k") + + ax[0].margins(x=0.005) + fig.tight_layout() + + +def plot_all_mean_fits( + fit_output, + parameters, + fiducials=None, + **kwargs, +): + figsize = utils.return_key(kwargs, "figsize", (10, 10)) + + all_fit = glob.glob(os.path.join(fit_output, "*")) + + fit_to_plot = [] + fit_name_to_plot = [] + for f in all_fit: + fit = pickle.load(open(f, "rb")) + if fit[3] is False: + continue + elif fit[4] is False: + continue + fit_to_plot.append(fit) + fit_name_to_plot.append(f) + + fit_prop = [] + for i in range(len(fit_name_to_plot)): + fit_prop.append( + fit_name_to_plot[i].split("fitted_parameters_")[-1].split("_box")[0] + ) + + fit_prop = np.array(fit_prop) + unique_fit_prop = np.sort(np.unique(fit_prop)) + + fig, ax = plt.subplots(len(parameters), 1, figsize=figsize, sharex=True) + + text = [] + for i, fit_p in enumerate(unique_fit_prop): + mask = fit_prop == fit_p + fits = np.array(fit_to_plot)[mask] + + for j, param in enumerate(parameters): + mean_param = np.mean([fits[i][0][param] for i in range(len(fits))]) + mean_error_param = np.mean( + [fits[i][2][param] for i in range(len(fits))] + ) / np.sqrt(len(mask[mask])) + + ax[j].errorbar( + i, mean_param, mean_error_param, marker=".", ls="None", color="C1" + ) + + ax[j].set_ylabel(param, fontsize=18) + + if fiducials is not None: + if fiducials[j] is not None: + ax[j].axhline(fiducials[j], ls=":", color="k") + + text.append(fit_p) + + j_index = np.arange(len(unique_fit_prop)) + ax[-1].set_xticks(j_index, np.array(text), rotation=90, fontsize=10) + ax[0].margins(x=0.005) + fig.tight_layout() + + +def plot_all_mean_error_fits( + fit_output, + parameters, + **kwargs, +): + figsize = utils.return_key(kwargs, "figsize", (10, 10)) + + all_fit = glob.glob(os.path.join(fit_output, "*")) + + fit_to_plot = [] + fit_name_to_plot = [] + for f in all_fit: + fit = pickle.load(open(f, "rb")) + if fit[3] is False: + continue + elif fit[4] is False: + continue + fit_to_plot.append(fit) + fit_name_to_plot.append(f) + + fit_prop = [] + for i in range(len(fit_name_to_plot)): + fit_prop.append( + fit_name_to_plot[i].split("fitted_parameters_")[-1].split("_box")[0] + ) + + fit_prop = np.array(fit_prop) + unique_fit_prop = np.sort(np.unique(fit_prop)) + + fig, ax = plt.subplots(len(parameters), 1, figsize=figsize, sharex=True) + + text = [] + for i, fit_p in enumerate(unique_fit_prop): + mask = fit_prop == fit_p + fits = np.array(fit_to_plot)[mask] + + for j, param in enumerate(parameters): + mean_error_param = np.mean([fits[i][2][param] for i in range(len(fits))]) + + ax[j].plot(i, mean_error_param, marker=".", ls="None", color="C1") + ax[j].set_ylabel(param, fontsize=18) + + 
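+        # Keep the configuration label parsed from the file name; it becomes
+        # the x-tick label below.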
text.append(fit_p) + + j_index = np.arange(len(unique_fit_prop)) + ax[-1].set_xticks(j_index, np.array(text), rotation=90, fontsize=10) + ax[0].margins(x=0.005) + fig.tight_layout() From d0a7a52cc6c813b238d2b299ca4e6c1fcc7f5314 Mon Sep 17 00:00:00 2001 From: corentinravoux Date: Mon, 22 Jul 2024 14:43:01 +0200 Subject: [PATCH 3/8] fix for mcmc pickling --- flip/likelihood.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/flip/likelihood.py b/flip/likelihood.py index ad478e5..55f0364 100644 --- a/flip/likelihood.py +++ b/flip/likelihood.py @@ -1,3 +1,5 @@ +from functools import partial + import numpy as np import scipy as sc @@ -22,6 +24,14 @@ def log_likelihood_gaussian_cholesky(vector, covariance_sum): return -0.5 * (vector.size * np.log(2 * np.pi) + logdet + chi2) +def no_prior(x): + return 0 + + +def prior_sum(priors, x): + return sum(prior(x) for prior in priors) + + class BaseLikelihood(object): _default_likelihood_properties = { @@ -122,7 +132,7 @@ def initialize_prior( self, ): if "prior" not in self.likelihood_properties.keys(): - return lambda x: 0 + return no_prior else: prior_dict = self.likelihood_properties["prior"] priors = [] @@ -140,10 +150,8 @@ def initialize_prior( ) priors.append(prior) - def prior_sum(x): - return sum(prior(x) for prior in priors) - - return prior_sum + prior_function = partial(prior_sum, priors) + return prior_function class MultivariateGaussianLikelihood(BaseLikelihood): From de81335a363a5185db02f1086148b9d177a36884 Mon Sep 17 00:00:00 2001 From: corentinravoux Date: Mon, 22 Jul 2024 15:41:45 +0200 Subject: [PATCH 4/8] fix mcmc fitter --- flip/fitter.py | 36 ++++++++++++++++++++++++++++++------ 1 file changed, 30 insertions(+), 6 deletions(-) diff --git a/flip/fitter.py b/flip/fitter.py index 1bdc384..46877f3 100644 --- a/flip/fitter.py +++ b/flip/fitter.py @@ -352,10 +352,11 @@ def init_from_covariance( data, parameter_dict, likelihood_type="multivariate_gaussian", - likelihood_properties=None, + likelihood_properties={}, sampler_name="emcee", nwalkers=1, backend_file=None, + **kwargs, ): """ The init_from_covariance function is a class method that initializes the MCMC fitter from a covariance matrix. 
@@ -373,19 +374,30 @@ def init_from_covariance( """ - mcmc_fitter = cls(covariance=covariance, data=data, sampler_name=sampler_name) - + mcmc_fitter = cls( + covariance=covariance, + data=data, + sampler_name=sampler_name, + ) likelihood = mcmc_fitter.get_likelihood( parameter_dict, likelihood_type=likelihood_type, - likelihood_properties=likelihood_properties, + likelihood_properties={ + **likelihood_properties, + "negative_log_likelihood": True, + }, + **kwargs, ) - p0 = None if backend_file is None: p0 = np.stack( [p["randfun"](size=nwalkers) for p in parameter_dict.values()] ).T + else: + if os.path.exists(backend_file): + p0 = np.stack( + [p["randfun"](size=nwalkers) for p in parameter_dict.values()] + ).T mcmc_fitter.set_sampler(likelihood, p0=p0, backend_file=backend_file) return mcmc_fitter @@ -455,7 +467,6 @@ def __init__(self, likelihood, p0=None, backend_file=None): "Initial size: {0}".format(self.backend.iteration) ) self._p0 = None - self.ndim = self.backend.shape[1] self.nwalkers = self.backend.shape[0] else: log.add("Create new file to store chains") @@ -502,3 +513,16 @@ def run_chains_untilconv( break old_tau = tau return sampler + + # def return_results(): + # reader = emcee.backends.HDFBackend(backend_file, read_only=True) + # try: + # tau = reader.get_autocorr_time() + # burnin = int(2 * np.max(tau)) + # thin = int(0.5 * np.min(tau)) + # except: + # burnin = 0 + # thin = 1 + # samples = reader.get_chain(discard=burnin, thin=thin) + # flat_samples = reader.get_chain(discard=burnin, flat=True, thin=thin) + # logprob = reader.get_log_prob(discard=burnin, flat=True, thin=thin) From 625106a9306141ede1fbd24c8466ab29993b1e3f Mon Sep 17 00:00:00 2001 From: corentinravoux Date: Mon, 22 Jul 2024 17:42:00 +0200 Subject: [PATCH 5/8] fix mcmc --- flip/fitter.py | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/flip/fitter.py b/flip/fitter.py index 46877f3..1a81f6a 100644 --- a/flip/fitter.py +++ b/flip/fitter.py @@ -384,7 +384,7 @@ def init_from_covariance( likelihood_type=likelihood_type, likelihood_properties={ **likelihood_properties, - "negative_log_likelihood": True, + "negative_log_likelihood": False, }, **kwargs, ) @@ -394,7 +394,7 @@ def init_from_covariance( [p["randfun"](size=nwalkers) for p in parameter_dict.values()] ).T else: - if os.path.exists(backend_file): + if not (os.path.exists(backend_file)): p0 = np.stack( [p["randfun"](size=nwalkers) for p in parameter_dict.values()] ).T @@ -513,16 +513,3 @@ def run_chains_untilconv( break old_tau = tau return sampler - - # def return_results(): - # reader = emcee.backends.HDFBackend(backend_file, read_only=True) - # try: - # tau = reader.get_autocorr_time() - # burnin = int(2 * np.max(tau)) - # thin = int(0.5 * np.min(tau)) - # except: - # burnin = 0 - # thin = 1 - # samples = reader.get_chain(discard=burnin, thin=thin) - # flat_samples = reader.get_chain(discard=burnin, flat=True, thin=thin) - # logprob = reader.get_log_prob(discard=burnin, flat=True, thin=thin) From 3c8a45a4c81db5afda0892819592316552f7b8da Mon Sep 17 00:00:00 2001 From: corentinravoux Date: Tue, 23 Jul 2024 11:52:35 +0200 Subject: [PATCH 6/8] fix and prior additions --- flip/likelihood.py | 91 +++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 82 insertions(+), 9 deletions(-) diff --git a/flip/likelihood.py b/flip/likelihood.py index 55f0364..40c8841 100644 --- a/flip/likelihood.py +++ b/flip/likelihood.py @@ -7,7 +7,7 @@ from flip.utils import create_log log = create_log() -_available_priors = ["gaussian"] 
+_available_priors = ["gaussian", "positive", "uniform"] def log_likelihood_gaussian_inverse(vector, covariance_sum): @@ -148,6 +148,15 @@ def initialize_prior( prior_mean=prior_properties["mean"], prior_standard_deviation=prior_properties["standard_deviation"], ) + elif prior_properties["type"].lower() == "positive": + prior = PositivePrior( + parameter_name=parameter_name.lower(), + ) + elif prior_properties["type"].lower() == "uniform": + prior = UniformPrior( + parameter_name=parameter_name.lower(), + range=prior_properties["range"], + ) priors.append(prior) prior_function = partial(prior_sum, priors) @@ -252,7 +261,10 @@ def verify_covariance(self): if self.covariance[i].full_matrix is False: self.covariance[i].compute_full_matrix() - def __call__(self, parameter_values): + def __call__( + self, + parameter_values, + ): """ The __call__ function is the function that is called when you call an instance of a class. For example, if you have a class named 'Foo' and create an instance of it like this: @@ -269,6 +281,16 @@ def __call__(self, parameter_values): """ parameter_values_dict = dict(zip(self.parameter_names, parameter_values)) + interpolation_value = parameter_values_dict[self.interpolation_value_name] + + if (interpolation_value < self.interpolation_value_range[0]) | ( + interpolation_value > self.interpolation_value_range[-1] + ): + if self.likelihood_properties["negative_log_likelihood"]: + return np.inf + else: + return -np.inf + vector, vector_error = self.load_data_vector( self.covariance[0].model_type, parameter_values_dict, @@ -282,11 +304,12 @@ def __call__(self, parameter_values): ) ) covariance_sum_interpolated = sc.interpolate.interp1d( - self.interpolation_value_range, covariance_sum_list, copy=False, axis=0 - ) - covariance_sum = covariance_sum_interpolated( - parameter_values_dict[self.interpolation_value_name] + self.interpolation_value_range, + covariance_sum_list, + copy=False, + axis=0, ) + covariance_sum = covariance_sum_interpolated(interpolation_value) likelihood_function = eval( f"log_likelihood_gaussian_{self.likelihood_properties['inversion_method']}" @@ -307,6 +330,8 @@ def __init__( parameter_names=None, prior=None, likelihood_properties={}, + interpolation_value_name_0=None, + interpolation_value_name_1=None, interpolation_value_range_0=None, interpolation_value_range_1=None, ): @@ -335,6 +360,8 @@ def __init__( prior=prior, likelihood_properties=likelihood_properties, ) + self.interpolation_value_name_0 = interpolation_value_name_0 + self.interpolation_value_name_1 = interpolation_value_name_1 self.interpolation_value_range_0 = interpolation_value_range_0 self.interpolation_value_range_1 = interpolation_value_range_1 @@ -347,8 +374,6 @@ def verify_covariance(self): def __call__( self, parameter_values, - interpolation_value_0, - interpolation_value_1, ): """ The __call__ function is the function that will be called when the likelihood @@ -368,6 +393,20 @@ def __call__( """ parameter_values_dict = dict(zip(self.parameter_names, parameter_values)) + interpolation_value_0 = parameter_values_dict[self.interpolation_value_name_0] + interpolation_value_1 = parameter_values_dict[self.interpolation_value_name_1] + + if ( + (interpolation_value_0 < self.interpolation_value_range_0[0]) + | (interpolation_value_0 > self.interpolation_value_range_0[-1]) + | (interpolation_value_1 < self.interpolation_value_range_1[0]) + | (interpolation_value_1 > self.interpolation_value_range_1[-1]) + ): + if self.likelihood_properties["negative_log_likelihood"]: + return np.inf 
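+            # Out-of-range interpolation values are rejected with the sign
+            # matching the likelihood convention: +inf when minimizing the
+            # negative log-likelihood (Minuit), -inf for samplers.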
+ else: + return -np.inf + vector, vector_error = self.load_data_vector( self.covariance[0][0].model_type, parameter_values_dict, @@ -401,7 +440,6 @@ def __call__( prior_value = self.prior(parameter_values_dict) if self.likelihood_properties["negative_log_likelihood"]: return -likelihood_function(vector, covariance_sum) - prior_value - return likelihood_function(vector, covariance_sum) + prior_value @@ -434,3 +472,38 @@ def __call__( + (parameter_values_dict[self.parameter_name] - self.prior_mean) ** 2 / self.prior_standard_deviation**2 ) + + +class PositivePrior(Prior): + + def __init__( + self, + parameter_name=None, + ): + super().__init__(parameter_name=parameter_name) + + def __call__( + self, + parameter_values_dict, + ): + if parameter_values_dict[self.parameter_name] < 0: + return -np.inf + else: + return 0 + + +class UniformPrior(Prior): + + def __init__(self, parameter_name=None, range=None): + super().__init__(parameter_name=parameter_name) + self.range = range + + def __call__( + self, + parameter_values_dict, + ): + value = parameter_values_dict[self.parameter_name] + if (value < self.range[0]) | (value > self.range[1]): + return -np.inf + else: + return 0 From f1cd481223f5670a11c7d832e29842bda2ebc861 Mon Sep 17 00:00:00 2001 From: corentinravoux Date: Thu, 25 Jul 2024 15:45:33 +0200 Subject: [PATCH 7/8] plot improvement --- flip/plot_utils.py | 148 +++++++++++++++++++++++++++------------------ 1 file changed, 89 insertions(+), 59 deletions(-) diff --git a/flip/plot_utils.py b/flip/plot_utils.py index cef3c53..0478dcc 100644 --- a/flip/plot_utils.py +++ b/flip/plot_utils.py @@ -183,25 +183,55 @@ def plot_all_fits( fit_output, parameters, fiducials=None, + compute_fs8_from_beta=False, + subset_plot=None, **kwargs, ): figsize = utils.return_key(kwargs, "figsize", (10, 10)) all_fit = glob.glob(os.path.join(fit_output, "*")) fig, ax = plt.subplots(len(parameters), 1, figsize=figsize, sharex=True) - + fit_names, param_dict, error_dict = [], {}, {} + for j, param_name in enumerate(parameters): + param_dict[param_name] = [] + error_dict[param_name] = [] for i, f in enumerate(all_fit): + if subset_plot is not None: + if subset_plot not in f: + continue fit = pickle.load(open(f, "rb")) if fit[3] is False: continue elif fit[4] is False: continue - for j, param in enumerate(parameters): + + fit_names.append(f) + for j, param_name in enumerate(parameters): + if (param_name == "fs8") & (compute_fs8_from_beta): + param = fit[0]["beta_f"] * fit[0]["bs8"] + error = ( + fit[0]["beta_f"] + * fit[0]["bs8"] + * np.sqrt( + (fit[2]["bs8"] / fit[0]["bs8"]) ** 2 + + (fit[2]["beta_f"] / fit[0]["beta_f"]) ** 2 + ) + ) + else: + param = fit[0][param_name] + error = fit[2][param_name] + param_dict[param_name].append(param) + error_dict[param_name].append(error) ax[j].errorbar( - i, fit[0][param], fit[2][param], marker=".", ls="None", color="C1" + i, + param, + error, + marker=".", + ls="None", + color="C1", ) - ax[j].set_ylabel(param, fontsize=18) + ax[j].set_ylabel(param_name, fontsize=18) if fiducials is not None: if fiducials[j] is not None: @@ -209,12 +239,16 @@ def plot_all_fits( ax[0].margins(x=0.005) fig.tight_layout() + return fit_names, param_dict, error_dict def plot_all_mean_fits( fit_output, parameters, fiducials=None, + weighted_mean=True, + compute_fs8_from_beta=False, + subset_plot=None, **kwargs, ): figsize = utils.return_key(kwargs, "figsize", (10, 10)) @@ -224,6 +258,9 @@ def plot_all_mean_fits( fit_to_plot = [] fit_name_to_plot = [] for f in all_fit: + if subset_plot is not None: + if 
subset_plot not in f: + continue fit = pickle.load(open(f, "rb")) if fit[3] is False: continue @@ -242,23 +279,60 @@ def plot_all_mean_fits( unique_fit_prop = np.sort(np.unique(fit_prop)) fig, ax = plt.subplots(len(parameters), 1, figsize=figsize, sharex=True) + fig2, ax2 = plt.subplots(len(parameters), 1, figsize=figsize, sharex=True) text = [] + mean_param_dict, mean_error_dict, error_mean_dict = {}, {}, {} + for j, param_name in enumerate(parameters): + mean_param_dict[param_name] = [] + mean_error_dict[param_name] = [] + error_mean_dict[param_name] = [] + for i, fit_p in enumerate(unique_fit_prop): + mask = fit_prop == fit_p fits = np.array(fit_to_plot)[mask] - for j, param in enumerate(parameters): - mean_param = np.mean([fits[i][0][param] for i in range(len(fits))]) - mean_error_param = np.mean( - [fits[i][2][param] for i in range(len(fits))] - ) / np.sqrt(len(mask[mask])) + for j, param_name in enumerate(parameters): + if (param_name == "fs8") & (compute_fs8_from_beta): + params = [ + fits[i][0]["beta_f"] * fits[i][0]["bs8"] for i in range(len(fits)) + ] + errors = [ + ( + fits[i][0]["beta_f"] + * fits[i][0]["bs8"] + * np.sqrt( + (fits[i][2]["bs8"] / fits[i][0]["bs8"]) ** 2 + + (fits[i][2]["beta_f"] / fits[i][0]["beta_f"]) ** 2 + ) + ) + for i in range(len(fits)) + ] + else: + params = [fits[i][0][param_name] for i in range(len(fits))] + errors = [fits[i][2][param_name] for i in range(len(fits))] + if weighted_mean: + mean_param = np.average( + params, weights=[1 / (error**2) for error in errors] + ) + else: + mean_param = np.mean(params) + error_mean_param = np.mean(errors) / np.sqrt(len(mask[mask])) + mean_error_param = np.mean(errors) + + mean_param_dict[param_name].append(mean_param) + mean_error_dict[param_name].append(mean_error_param) + error_mean_dict[param_name].append(error_mean_param) ax[j].errorbar( - i, mean_param, mean_error_param, marker=".", ls="None", color="C1" + i, mean_param, error_mean_param, marker=".", ls="None", color="C1" ) - ax[j].set_ylabel(param, fontsize=18) + ax[j].set_ylabel(param_name, fontsize=18) + + ax2[j].plot(i, mean_error_param, marker=".", ls="None", color="C1") + ax2[j].set_ylabel(r"$\sigma$(" + param_name + ")", fontsize=18) if fiducials is not None: if fiducials[j] is not None: @@ -271,52 +345,8 @@ def plot_all_mean_fits( ax[0].margins(x=0.005) fig.tight_layout() + ax2[-1].set_xticks(j_index, np.array(text), rotation=90, fontsize=10) + ax2[0].margins(x=0.005) + fig2.tight_layout() -def plot_all_mean_error_fits( - fit_output, - parameters, - **kwargs, -): - figsize = utils.return_key(kwargs, "figsize", (10, 10)) - - all_fit = glob.glob(os.path.join(fit_output, "*")) - - fit_to_plot = [] - fit_name_to_plot = [] - for f in all_fit: - fit = pickle.load(open(f, "rb")) - if fit[3] is False: - continue - elif fit[4] is False: - continue - fit_to_plot.append(fit) - fit_name_to_plot.append(f) - - fit_prop = [] - for i in range(len(fit_name_to_plot)): - fit_prop.append( - fit_name_to_plot[i].split("fitted_parameters_")[-1].split("_box")[0] - ) - - fit_prop = np.array(fit_prop) - unique_fit_prop = np.sort(np.unique(fit_prop)) - - fig, ax = plt.subplots(len(parameters), 1, figsize=figsize, sharex=True) - - text = [] - for i, fit_p in enumerate(unique_fit_prop): - mask = fit_prop == fit_p - fits = np.array(fit_to_plot)[mask] - - for j, param in enumerate(parameters): - mean_error_param = np.mean([fits[i][2][param] for i in range(len(fits))]) - - ax[j].plot(i, mean_error_param, marker=".", ls="None", color="C1") - ax[j].set_ylabel(param, fontsize=18) - - 
text.append(fit_p)
-
-    j_index = np.arange(len(unique_fit_prop))
-    ax[-1].set_xticks(j_index, np.array(text), rotation=90, fontsize=10)
-    ax[0].margins(x=0.005)
-    fig.tight_layout()
+    return unique_fit_prop, mean_param_dict, mean_error_dict, error_mean_dict

From 01a9be97412d49bc9e55832b721754cb6127c4ef Mon Sep 17 00:00:00 2001
From: corentinravoux
Date: Thu, 25 Jul 2024 15:45:48 +0200
Subject: [PATCH 8/8] Simplify covariance-sum construction in the interpolated
 likelihood

---
 flip/likelihood.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/flip/likelihood.py b/flip/likelihood.py
index 40c8841..ee4de93 100644
--- a/flip/likelihood.py
+++ b/flip/likelihood.py
@@ -296,13 +296,13 @@ def __call__(
             parameter_values_dict,
         )
 
-        covariance_sum_list = []
-        for i in range(len(self.covariance)):
-            covariance_sum_list.append(
-                self.covariance[i].compute_covariance_sum(
-                    parameter_values_dict, vector_error
-                )
+        covariance_sum_list = [
+            self.covariance[i].compute_covariance_sum(
+                parameter_values_dict, vector_error
             )
+            for i in range(len(self.covariance))
+        ]
+
         covariance_sum_interpolated = sc.interpolate.interp1d(
             self.interpolation_value_range,
             covariance_sum_list,
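
Note on the output format: every fit_*_minuit helper above pickles its results
as a plain positional list. A minimal sketch of reading one file back, assuming
a hypothetical output name (real names are whatever was passed as name_out_fit
inside parameter_fit); the layout is exactly the `params` list built in the
helpers:

    import pickle

    with open("fitted_parameters_example.pkl", "rb") as f:
        results = pickle.load(f)

    values, limits, errors, valid, accurate, fval = results[:6]
    if valid and accurate:
        for name in values:
            print(f"{name} = {values[name]:.4f} +/- {errors[name]:.4f}")

    # The velocity and full fits append one extra entry,
    # data_velocity.index.size (the number of retained supernovae).
    if len(results) > 6:
        print("n_sn:", results[6])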
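The priors added in PATCH 6 are configured through likelihood_properties. A
minimal sketch of the expected structure, with illustrative parameter names
(fs8, sigv, beta_f); only the "prior" layout, the three prior types and their
keys, and the two other properties are taken from the patches:

    likelihood_properties = {
        "inversion_method": "cholesky",   # selects log_likelihood_gaussian_cholesky
        "negative_log_likelihood": True,  # Minuit minimizes; FitMcmc overrides to False (PATCH 5)
        "prior": {
            "fs8": {"type": "gaussian", "mean": 0.4, "standard_deviation": 0.1},
            "sigv": {"type": "positive"},
            "beta_f": {"type": "uniform", "range": [0.0, 1.0]},
        },
    }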
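PATCH 3 swaps the closure returned by initialize_prior for a functools.partial
over a module-level prior_sum. The commit message ties this to MCMC pickling:
locally defined functions cannot be serialized by the standard pickle module,
while a partial over a module-level function can, so the likelihood remains
picklable, e.g. when the emcee sampler is driven through a multiprocessing
pool. A self-contained illustration:

    import pickle
    from functools import partial

    def prior_sum(priors, x):
        # Module-level: picklable by reference to its qualified name.
        return sum(prior(x) for prior in priors)

    def make_closure(priors):
        def inner(x):  # local function: pickling this raises an error
            return sum(prior(x) for prior in priors)
        return inner

    pickle.dumps(partial(prior_sum, []))  # works
    try:
        pickle.dumps(make_closure([]))
    except (AttributeError, pickle.PicklingError) as err:
        print("closure cannot be pickled:", err)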