From dfa62dee88525b352fb6ccc1d4934a70ecb20474 Mon Sep 17 00:00:00 2001 From: akisschinas Date: Wed, 16 Oct 2024 17:13:37 +0300 Subject: [PATCH 1/4] Upgrade volesti and pass method as string avoiding uneeded integer checks --- build.py | 2 +- dingo/PolytopeSampler.py | 16 ++-- dingo/bindings/bindings.cpp | 161 ++++++++++++++++++------------------ dingo/bindings/bindings.h | 4 +- dingo/volestipy.pyx | 55 ++++-------- setup.py | 2 +- volesti | 2 +- 7 files changed, 108 insertions(+), 134 deletions(-) diff --git a/build.py b/build.py index 340036f0..c5bca212 100644 --- a/build.py +++ b/build.py @@ -23,7 +23,7 @@ def build(setup_kwargs): # gcc arguments hack: enable optimizations os.environ["CFLAGS"] = [ - "-std=c++11", + "-std=c++17", "-O3", "-DBOOST_NO_AUTO_PTR", "-ldl", diff --git a/dingo/PolytopeSampler.py b/dingo/PolytopeSampler.py index f003a486..f6d76d52 100644 --- a/dingo/PolytopeSampler.py +++ b/dingo/PolytopeSampler.py @@ -159,7 +159,7 @@ def generate_steady_states( self._T_shift = np.add(self._T_shift, Tr_shift) return steady_states - + def generate_steady_states_no_multiphase( self, method = 'billiard_walk', n=1000, burn_in=0, thinning=1, variance=1.0, bias_vector=None ): @@ -171,17 +171,17 @@ def generate_steady_states_no_multiphase( burn_in -- the number of points to burn before sampling thinning -- the walk length of the chain """ - + self.get_polytope() P = HPolytope(self._A, self._b) - + if bias_vector is None: bias_vector = np.ones(self._A.shape[1], dtype=np.float64) else: bias_vector = bias_vector.astype('float64') - samples = P.generate_samples(method, n, burn_in, thinning, variance, bias_vector, self._parameters["solver"]) + samples = P.generate_samples(method.encode('utf-8'), n, burn_in, thinning, variance, bias_vector, self._parameters["solver"]) samples_T = samples.T steady_states = map_samples_to_steady_states( @@ -213,7 +213,7 @@ def sample_from_polytope( return samples - + @staticmethod def sample_from_polytope_no_multiphase( A, b, method = 'billiard_walk', n=1000, burn_in=0, thinning=1, variance=1.0, bias_vector=None, solver=None @@ -232,10 +232,10 @@ def sample_from_polytope_no_multiphase( bias_vector = np.ones(A.shape[1], dtype=np.float64) else: bias_vector = bias_vector.astype('float64') - + P = HPolytope(A, b) - samples = P.generate_samples(method, n, burn_in, thinning, variance, bias_vector, solver) + samples = P.generate_samples(method.encode('utf-8'), n, burn_in, thinning, variance, bias_vector, solver) samples_T = samples.T return samples_T @@ -246,7 +246,7 @@ def round_polytope( ): P = HPolytope(A, b) A, b, Tr, Tr_shift, round_value = P.rounding(method, solver) - + return A, b, Tr, Tr_shift @staticmethod diff --git a/dingo/bindings/bindings.cpp b/dingo/bindings/bindings.cpp index 74057a39..881b1f31 100644 --- a/dingo/bindings/bindings.cpp +++ b/dingo/bindings/bindings.cpp @@ -44,7 +44,7 @@ HPolytopeCPP::HPolytopeCPP(double *A_np, double *b_np, int n_hyperplanes, int n_ HPolytopeCPP::~HPolytopeCPP(){} ////////// Start of "compute_volume" ////////// -double HPolytopeCPP::compute_volume(char* vol_method, char* walk_method, +double HPolytopeCPP::compute_volume(char* vol_method, char* walk_method, int walk_len, double epsilon, int seed) const { double volume; @@ -84,80 +84,80 @@ double HPolytopeCPP::compute_volume(char* vol_method, char* walk_method, ////////// Start of "generate_samples()" ////////// double HPolytopeCPP::apply_sampling(int walk_len, - int number_of_points, + int number_of_points, int number_of_points_to_burn, - int method, - double* inner_point, 
+                                    char* method,
+                                    double* inner_point,
                                     double radius,
                                     double* samples,
                                     double variance_value,
-                                    double* bias_vector_){
-   
+                                    double* bias_vector_){
+
    RNGType rng(HP.dimension());
    HP.normalize();
    int d = HP.dimension();
-   Point starting_point; 
+   Point starting_point;
    VT inner_vec(d);
    for (int i = 0; i < d; i++){
       inner_vec(i) = inner_point[i];
    }
-   
-   Point inner_point2(inner_vec); 
+
+   Point inner_point2(inner_vec);
    CheBall = std::pair(inner_point2, radius);
    HP.set_InnerBall(CheBall);
    starting_point = inner_point2;

    std::list rand_points;
-   
+
    NT variance = variance_value;

-   if (method == 1) { // cdhr
+   if (strcmp(method, "cdhr") == 0) { // cdhr
       uniform_sampling(rand_points, HP, rng, walk_len, number_of_points,
                               starting_point, number_of_points_to_burn);
-   } else if (method == 2) { // rdhr
-      uniform_sampling(rand_points, HP, rng, walk_len, number_of_points, 
+   } else if (strcmp(method, "rdhr") == 0) { // rdhr
+      uniform_sampling(rand_points, HP, rng, walk_len, number_of_points,
                            starting_point, number_of_points_to_burn);
-   } else if (method == 3) { // accelerated_billiard
-      uniform_sampling(rand_points, HP, rng, walk_len, 
-                                          number_of_points, starting_point, 
+   } else if (strcmp(method, "billiard_walk") == 0) { // accelerated_billiard
+      uniform_sampling(rand_points, HP, rng, walk_len,
+                                          number_of_points, starting_point,
                                           number_of_points_to_burn);
-   } else if (method == 4) { // ball walk
-      uniform_sampling(rand_points, HP, rng, walk_len, number_of_points, 
+   } else if (strcmp(method, "ball_walk") == 0) { // ball walk
+      uniform_sampling(rand_points, HP, rng, walk_len, number_of_points,
                            starting_point, number_of_points_to_burn);
-   } else if (method == 5) { // dikin walk
+   } else if (strcmp(method, "dikin_walk") == 0) { // dikin walk
       uniform_sampling(rand_points, HP, rng, walk_len, number_of_points,
                            starting_point, number_of_points_to_burn);
-   } else if (method == 6) { // john walk
+   } else if (strcmp(method, "john_walk") == 0) { // john walk
       uniform_sampling(rand_points, HP, rng, walk_len, number_of_points,
                           starting_point, number_of_points_to_burn);
-   } else if (method == 7) { // vaidya walk
+   } else if (strcmp(method, "vaidya_walk") == 0) { // vaidya walk
       uniform_sampling(rand_points, HP, rng, walk_len, number_of_points,
                            starting_point, number_of_points_to_burn);
-   } else if (method == 8) { // Gaussian sampling with exact HMC walk
+   } else if (strcmp(method, "gaussian_hmc_walk") == 0) { // Gaussian sampling with exact HMC walk
       NT a = NT(1)/(NT(2)*variance);
       gaussian_sampling(rand_points, HP, rng, walk_len, number_of_points, a,
                         starting_point, number_of_points_to_burn);
-   } else if (method == 9) { // exponential sampling with exact HMC walk
+   } else if (strcmp(method, "exponential_hmc_walk") == 0) { // exponential sampling with exact HMC walk
       VT c(d);
       for (int i = 0; i < d; i++){
          c(i) = bias_vector_[i];
       }
-      Point bias_vector(c); 
+      Point bias_vector(c);
       exponential_sampling(rand_points, HP, rng, walk_len, number_of_points, bias_vector, variance,
-                           starting_point, number_of_points_to_burn);                           
-   } else if (method == 10) { // HMC with Gaussian distribution
-      rand_points = hmc_leapfrog_gaussian(walk_len, number_of_points, number_of_points_to_burn, variance, starting_point, HP);      
-   } else if (method == 11) { // HMC with exponential distribution
+                           starting_point, number_of_points_to_burn);
+   } else if (strcmp(method, "hmc_leapfrog_gaussian") == 0) { // HMC with Gaussian distribution
+      rand_points = hmc_leapfrog_gaussian(walk_len, number_of_points, number_of_points_to_burn, variance, starting_point, HP);
+   } else if (strcmp(method, "hmc_leapfrog_exponential") == 0) { // 
HMC with exponential distribution VT c(d); for (int i = 0; i < d; i++) { c(i) = bias_vector_[i]; } Point bias_vector(c); - - rand_points = hmc_leapfrog_exponential(walk_len, number_of_points, number_of_points_to_burn, variance, bias_vector, starting_point, HP); - + + rand_points = hmc_leapfrog_exponential(walk_len, number_of_points, number_of_points_to_burn, variance, bias_vector, starting_point, HP); + } - + else { throw std::runtime_error("This function must not be called."); } @@ -187,7 +187,7 @@ void HPolytopeCPP::get_polytope_as_matrices(double* new_A, double* new_b) const new_A[n_si++] = A_to_copy(i, j); } } - + // create the new_b vector VT new_b_temp = HP.get_vec(); for (int i=0; i < n_hyperplanes; i++){ @@ -205,35 +205,35 @@ void HPolytopeCPP::mmcs_initialize(int d, int ess, bool psrf_check, bool paralle double HPolytopeCPP::mmcs_step(double* inner_point, double radius, int &N) { - + HP.normalize(); int d = HP.dimension(); VT inner_vec(d); NT max_s; - + for (int i = 0; i < d; i++){ inner_vec(i) = inner_point[i]; } - - Point inner_point2(inner_vec); - CheBall = std::pair(inner_point2, radius); - - HP.set_InnerBall(CheBall); + + Point inner_point2(inner_vec); + CheBall = std::pair(inner_point2, radius); + + HP.set_InnerBall(CheBall); RNGType rng(d); - - if (mmcs_set_of_parameters.request_rounding && mmcs_set_of_parameters.rounding_completed) + + if (mmcs_set_of_parameters.request_rounding && mmcs_set_of_parameters.rounding_completed) { mmcs_set_of_parameters.req_round_temp = false; } - if (mmcs_set_of_parameters.req_round_temp) + if (mmcs_set_of_parameters.req_round_temp) { mmcs_set_of_parameters.nburns = mmcs_set_of_parameters.num_rounding_steps / mmcs_set_of_parameters.window + 1; - } - else + } + else { mmcs_set_of_parameters.nburns = mmcs_set_of_parameters.max_num_samples / mmcs_set_of_parameters.window + 1; } @@ -245,21 +245,21 @@ double HPolytopeCPP::mmcs_step(double* inner_point, double radius, int &N) { MT TotalRandPoints; if (mmcs_set_of_parameters.parallelism) { - mmcs_set_of_parameters.complete = perform_parallel_mmcs_step(HP, rng, mmcs_set_of_parameters.walk_length, - mmcs_set_of_parameters.Neff, - mmcs_set_of_parameters.max_num_samples, - mmcs_set_of_parameters.window, - Neff_sampled, mmcs_set_of_parameters.total_samples, - mmcs_set_of_parameters.num_rounding_steps, - TotalRandPoints, CheBall.first, mmcs_set_of_parameters.nburns, - mmcs_set_of_parameters.num_threads, + mmcs_set_of_parameters.complete = perform_parallel_mmcs_step(HP, rng, mmcs_set_of_parameters.walk_length, + mmcs_set_of_parameters.Neff, + mmcs_set_of_parameters.max_num_samples, + mmcs_set_of_parameters.window, + Neff_sampled, mmcs_set_of_parameters.total_samples, + mmcs_set_of_parameters.num_rounding_steps, + TotalRandPoints, CheBall.first, mmcs_set_of_parameters.nburns, + mmcs_set_of_parameters.num_threads, mmcs_set_of_parameters.req_round_temp, L); } else { mmcs_set_of_parameters.complete = perform_mmcs_step(HP, rng, mmcs_set_of_parameters.walk_length, mmcs_set_of_parameters.Neff, mmcs_set_of_parameters.max_num_samples, mmcs_set_of_parameters.window, - Neff_sampled, mmcs_set_of_parameters.total_samples, + Neff_sampled, mmcs_set_of_parameters.total_samples, mmcs_set_of_parameters.num_rounding_steps, TotalRandPoints, CheBall.first, mmcs_set_of_parameters.nburns, mmcs_set_of_parameters.req_round_temp, WalkType); } @@ -270,29 +270,29 @@ double HPolytopeCPP::mmcs_step(double* inner_point, double radius, int &N) { mmcs_set_of_parameters.Neff -= Neff_sampled; std::cout << "phase " << 
mmcs_set_of_parameters.phase << ": number of correlated samples = " << mmcs_set_of_parameters.total_samples << ", effective sample size = " << Neff_sampled; mmcs_set_of_parameters.total_neff += Neff_sampled; - + mmcs_set_of_parameters.samples.conservativeResize(d, mmcs_set_of_parameters.total_number_of_samples_in_P0 + mmcs_set_of_parameters.total_samples); for (int i = 0; i < mmcs_set_of_parameters.total_samples; i++) { - mmcs_set_of_parameters.samples.col(i + mmcs_set_of_parameters.total_number_of_samples_in_P0) = + mmcs_set_of_parameters.samples.col(i + mmcs_set_of_parameters.total_number_of_samples_in_P0) = mmcs_set_of_parameters.T * TotalRandPoints.row(i).transpose() + mmcs_set_of_parameters.T_shift; } - N = mmcs_set_of_parameters.total_number_of_samples_in_P0 + mmcs_set_of_parameters.total_samples; + N = mmcs_set_of_parameters.total_number_of_samples_in_P0 + mmcs_set_of_parameters.total_samples; mmcs_set_of_parameters.total_number_of_samples_in_P0 += mmcs_set_of_parameters.total_samples; - if (!mmcs_set_of_parameters.complete) + if (!mmcs_set_of_parameters.complete) { - if (mmcs_set_of_parameters.request_rounding && !mmcs_set_of_parameters.rounding_completed) + if (mmcs_set_of_parameters.request_rounding && !mmcs_set_of_parameters.rounding_completed) { VT shift(d), s(d); MT V(d, d), S(d, d), round_mat; - for (int i = 0; i < d; ++i) + for (int i = 0; i < d; ++i) { shift(i) = TotalRandPoints.col(i).mean(); } - for (int i = 0; i < mmcs_set_of_parameters.total_samples; ++i) + for (int i = 0; i < mmcs_set_of_parameters.total_samples; ++i) { TotalRandPoints.row(i) = TotalRandPoints.row(i) - shift.transpose(); } @@ -300,18 +300,18 @@ double HPolytopeCPP::mmcs_step(double* inner_point, double radius, int &N) { Eigen::BDCSVD svd(TotalRandPoints, Eigen::ComputeFullV); s = svd.singularValues() / svd.singularValues().minCoeff(); - if (s.maxCoeff() >= 2.0) + if (s.maxCoeff() >= 2.0) { - for (int i = 0; i < s.size(); ++i) + for (int i = 0; i < s.size(); ++i) { - if (s(i) < 2.0) + if (s(i) < 2.0) { s(i) = 1.0; } } V = svd.matrixV(); - } - else + } + else { s = VT::Ones(d); V = MT::Identity(d, d); @@ -328,16 +328,16 @@ double HPolytopeCPP::mmcs_step(double* inner_point, double radius, int &N) { std::cout << ", ratio of the maximum singilar value over the minimum singular value = " << max_s << std::endl; - if (max_s <= mmcs_set_of_parameters.s_cutoff || mmcs_set_of_parameters.round_it > mmcs_set_of_parameters.num_its) + if (max_s <= mmcs_set_of_parameters.s_cutoff || mmcs_set_of_parameters.round_it > mmcs_set_of_parameters.num_its) { mmcs_set_of_parameters.rounding_completed = true; } } - else + else { std::cout<<"\n"; } - } + } else if (!mmcs_set_of_parameters.psrf_check) { NT max_psrf = univariate_psrf(mmcs_set_of_parameters.samples).maxCoeff(); @@ -346,7 +346,7 @@ double HPolytopeCPP::mmcs_step(double* inner_point, double radius, int &N) { std::cout<<"\n\n"; return 1.5; } - else + else { TotalRandPoints.resize(0, 0); NT max_psrf = univariate_psrf(mmcs_set_of_parameters.samples).maxCoeff(); @@ -360,7 +360,7 @@ double HPolytopeCPP::mmcs_step(double* inner_point, double radius, int &N) { std::cerr << "\n [1]maximum marginal PSRF: " << max_psrf << std::endl; while (max_psrf > 1.1 && mmcs_set_of_parameters.total_neff >= mmcs_set_of_parameters.fixed_Neff) { - + mmcs_set_of_parameters.Neff += mmcs_set_of_parameters.store_ess(mmcs_set_of_parameters.skip_phase); mmcs_set_of_parameters.total_neff -= mmcs_set_of_parameters.store_ess(mmcs_set_of_parameters.skip_phase); @@ -369,7 +369,7 @@ double 
HPolytopeCPP::mmcs_step(double* inner_point, double radius, int &N) { MT S = mmcs_set_of_parameters.samples; mmcs_set_of_parameters.samples.resize(d, mmcs_set_of_parameters.total_number_of_samples_in_P0); - mmcs_set_of_parameters.samples = + mmcs_set_of_parameters.samples = S.block(0, mmcs_set_of_parameters.store_nsamples(mmcs_set_of_parameters.skip_phase), d, mmcs_set_of_parameters.total_number_of_samples_in_P0); mmcs_set_of_parameters.skip_phase++; @@ -392,7 +392,6 @@ double HPolytopeCPP::mmcs_step(double* inner_point, double radius, int &N) { return 0.0; } - void HPolytopeCPP::get_mmcs_samples(double* T_matrix, double* T_shift, double* samples) { int n_variables = HP.dimension(); @@ -403,7 +402,7 @@ void HPolytopeCPP::get_mmcs_samples(double* T_matrix, double* T_shift, double* s T_matrix[t_mat_index++] = mmcs_set_of_parameters.T(i, j); } } - + // create the shift vector for (int i = 0; i < n_variables; i++){ T_shift[i] = mmcs_set_of_parameters.T_shift[i]; @@ -430,13 +429,13 @@ void HPolytopeCPP::apply_rounding(int rounding_method, double* new_A, double* ne auto P(HP); RNGType rng(P.dimension()); P.normalize(); - - - + + + // read the inner point provided by the user and the radius int d = P.dimension(); VT inner_vec(d); - + for (int i = 0; i < d; i++){ inner_vec(i) = inner_point[i]; } @@ -444,7 +443,7 @@ void HPolytopeCPP::apply_rounding(int rounding_method, double* new_A, double* ne Point inner_point2(inner_vec); CheBall = std::pair(inner_point2, radius); P.set_InnerBall(CheBall); - + // set the output variable of the rounding step round_result round_res; @@ -453,8 +452,8 @@ void HPolytopeCPP::apply_rounding(int rounding_method, double* new_A, double* ne // run the rounding method if (rounding_method == 1) { // max ellipsoid - round_res = max_inscribed_ellipsoid_rounding(P, CheBall.first); - + round_res = inscribed_ellipsoid_rounding(P, CheBall.first); + } else if (rounding_method == 2) { // isotropization round_res = svd_rounding(P, CheBall, 1, rng); } else if (rounding_method == 3) { // min ellipsoid diff --git a/dingo/bindings/bindings.h b/dingo/bindings/bindings.h index 2d299cbe..eb4dcb20 100644 --- a/dingo/bindings/bindings.h +++ b/dingo/bindings/bindings.h @@ -38,7 +38,7 @@ // for rounding #include "preprocess/min_sampling_covering_ellipsoid_rounding.hpp" #include "preprocess/svd_rounding.hpp" -#include "preprocess/max_inscribed_ellipsoid_rounding.hpp" +#include "preprocess/inscribed_ellipsoid_rounding.hpp" typedef double NT; typedef Cartesian Kernel; @@ -140,7 +140,7 @@ class HPolytopeCPP{ // the apply_sampling() function double apply_sampling(int walk_len, int number_of_points, int number_of_points_to_burn, - int method, double* inner_point, double radius, double* samples, double variance_value, double* bias_vector); + char* method, double* inner_point, double radius, double* samples, double variance_value, double* bias_vector); void mmcs_initialize(int d, int ess, bool psrf_check, bool parallelism, int num_threads); diff --git a/dingo/volestipy.pyx b/dingo/volestipy.pyx index 300f0842..b1259684 100644 --- a/dingo/volestipy.pyx +++ b/dingo/volestipy.pyx @@ -1,6 +1,6 @@ # This is a cython wrapper for the C++ library volesti # volesti (volume computation and sampling library) - + # Copyright (c) 2012-2021 Vissarion Fisikopoulos # Copyright (c) 2018-2021 Apostolos Chalkis # Copyright (c) 2020-2021 Pedro Zuidberg Dos Martires @@ -54,8 +54,8 @@ cdef extern from "bindings.h": # Random sampling double apply_sampling(int walk_len, int number_of_points, int number_of_points_to_burn, \ - 
int method, double* inner_point, double radius, double* samples, double variance_value, double* bias_vector) - + char* method, double* inner_point, double radius, double* samples, double variance_value, double* bias_vector) + # Initialize the parameters for the (m)ultiphase (m)onte (c)arlo (s)ampling algorithm void mmcs_initialize(unsigned int d, int ess, int psrf_check, int parallelism, int num_threads); @@ -124,38 +124,13 @@ cdef class HPolytope: # Get max inscribed ball for the initial polytope temp_center, radius = inner_ball(self._A, self._b, solver) - + cdef double[::1] inner_point_for_c = np.asarray(temp_center) - + cdef double[::1] bias_vector_ = np.asarray(bias_vector) - - if method == 'cdhr': - int_method = 1 - elif method == 'rdhr': - int_method = 2 - elif method == 'billiard_walk': - int_method = 3 - elif method == 'ball_walk': - int_method = 4 - elif method == 'dikin_walk': - int_method = 5 - elif method == 'john_walk': - int_method = 6 - elif method == 'vaidya_walk': - int_method = 7 - elif method == 'gaussian_hmc_walk': - int_method = 8 - elif method == 'exponential_hmc_walk': - int_method = 9 - elif method == 'hmc_leapfrog_gaussian': - int_method = 10 - elif method == 'hmc_leapfrog_exponential': - int_method = 11 - else: - raise RuntimeError("Uknown MCMC sampling method") - + self.polytope_cpp.apply_sampling(walk_len, number_of_points, number_of_points_to_burn, \ - int_method, &inner_point_for_c[0], radius, &samples[0,0], variance_value, &bias_vector_[0]) + method, &inner_point_for_c[0], radius, &samples[0,0], variance_value, &bias_vector_[0]) return np.asarray(samples) @@ -172,12 +147,12 @@ cdef class HPolytope: cdef double[:,::1] T_matrix = np.zeros((n_variables, n_variables), dtype=np.float64, order="C") cdef double[::1] shift = np.zeros((n_variables), dtype=np.float64, order="C") cdef double round_value - + # Get max inscribed ball for the initial polytope center, radius = inner_ball(self._A, self._b, solver) - + cdef double[::1] inner_point_for_c = np.asarray(center) - + if rounding_method == 'john_position': int_method = 1 elif rounding_method == 'isotropic_position': @@ -190,8 +165,8 @@ cdef class HPolytope: self.polytope_cpp.apply_rounding(int_method, &new_A[0,0], &new_b[0], &T_matrix[0,0], &shift[0], round_value, &inner_point_for_c[0], radius) return np.asarray(new_A),np.asarray(new_b),np.asarray(T_matrix),np.asarray(shift),np.asarray(round_value) - - + + # (m)ultiphase (m)onte (c)arlo (s)ampling algorithm to generate steady states of a metabolic network def mmcs(self, ess = 1000, psrf_check = True, parallelism = False, num_threads = 2, solver = None): @@ -205,7 +180,7 @@ cdef class HPolytope: cdef int N_ess = ess cdef bint check_psrf = bool(psrf_check) # restrict variables to {0,1} using Python's rules cdef bint parallel = bool(parallelism) - + self.polytope_cpp.mmcs_initialize(n_variables, ess, check_psrf, parallel, num_threads) # Get max inscribed ball for the initial polytope @@ -215,14 +190,14 @@ cdef class HPolytope: while True: check = self.polytope_cpp.mmcs_step(&inner_point_for_c[0], radius, N_samples) - + if check > 1.0 and check < 2.0: break self.polytope_cpp.get_polytope_as_matrices(&new_A[0,0], &new_b[0]) new_temp_c, radius = inner_ball(np.asarray(new_A), np.asarray(new_b), solver) inner_point_for_c = np.asarray(new_temp_c) - + cdef double[:,::1] samples = np.zeros((n_variables, N_samples), dtype=np.float64, order="C") self.polytope_cpp.get_mmcs_samples(&T_matrix[0,0], &T_shift[0], &samples[0,0]) self.polytope_cpp.get_polytope_as_matrices(&new_A[0,0], 
&new_b[0])
diff --git a/setup.py b/setup.py
index c3a90e1e..77ec7ec7 100755
--- a/setup.py
+++ b/setup.py
@@ -27,7 +27,7 @@
 
 source_directory_list = ["dingo", join("dingo", "bindings")]
 
-compiler_args = ["-std=c++11", "-O3", "-DBOOST_NO_AUTO_PTR", "-ldl", "-lm", "-fopenmp"]
+compiler_args = ["-std=c++17", "-O3", "-DBOOST_NO_AUTO_PTR", "-ldl", "-lm", "-fopenmp"]
 
 lp_solve_compiler_args = ["-DYY_NEVER_INTERACTIVE", "-DLoadInverseLib=0", "-DLoadLanguageLib=0",
 "-DRoleIsExternalInvEngine", "-DINVERSE_ACTIVE=3", "-DLoadableBlasLib=0"]
 
diff --git a/volesti b/volesti
index 93bbc500..7c967086 160000
--- a/volesti
+++ b/volesti
@@ -1 +1 @@
-Subproject commit 93bbc5008b8ec4682fd20702e0531e4dc889e963
+Subproject commit 7c967086f2bbbc0b4c62fbd82c54ef915b86fd9c

From 658ddfaf23a940fe9f4697467c96ddb7643b2667 Mon Sep 17 00:00:00 2001
From: akisschinas
Date: Tue, 22 Oct 2024 16:05:11 +0300
Subject: [PATCH 2/4] Add mmcs method to sampling

---
 dingo/bindings/bindings.cpp | 23 +++++++++++++++--------
 dingo/bindings/bindings.h   |  3 ++-
 dingo/volestipy.pyx         |  9 ++++++---
 volesti                     |  2 +-
 4 files changed, 24 insertions(+), 13 deletions(-)

diff --git a/dingo/bindings/bindings.cpp b/dingo/bindings/bindings.cpp
index 881b1f31..ab86b085 100644
--- a/dingo/bindings/bindings.cpp
+++ b/dingo/bindings/bindings.cpp
@@ -91,7 +91,8 @@ double HPolytopeCPP::apply_sampling(int walk_len,
                                     double radius,
                                     double* samples,
                                     double variance_value,
-                                    double* bias_vector_){
+                                    double* bias_vector_,
+                                    int ess){
 
     RNGType rng(HP.dimension());
     HP.normalize();
@@ -133,6 +134,11 @@ double HPolytopeCPP::apply_sampling(int walk_len,
    } else if (strcmp(method, "vaidya_walk") == 0) { // vaidya walk
       uniform_sampling(rand_points, HP, rng, walk_len, number_of_points,
                            starting_point, number_of_points_to_burn);
+   } else if (strcmp(method, "mmcs") == 0) { // multiphase monte carlo sampling
+      MT S;
+      int total_ess;
+      mmcs(HP, ess, S, total_ess, walk_len, rng);
+      samples = S.data();
    } else if (strcmp(method, "gaussian_hmc_walk") == 0) { // Gaussian sampling with exact HMC walk
       NT a = NT(1)/(NT(2)*variance);
       gaussian_sampling(rand_points, HP, rng, walk_len, number_of_points, a,
@@ -162,14 +168,15 @@ double HPolytopeCPP::apply_sampling(int walk_len,
       throw std::runtime_error("This function must not be called.");
    }
 
-   // The following block of code allows us to copy the sampled points
-   auto n_si=0;
-   for (auto it_s = rand_points.cbegin(); it_s != rand_points.cend(); it_s++){
-      for (auto i = 0; i != it_s->dimension(); i++){
-         samples[n_si++] = (*it_s)[i];
-      }
+   if (strcmp(method, "mmcs") != 0) {
+      // The following block of code allows us to copy the sampled points
+      auto n_si=0;
+      for (auto it_s = rand_points.cbegin(); it_s != rand_points.cend(); it_s++){
+         for (auto i = 0; i != it_s->dimension(); i++){
+            samples[n_si++] = (*it_s)[i];
+         }
+      }
    }
-
    return 0.0;
 }
 ////////// End of "generate_samples()" //////////
diff --git a/dingo/bindings/bindings.h b/dingo/bindings/bindings.h
index eb4dcb20..553ea99e 100644
--- a/dingo/bindings/bindings.h
+++ b/dingo/bindings/bindings.h
@@ -140,7 +140,8 @@ class HPolytopeCPP{
 
      // the apply_sampling() function
      double apply_sampling(int walk_len, int number_of_points, int number_of_points_to_burn,
-                           char* method, double* inner_point, double radius, double* samples, double variance_value, double* bias_vector);
+                           char* method, double* inner_point, double radius, double* samples,
+                           double variance_value, double* bias_vector, int ess);
 
      void mmcs_initialize(int d, int ess, bool psrf_check, bool parallelism, int num_threads);
 
diff --git a/dingo/volestipy.pyx b/dingo/volestipy.pyx
index b1259684..6e8d3a98 100644 --- a/dingo/volestipy.pyx +++ b/dingo/volestipy.pyx @@ -54,7 +54,8 @@ cdef extern from "bindings.h": # Random sampling double apply_sampling(int walk_len, int number_of_points, int number_of_points_to_burn, \ - char* method, double* inner_point, double radius, double* samples, double variance_value, double* bias_vector) + char* method, double* inner_point, double radius, double* samples, \ + double variance_value, double* bias_vector, int ess) # Initialize the parameters for the (m)ultiphase (m)onte (c)arlo (s)ampling algorithm void mmcs_initialize(unsigned int d, int ess, int psrf_check, int parallelism, int num_threads); @@ -117,7 +118,8 @@ cdef class HPolytope: raise Exception('"{}" is not implemented to compute volume. Available methods are: {}'.format(vol_method, volume_methods)) # Likewise, the generate_samples() function - def generate_samples(self, method, number_of_points, number_of_points_to_burn, walk_len, variance_value, bias_vector, solver = None): + def generate_samples(self, method, number_of_points, number_of_points_to_burn, walk_len, + variance_value, bias_vector, solver = None, ess = 1000): n_variables = self._A.shape[1] cdef double[:,::1] samples = np.zeros((number_of_points, n_variables), dtype = np.float64, order = "C") @@ -130,7 +132,8 @@ cdef class HPolytope: cdef double[::1] bias_vector_ = np.asarray(bias_vector) self.polytope_cpp.apply_sampling(walk_len, number_of_points, number_of_points_to_burn, \ - method, &inner_point_for_c[0], radius, &samples[0,0], variance_value, &bias_vector_[0]) + method, &inner_point_for_c[0], radius, &samples[0,0], \ + variance_value, &bias_vector_[0], ess) return np.asarray(samples) diff --git a/volesti b/volesti index 7c967086..2626ef48 160000 --- a/volesti +++ b/volesti @@ -1 +1 @@ -Subproject commit 7c967086f2bbbc0b4c62fbd82c54ef915b86fd9c +Subproject commit 2626ef4874bdac20103568c8d71fbeb521c680b8 From b714027145508767279d98b79db21f7e9e2af06e Mon Sep 17 00:00:00 2001 From: akisschinas Date: Tue, 22 Oct 2024 16:46:29 +0300 Subject: [PATCH 3/4] Add tests that call mmcs from sampling function --- dingo/PolytopeSampler.py | 8 +-- dingo/bindings/bindings.cpp | 4 +- tests/sampling_no_multiphase.py | 123 ++++++++++---------------------- 3 files changed, 45 insertions(+), 90 deletions(-) diff --git a/dingo/PolytopeSampler.py b/dingo/PolytopeSampler.py index f6d76d52..27f37853 100644 --- a/dingo/PolytopeSampler.py +++ b/dingo/PolytopeSampler.py @@ -161,7 +161,7 @@ def generate_steady_states( return steady_states def generate_steady_states_no_multiphase( - self, method = 'billiard_walk', n=1000, burn_in=0, thinning=1, variance=1.0, bias_vector=None + self, method = 'billiard_walk', n=1000, burn_in=0, thinning=1, variance=1.0, bias_vector=None, ess=1000 ): """A member function to sample steady states. 
@@ -181,7 +181,7 @@ def generate_steady_states_no_multiphase(
         else:
             bias_vector = bias_vector.astype('float64')
 
-        samples = P.generate_samples(method.encode('utf-8'), n, burn_in, thinning, variance, bias_vector, self._parameters["solver"])
+        samples = P.generate_samples(method.encode('utf-8'), n, burn_in, thinning, variance, bias_vector, self._parameters["solver"], ess)
         samples_T = samples.T
 
         steady_states = map_samples_to_steady_states(
@@ -216,7 +216,7 @@ def sample_from_polytope(
 
     @staticmethod
     def sample_from_polytope_no_multiphase(
-        A, b, method = 'billiard_walk', n=1000, burn_in=0, thinning=1, variance=1.0, bias_vector=None, solver=None
+        A, b, method = 'billiard_walk', n=1000, burn_in=0, thinning=1, variance=1.0, bias_vector=None, solver=None, ess=1000
     ):
         """A static function to sample from a full dimensional polytope with an MCMC method.
 
@@ -235,7 +235,7 @@ def sample_from_polytope_no_multiphase(
 
         P = HPolytope(A, b)
 
-        samples = P.generate_samples(method.encode('utf-8'), n, burn_in, thinning, variance, bias_vector, solver)
+        samples = P.generate_samples(method.encode('utf-8'), n, burn_in, thinning, variance, bias_vector, solver, ess)
         samples_T = samples.T
 
         return samples_T
diff --git a/dingo/bindings/bindings.cpp b/dingo/bindings/bindings.cpp
index ab86b085..0e3a62f4 100644
--- a/dingo/bindings/bindings.cpp
+++ b/dingo/bindings/bindings.cpp
@@ -137,7 +137,9 @@ double HPolytopeCPP::apply_sampling(int walk_len,
    } else if (strcmp(method, "mmcs") == 0) { // multiphase monte carlo sampling
       MT S;
       int total_ess;
-      mmcs(HP, ess, S, total_ess, walk_len, rng);
+      //TODO: avoid passing polytopes as non-const references
+      const Hpolytope HP_const = HP;
+      mmcs(HP_const, ess, S, total_ess, walk_len, rng);
       samples = S.data();
    } else if (strcmp(method, "gaussian_hmc_walk") == 0) { // Gaussian sampling with exact HMC walk
       NT a = NT(1)/(NT(2)*variance);
diff --git a/tests/sampling_no_multiphase.py b/tests/sampling_no_multiphase.py
index 4f074504..86e304f3 100644
--- a/tests/sampling_no_multiphase.py
+++ b/tests/sampling_no_multiphase.py
@@ -14,6 +14,41 @@
 from dingo import MetabolicNetwork, PolytopeSampler
 from dingo.pyoptinterface_based_impl import set_default_solver
 
+def sampling(model, testing_class):
+    sampler = PolytopeSampler(model)
+
+    #mmcs sampling
+    steady_states = sampler.generate_steady_states_no_multiphase(method = 'mmcs', ess=1000)
+
+    testing_class.assertTrue( steady_states.shape[0] == 95 )
+    testing_class.assertTrue( steady_states.shape[1] == 1000 )
+
+    #Gaussian hmc sampling
+    steady_states = sampler.generate_steady_states_no_multiphase(method = 'gaussian_hmc_walk', n=500)
+
+    testing_class.assertTrue( steady_states.shape[0] == 95 )
+    testing_class.assertTrue( steady_states.shape[1] == 500 )
+
+    #exponential hmc sampling
+    steady_states = sampler.generate_steady_states_no_multiphase(method = 'exponential_hmc_walk', n=500, variance=50)
+
+    testing_class.assertTrue( steady_states.shape[0] == 95 )
+    testing_class.assertTrue( steady_states.shape[1] == 500 )
+
+    #hmc sampling with Gaussian distribution
+    steady_states = sampler.generate_steady_states_no_multiphase(method = 'hmc_leapfrog_gaussian', n=500)
+
+    testing_class.assertTrue( steady_states.shape[0] == 95 )
+    testing_class.assertTrue( steady_states.shape[1] == 500 )
+
+    #hmc sampling with exponential distribution
+    steady_states = sampler.generate_steady_states_no_multiphase(method = 'hmc_leapfrog_exponential', n=500, variance=50)
+
+    testing_class.assertTrue( steady_states.shape[0] == 95 )
+    testing_class.assertTrue( steady_states.shape[1] == 500 )
+
+    
#steady_states[12].mean() seems to have a lot of discrepancy between experiments, so we won't check the mean for now + #self.assertTrue( abs( steady_states[12].mean() - 2.504 ) < 1e-03 ) class TestSampling(unittest.TestCase): @@ -21,101 +56,19 @@ def test_sample_json(self): input_file_json = os.getcwd() + "/ext_data/e_coli_core.json" model = MetabolicNetwork.from_json( input_file_json ) - sampler = PolytopeSampler(model) - - #Gaussian hmc sampling - steady_states = sampler.generate_steady_states_no_multiphase(method = 'gaussian_hmc_walk', n=500) - - self.assertTrue( steady_states.shape[0] == 95 ) - self.assertTrue( steady_states.shape[1] == 500 ) - - #exponential hmc sampling - steady_states = sampler.generate_steady_states_no_multiphase(method = 'exponential_hmc_walk', n=500, variance=50) - - self.assertTrue( steady_states.shape[0] == 95 ) - self.assertTrue( steady_states.shape[1] == 500 ) - - #hmc sampling with Gaussian distribution - steady_states = sampler.generate_steady_states_no_multiphase(method = 'hmc_leapfrog_gaussian', n=500) - - self.assertTrue( steady_states.shape[0] == 95 ) - self.assertTrue( steady_states.shape[1] == 500 ) - - #hmc sampling with exponential distribution - steady_states = sampler.generate_steady_states_no_multiphase(method = 'hmc_leapfrog_exponential', n=500, variance=50) - - self.assertTrue( steady_states.shape[0] == 95 ) - self.assertTrue( steady_states.shape[1] == 500 ) - - #steady_states[12].mean() seems to have a lot of discrepancy between experiments, so we won't check the mean for now - #self.assertTrue( abs( steady_states[12].mean() - 2.504 ) < 1e-03 ) + sampling(model, self) def test_sample_mat(self): input_file_mat = os.getcwd() + "/ext_data/e_coli_core.mat" model = MetabolicNetwork.from_mat(input_file_mat) - sampler = PolytopeSampler(model) - - #gaussian hmc sampling - steady_states = sampler.generate_steady_states_no_multiphase(method = 'gaussian_hmc_walk', n=500) - - self.assertTrue( steady_states.shape[0] == 95 ) - self.assertTrue( steady_states.shape[1] == 500 ) - - #exponential hmc sampling - steady_states = sampler.generate_steady_states_no_multiphase(method = 'exponential_hmc_walk', n=500, variance=50) - - self.assertTrue( steady_states.shape[0] == 95 ) - self.assertTrue( steady_states.shape[1] == 500 ) - - #hmc sampling with Gaussian distribution - steady_states = sampler.generate_steady_states_no_multiphase(method = 'hmc_leapfrog_gaussian', n=500) - - self.assertTrue( steady_states.shape[0] == 95 ) - self.assertTrue( steady_states.shape[1] == 500 ) - - #hmc sampling with exponential distribution - steady_states = sampler.generate_steady_states_no_multiphase(method = 'hmc_leapfrog_exponential', n=500, variance=50) - - self.assertTrue( steady_states.shape[0] == 95 ) - self.assertTrue( steady_states.shape[1] == 500 ) - - #steady_states[12].mean() seems to have a lot of discrepancy between experiments, so we won't check the mean for now - #self.assertTrue( abs( steady_states[12].mean() - 2.504 ) < 1e-03 ) - + sampling(model, self) def test_sample_sbml(self): input_file_sbml = os.getcwd() + "/ext_data/e_coli_core.xml" model = MetabolicNetwork.from_sbml( input_file_sbml ) - sampler = PolytopeSampler(model) - - #gaussian hmc sampling - steady_states = sampler.generate_steady_states_no_multiphase(method = 'gaussian_hmc_walk', n=500) - - self.assertTrue( steady_states.shape[0] == 95 ) - self.assertTrue( steady_states.shape[1] == 500 ) - - #exponential hmc sampling - steady_states = sampler.generate_steady_states_no_multiphase(method = 
'exponential_hmc_walk', n=500, variance=50) - - self.assertTrue( steady_states.shape[0] == 95 ) - self.assertTrue( steady_states.shape[1] == 500 ) - - #hmc sampling with Gaussian distribution - steady_states = sampler.generate_steady_states_no_multiphase(method = 'hmc_leapfrog_gaussian', n=500) - - self.assertTrue( steady_states.shape[0] == 95 ) - self.assertTrue( steady_states.shape[1] == 500 ) - - #hmc sampling with exponential distribution - steady_states = sampler.generate_steady_states_no_multiphase(method = 'hmc_leapfrog_exponential', n=500, variance=50) - - self.assertTrue( steady_states.shape[0] == 95 ) - self.assertTrue( steady_states.shape[1] == 500 ) - - #steady_states[12].mean() seems to have a lot of discrepancy between experiments, so we won't check the mean for now - #self.assertTrue( abs( steady_states[12].mean() - 2.504 ) < 1e-03 ) + sampling(model, self) From eeeaf6ad4301885a49adbeb30e86264839a5a84c Mon Sep 17 00:00:00 2001 From: akisschinas Date: Tue, 22 Oct 2024 17:15:57 +0300 Subject: [PATCH 4/4] Update volesti submodule --- volesti | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/volesti b/volesti index 2626ef48..c3109bba 160000 --- a/volesti +++ b/volesti @@ -1 +1 @@ -Subproject commit 2626ef4874bdac20103568c8d71fbeb521c680b8 +Subproject commit c3109bba06a9b623446bdde4c5fadb02722de876
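
Usage note (illustrative sketch, not part of the patch series): with these patches applied, the walk is selected by a method string instead of an integer code, and 'mmcs' becomes available through the same entry point, with ess setting the target effective sample size. The snippet below only reuses calls that already appear in the diffs and tests above; the e_coli_core.json path assumes the script is run from the repository root, as in the test suite.

    # Illustrative sketch of the string-based sampling API introduced above.
    # Assumes a dingo build that includes these patches and a working directory
    # containing ext_data/e_coli_core.json (as in the test suite).
    import os
    from dingo import MetabolicNetwork, PolytopeSampler

    model = MetabolicNetwork.from_json(os.getcwd() + "/ext_data/e_coli_core.json")
    sampler = PolytopeSampler(model)

    # uniform sampling with the default billiard walk
    steady_states = sampler.generate_steady_states_no_multiphase(method='billiard_walk', n=500)

    # multiphase Monte Carlo sampling, targeting an effective sample size of 1000
    steady_states_mmcs = sampler.generate_steady_states_no_multiphase(method='mmcs', ess=1000)

    # exponential HMC sampling with a user-chosen variance
    steady_states_exp = sampler.generate_steady_states_no_multiphase(
        method='exponential_hmc_walk', n=500, variance=50
    )

    # each result has one row per reaction and one column per sample
    print(steady_states.shape)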
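A second sketch, this time for the static helper whose signature gains the ess keyword in these patches. The 3-D cube below is a toy polytope invented only for this example; it does not come from the patches or from any metabolic model.

    # Illustrative call of the static helper on a hand-built polytope (a 3-D cube).
    # A and b here are an assumption made only for this example.
    import numpy as np
    from dingo import PolytopeSampler

    d = 3
    A = np.vstack((np.eye(d), -np.eye(d)))   # encodes x_i <= 1 and -x_i <= 1
    b = np.ones(2 * d)

    samples = PolytopeSampler.sample_from_polytope_no_multiphase(
        A, b, method='billiard_walk', n=200, burn_in=10, thinning=1
    )
    print(samples.shape)   # expected (3, 200): one column per sampled point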