From 085f90efb28bcf22e5f340f401af97be38d3ba9c Mon Sep 17 00:00:00 2001
From: Giomara
Date: Fri, 12 Jul 2024 11:06:40 +0300
Subject: [PATCH] emo 2023

---
 desdeo_emo/EAs/BaseEA.py | 5 +-
 desdeo_emo/EAs/NSGAIIINUMS.py | 92 +
 desdeo_emo/EAs/RNSGAIII.py | 30 +-
 desdeo_emo/EAs/__init__.py | 2 +
 desdeo_emo/selection/NUMS_select.py | 359 ++
 desdeo_emo/selection/RNSGAIII_select.py | 122 +-
 desdeo_emo/selection/SelectionBase.py | 20 +-
 desdeo_emo/selection/__init__.py | 3 +-
 desdeo_emo/utilities/ReferenceVectors.py | 65 +-
 desdeo_emo/utilities/preference_converters.py | 2 +-
 docs/notebooks/1_5.txt | 4 +
 docs/notebooks/5.txt | 34 +
 docs/notebooks/7.txt | 34 +
 docs/notebooks/9.txt | 34 +
 docs/notebooks/ExampeRNSGAIII (copy).ipynb | 1357 +++++
 docs/notebooks/ExampeRNSGAIII.ipynb | 528 +-
 docs/notebooks/ExampleMOEAD.ipynb | 140 +-
 docs/notebooks/Proof_concept_UPEMO.ipynb | 4434 +++++++++++++++++
 docs/notebooks/River_Pollution.ipynb | 111 +-
 docs/notebooks/baseADM.py | 127 +
 docs/notebooks/extendedADM_NSGA_RNSGA_NUMS.py | 417 ++
 .../extendedADM_NSGA_RNSGA_NUMS_PS.py | 324 ++
 .../extendedADM_NSGA_RNSGA_NUMS_PS_5.py | 324 ++
 .../extendedADM_NSGA_RNSGA_NUMS_PS_7.py | 324 ++
 .../extendedADM_NSGA_RNSGA_NUMS_PS_9.py | 324 ++
 .../extendedADM_NSGA_RNSGA_NUMS_Ranges.py | 398 ++
 .../extendedADM_NSGAvsRVEA (copy).py | 320 ++
 docs/notebooks/extendedADM_NSGAvsRVEA.py | 329 ++
 ...ndedADM_NSGAvsRVEARanges (another copy).py | 310 ++
 .../extendedADM_NSGAvsRVEARanges (copy).py | 247 +
 .../notebooks/extendedADM_NSGAvsRVEARanges.py | 310 ++
 docs/notebooks/extendedADM_NSGAvsRVEA_PS.py | 283 ++
 .../extendedADM_NSGAvsRVEA_Ranges.py | 320 ++
 docs/notebooks/extendedADM_RVEA_RPvsRanges.py | 328 ++
 docs/notebooks/generatePreference.py | 415 ++
 docs/notebooks/rmetric.py | 222 +
 docs/notebooks/visualize_adm_test_dtlz.py | 280 ++
 37 files changed, 12441 insertions(+), 537 deletions(-)
 create mode 100644 desdeo_emo/EAs/NSGAIIINUMS.py
 create mode 100644 desdeo_emo/selection/NUMS_select.py
 create mode 100644 docs/notebooks/1_5.txt
 create mode 100644 docs/notebooks/5.txt
 create mode 100644 docs/notebooks/7.txt
 create mode 100644 docs/notebooks/9.txt
 create mode 100644 docs/notebooks/ExampeRNSGAIII (copy).ipynb
 create mode 100644 docs/notebooks/Proof_concept_UPEMO.ipynb
 create mode 100644 docs/notebooks/baseADM.py
 create mode 100644 docs/notebooks/extendedADM_NSGA_RNSGA_NUMS.py
 create mode 100644 docs/notebooks/extendedADM_NSGA_RNSGA_NUMS_PS.py
 create mode 100644 docs/notebooks/extendedADM_NSGA_RNSGA_NUMS_PS_5.py
 create mode 100644 docs/notebooks/extendedADM_NSGA_RNSGA_NUMS_PS_7.py
 create mode 100644 docs/notebooks/extendedADM_NSGA_RNSGA_NUMS_PS_9.py
 create mode 100644 docs/notebooks/extendedADM_NSGA_RNSGA_NUMS_Ranges.py
 create mode 100644 docs/notebooks/extendedADM_NSGAvsRVEA (copy).py
 create mode 100644 docs/notebooks/extendedADM_NSGAvsRVEA.py
 create mode 100644 docs/notebooks/extendedADM_NSGAvsRVEARanges (another copy).py
 create mode 100644 docs/notebooks/extendedADM_NSGAvsRVEARanges (copy).py
 create mode 100644 docs/notebooks/extendedADM_NSGAvsRVEARanges.py
 create mode 100644 docs/notebooks/extendedADM_NSGAvsRVEA_PS.py
 create mode 100644 docs/notebooks/extendedADM_NSGAvsRVEA_Ranges.py
 create mode 100644 docs/notebooks/extendedADM_RVEA_RPvsRanges.py
 create mode 100644 docs/notebooks/generatePreference.py
 create mode 100644 docs/notebooks/rmetric.py
 create mode 100644 docs/notebooks/visualize_adm_test_dtlz.py

diff --git a/desdeo_emo/EAs/BaseEA.py b/desdeo_emo/EAs/BaseEA.py
index fc7146ac..434cafc8 100644
--- a/desdeo_emo/EAs/BaseEA.py
+++ b/desdeo_emo/EAs/BaseEA.py
@@ -409,8 +409,8 @@ def request_plot(self) -> SimplePlotRequest:
             columns=self.population.problem.get_objective_names(),
         )
         dimensions_data.loc["minimize"] = self.population.problem._max_multiplier
-        dimensions_data.loc["ideal"] = self.population.ideal_objective_vector
-        dimensions_data.loc["nadir"] = self.population.nadir_objective_vector
+        dimensions_data.loc["ideal"] = self.population.problem.ideal_fitness
+        dimensions_data.loc["nadir"] = self.population.problem.nadir_fitness
         data = pd.DataFrame(
             self.population.objectives, columns=self.population.problem.objective_names
         )
@@ -419,7 +419,6 @@ def request_plot(self) -> SimplePlotRequest:
         )
 
     def request_preferences(self) -> Type[BaseRequest]:
-
         if self.interact is False:
             return
         if self._interaction_location == "Problem":
diff --git a/desdeo_emo/EAs/NSGAIIINUMS.py b/desdeo_emo/EAs/NSGAIIINUMS.py
new file mode 100644
index 00000000..26177329
--- /dev/null
+++ b/desdeo_emo/EAs/NSGAIIINUMS.py
@@ -0,0 +1,92 @@
+from typing import Dict
+
+from desdeo_emo.EAs.BaseEA import BaseDecompositionEA
+from desdeo_emo.population.Population import Population
+from desdeo_emo.selection.NUMS_select import NUMS_select
+from desdeo_problem import MOProblem
+import numpy as np
+
+class NSGAIIINUMS(BaseDecompositionEA):
+    """Python implementation of NSGA-III with NUMS-based reference vector adaptation.
+    Based on the pymoo package.
+
+    Most of the relevant code is contained in the super class. This class just assigns
+    the NUMS selection operator to BaseDecompositionEA.
+
+    Parameters
+    ----------
+    problem : MOProblem
+        The problem class object specifying the details of the problem.
+    population_size : int, optional
+        The desired population size, by default None, which sets up a default value
+        of population size depending upon the dimensionality of the problem.
+    population_params : Dict, optional
+        The parameters for the population class, by default None. See
+        desdeo_emo.population.Population for more details.
+    initial_population : Population, optional
+        An initial population class, by default None. Use this if you want to set up
+        a specific starting population, such as when the output of one EA is to be
+        used as the input of another.
+    lattice_resolution : int, optional
+        The number of divisions along individual axes in the objective space to be
+        used while creating the reference vector lattice by the simplex lattice
+        design. By default None.
+    selection_type : str, optional
+        One of ["mean", "optimistic", "robust"]. To be used in data-driven optimization.
+        To be used only with surrogate models which return an "uncertainity" factor.
+        Using "mean" is equivalent to using the mean predicted values from the surrogate
+        models and is the default case.
+        Using "optimistic" results in using (mean - uncertainty) values from the
+        surrogate models as the predicted value (in case of minimization). It is
+        (mean + uncertainty) for maximization.
+        Using "robust" is the opposite of using "optimistic".
+    a_priori : bool, optional
+        A bool variable defining whether a priori preference is to be used or not.
+        By default False.
+    interact : bool, optional
+        A bool variable defining whether interactive preference is to be used or
+        not. By default False.
+    n_iterations : int, optional
+        The total number of iterations to be run, by default 10. This is not a hard
+        limit and is only used for an internal counter.
+    n_gen_per_iter : int, optional
+        The total number of generations in an iteration to be run, by default 100.
+        This is not a hard limit and is only used for an internal counter.
+    total_function_evaluations : int, optional
+        Set an upper limit to the total number of function evaluations. When set to
+        zero, this argument is ignored and other termination criteria are used.
+    """
+
+    def __init__(
+        self,
+        problem: MOProblem,
+        population_size: int = None,
+        population_params: Dict = None,
+        n_survive: int = None,
+        initial_population: Population = None,
+        lattice_resolution: int = None,
+        selection_type: str = None,
+        interact: bool = False,
+        use_surrogates: bool = False,
+        n_iterations: int = 10,
+        n_gen_per_iter: int = 100,
+        total_function_evaluations: int = 0,
+        keep_archive: bool = False,
+    ):
+        super().__init__(
+            problem=problem,
+            population_size=population_size,
+            population_params=population_params,
+            initial_population=initial_population,
+            lattice_resolution=lattice_resolution,
+            interact=interact,
+            use_surrogates=use_surrogates,
+            n_iterations=n_iterations,
+            n_gen_per_iter=n_gen_per_iter,
+            total_function_evaluations=total_function_evaluations,
+            keep_archive=keep_archive,
+        )
+        self.selection_type = selection_type
+        selection_operator = NUMS_select(
+            self.population, n_survive, selection_type=selection_type
+        )
+        self.selection_operator = selection_operator
diff --git a/desdeo_emo/EAs/RNSGAIII.py b/desdeo_emo/EAs/RNSGAIII.py
index 561e748c..fd5ee41d 100644
--- a/desdeo_emo/EAs/RNSGAIII.py
+++ b/desdeo_emo/EAs/RNSGAIII.py
@@ -1,13 +1,10 @@
 from typing import Dict
-import numpy as np
-
 from desdeo_emo.EAs.BaseEA import BaseDecompositionEA
 from desdeo_emo.population.Population import Population
 from desdeo_emo.selection.RNSGAIII_select import RNSGAIII_select
 from desdeo_problem import MOProblem
-
-from scipy.special import comb
+import numpy as np
 
 
 class RNSGAIII(BaseDecompositionEA):
     """Python Implementation of NSGA-III. Based on the pymoo package.
@@ -62,8 +59,7 @@ class RNSGAIII(BaseDecompositionEA): def __init__( self, problem: MOProblem, - population_size_per_rp: int, - ref_points: np.array, + population_size: int = None, population_params: Dict = None, n_survive: int = None, initial_population: Population = None, @@ -76,26 +72,9 @@ def __init__( total_function_evaluations: int = 0, keep_archive: bool = False, ): - self.n_ref_points = ref_points.shape[0] - self.pop_size_rp = population_size_per_rp - self.ref_points = ref_points - temp_lattice_resolution = 0 - temp_number_of_vectors = 0 - while True: - temp_lattice_resolution += 1 - temp_number_of_vectors = comb( - temp_lattice_resolution + problem.n_of_objectives - 1, - problem.n_of_objectives - 1, - exact=True, - ) - if temp_number_of_vectors > population_size_per_rp: - break - lattice_resolution = temp_lattice_resolution - 1 - pop_size = (temp_number_of_vectors * self.n_ref_points) + problem.n_of_objectives - super().__init__( problem=problem, - population_size=pop_size, + population_size=population_size, population_params=population_params, initial_population=initial_population, lattice_resolution=lattice_resolution, @@ -108,5 +87,6 @@ def __init__( ) self.selection_type = selection_type selection_operator = RNSGAIII_select( - self.population, self.pop_size_rp, self.ref_points, n_survive, selection_type=selection_type, ) + self.population, n_survive, selection_type=selection_type + ) self.selection_operator = selection_operator diff --git a/desdeo_emo/EAs/__init__.py b/desdeo_emo/EAs/__init__.py index 02538109..5a225bb6 100644 --- a/desdeo_emo/EAs/__init__.py +++ b/desdeo_emo/EAs/__init__.py @@ -12,6 +12,7 @@ "IOPIS_RVEA", "MOEA_D", "RNSGAIII", + "NSGAIIINUMS" ] from desdeo_emo.EAs.BaseEA import BaseEA, BaseDecompositionEA @@ -22,3 +23,4 @@ from desdeo_emo.EAs.IOPIS import IOPIS_NSGAIII, IOPIS_RVEA from desdeo_emo.EAs.MOEAD import MOEA_D from desdeo_emo.EAs.RNSGAIII import RNSGAIII +from desdeo_emo.EAs.NSGAIIINUMS import NSGAIIINUMS diff --git a/desdeo_emo/selection/NUMS_select.py b/desdeo_emo/selection/NUMS_select.py new file mode 100644 index 00000000..565abbe9 --- /dev/null +++ b/desdeo_emo/selection/NUMS_select.py @@ -0,0 +1,359 @@ +import numpy as np + +# from pygmo import fast_non_dominated_sorting as nds +from desdeo_tools.utilities import fast_non_dominated_sort +from typing import List, Union +from desdeo_emo.utilities.preference_converters import UPEMO +from desdeo_emo.selection.SelectionBase import InteractiveDecompositionSelectionBase +from desdeo_emo.population.Population import Population +from desdeo_tools.interaction import ( + BoundPreference, + NonPreferredSolutionPreference, + PreferredSolutionPreference, + ReferencePointPreference, + validate_ref_point_data_type, + validate_ref_point_dimensions, + validate_ref_point_with_ideal, +) + +class NUMS_select(InteractiveDecompositionSelectionBase): + """The NSGA-III selection operator. Code is heavily based on the version of nsga3 in + the pymoo package by msu-coinlab. 
+ + Parameters + ---------- + pop : Population + [description] + n_survive : int, optional + [description], by default None + + """ + + def __init__( + self, pop: Population, n_survive: int = None, selection_type: str = None + ): + super().__init__(pop.pop_size, pop.problem.n_of_fitnesses, selection_type) + self.worst_fitness: np.ndarray = -np.full((1, pop.fitness.shape[1]), np.inf) + self.extreme_points: np.ndarray = None + if n_survive is None: + self.n_survive: int = pop.pop_size + if selection_type is None: + selection_type = "mean" + self.selection_type = selection_type + self.ideal: np.ndarray = pop.ideal_fitness_val + + def do(self, pop: Population) -> List[int]: + """Select individuals for mating for NSGA-III. + + Parameters + ---------- + pop : Population + The current population. + + Returns + ------- + List[int] + List of indices of the selected individuals + """ + ref_dirs = self.vectors.values_planar + fitness = self._calculate_fitness(pop) + # Calculating fronts and ranks + # fronts, dl, dc, rank = nds(fitness) + fronts = fast_non_dominated_sort(fitness) + fronts = [np.where(fronts[i])[0] for i in range(len(fronts))] + non_dominated = fronts[0] + fmin = np.amin(fitness, axis=0) + self.ideal = np.amin(np.vstack((self.ideal, fmin)), axis=0) + + # Calculating worst points + self.worst_fitness = np.amax(np.vstack((self.worst_fitness, fitness)), axis=0) + worst_of_population = np.amax(fitness, axis=0) + worst_of_front = np.max(fitness[non_dominated, :], axis=0) + self.extreme_points = self.get_extreme_points_c( + fitness[non_dominated, :], self.ideal, extreme_points=self.extreme_points + ) + nadir_point = self.get_nadir_point( + self.extreme_points, + self.ideal, + self.worst_fitness, + worst_of_population, + worst_of_front, + ) + + # Finding individuals in first 'n' fronts + selection = np.asarray([], dtype=int) + for front_id in range(len(fronts)): + if len(np.concatenate(fronts[: front_id + 1])) < self.n_survive: + continue + else: + fronts = fronts[: front_id + 1] + selection = np.concatenate(fronts) + break + F = fitness[selection] + + last_front = fronts[-1] + + # Selecting individuals from the last acceptable front. 
+ if len(selection) > self.n_survive: + niche_of_individuals, dist_to_niche = self.associate_to_niches( + F, ref_dirs, self.ideal, nadir_point + ) + # if there is only one front + if len(fronts) == 1: + n_remaining = self.n_survive + until_last_front = np.array([], dtype=np.int) + niche_count = np.zeros(len(ref_dirs), dtype=np.int) + + # if some individuals already survived + else: + until_last_front = np.concatenate(fronts[:-1]) + id_until_last_front = list(range(len(until_last_front))) + niche_count = self.calc_niche_count( + len(ref_dirs), niche_of_individuals[id_until_last_front] + ) + n_remaining = self.n_survive - len(until_last_front) + + last_front_selection_id = list(range(len(until_last_front), len(selection))) + if np.any(selection[last_front_selection_id] != last_front): + print("error!!!") + selected_from_last_front = self.niching( + fitness[last_front, :], + n_remaining, + niche_count, + niche_of_individuals[last_front_selection_id], + dist_to_niche[last_front_selection_id], + ) + final_selection = np.concatenate( + (until_last_front, last_front[selected_from_last_front]) + ) + if self.extreme_points is None: + print("Error") + if final_selection is None: + print("Error") + else: + final_selection = selection + return final_selection.astype(int) + + def get_extreme_points_c(self, F, ideal_point, extreme_points=None): + """Taken from pymoo""" + # calculate the asf which is used for the extreme point decomposition + asf = np.eye(F.shape[1]) + asf[asf == 0] = 1e6 + + # add the old extreme points to never loose them for normalization + _F = F + if extreme_points is not None: + _F = np.concatenate([extreme_points, _F], axis=0) + + # use __F because we substitute small values to be 0 + __F = _F - ideal_point + __F[__F < 1e-3] = 0 + + # update the extreme points for the normalization having the highest asf value + # each + F_asf = np.max(__F * asf[:, None, :], axis=2) + I = np.argmin(F_asf, axis=1) + extreme_points = _F[I, :] + return extreme_points + + def get_nadir_point( + self, + extreme_points, + ideal_point, + worst_point, + worst_of_front, + worst_of_population, + ): + LinAlgError = np.linalg.LinAlgError + try: + + # find the intercepts using gaussian elimination + M = extreme_points - ideal_point + b = np.ones(extreme_points.shape[1]) + plane = np.linalg.solve(M, b) + intercepts = 1 / plane + + nadir_point = ideal_point + intercepts + + if ( + not np.allclose(np.dot(M, plane), b) + or np.any(intercepts <= 1e-6) + or np.any(nadir_point > worst_point) + ): + raise LinAlgError() + + except LinAlgError: + nadir_point = worst_of_front + + b = nadir_point - ideal_point <= 1e-6 + nadir_point[b] = worst_of_population[b] + return nadir_point + + def niching(self, F, n_remaining, niche_count, niche_of_individuals, dist_to_niche): + survivors = [] + + # boolean array of elements that are considered for each iteration + mask = np.full(F.shape[0], True) + + while len(survivors) < n_remaining: + + # all niches where new individuals can be assigned to + next_niches_list = np.unique(niche_of_individuals[mask]) + + # pick a niche with minimum assigned individuals - break tie if necessary + next_niche_count = niche_count[next_niches_list] + next_niche = np.where(next_niche_count == next_niche_count.min())[0] + next_niche = next_niches_list[next_niche] + next_niche = next_niche[np.random.randint(0, len(next_niche))] + + # indices of individuals that are considered and assign to next_niche + next_ind = np.where( + np.logical_and(niche_of_individuals == next_niche, mask) + )[0] + + # shuffle to 
break random tie (equal perp. dist) or select randomly + np.random.shuffle(next_ind) + + if niche_count[next_niche] == 0: + next_ind = next_ind[np.argmin(dist_to_niche[next_ind])] + else: + # already randomized through shuffling + next_ind = next_ind[0] + + mask[next_ind] = False + survivors.append(int(next_ind)) + + niche_count[next_niche] += 1 + + return survivors + + def associate_to_niches( + self, F, ref_dirs, ideal_point, nadir_point, utopian_epsilon=0.0 + ): + utopian_point = ideal_point - utopian_epsilon + + denom = nadir_point - utopian_point + denom[denom == 0] = 1e-12 + + # normalize by ideal point and intercepts + N = (F - utopian_point) / denom + dist_matrix = self.calc_perpendicular_distance(N, ref_dirs) + + niche_of_individuals = np.argmin(dist_matrix, axis=1) + dist_to_niche = dist_matrix[np.arange(F.shape[0]), niche_of_individuals] + + return niche_of_individuals, dist_to_niche + + def calc_niche_count(self, n_niches, niche_of_individuals): + niche_count = np.zeros(n_niches, dtype=np.int) + index, count = np.unique(niche_of_individuals, return_counts=True) + niche_count[index] = count + return niche_count + + def calc_perpendicular_distance(self, N, ref_dirs): + u = np.tile(ref_dirs, (len(N), 1)) + v = np.repeat(N, len(ref_dirs), axis=0) + + norm_u = np.linalg.norm(u, axis=1) + + scalar_proj = np.sum(v * u, axis=1) / norm_u + proj = scalar_proj[:, None] * u / norm_u[:, None] + val = np.linalg.norm(proj - v, axis=1) + matrix = np.reshape(val, (len(N), len(ref_dirs))) + + return matrix + + def _calculate_fitness(self, pop) -> np.ndarray: + if self.selection_type == "mean": + return pop.fitness + if self.selection_type == "optimistic": + return pop.fitness - pop.uncertainity + if self.selection_type == "robust": + return pop.fitness + pop.uncertainity + + + def manage_preferences( + self, + pop: Population, + preference: Union[ + PreferredSolutionPreference, + NonPreferredSolutionPreference, + ReferencePointPreference, + BoundPreference, + None, + ], + ): + + if preference is None: + return self.adapt_RVs(pop.fitness) + if self._interaction_request_id != preference.request_id: + raise ValueError("Wrong request object provided. Request IDs don't match.") + if self.interaction_type == "Reference point": + return self.manage_reference_point(pop, preference) + if self.interaction_type == "Preferred solutions": + return self.manage_preferred_solutions(pop, preference) + if self.interaction_type == "Non-preferred solutions": + return self.manage_non_preferred_solutions(pop, preference) + if self.interaction_type == "Preferred ranges": + return self.manage_preferred_ranges(pop, preference) + raise ValueError("Interaction type not set.") + + def manage_reference_point( + self, pop: Population, preference: ReferencePointPreference + ): + if not isinstance(preference, ReferencePointPreference): + raise TypeError( + "Preference object must be an instance of ReferencePointPreference." 
+ ) + ideal = pop.ideal_fitness_val + refpoint = preference.response.values * pop.problem._max_multiplier + refpoint = refpoint[0] + #unit_ref_point = (refpoint - ideal) / (nadir - ideal) + + refpoint = refpoint - ideal + norm = np.sqrt(np.sum(np.square(refpoint))) + refpoint = refpoint / norm + #print(refpoint) + self.vectors.interactive_adapt_NUMS(refpoint) + #self.vectors.add_edge_vectors() + #self.vectors.neighbouring_angles() + + def manage_preferred_ranges(self, pop: Population, preference: BoundPreference): + if not isinstance(preference, BoundPreference): + raise TypeError("Preference object must be an instance of BoundPreference.") + #print(preference.response) + preference = np.atleast_2d(UPEMO(preference.response.T)) + preference = preference[0] + #print(preference) + ideal = pop.ideal_fitness_val + refpoint = preference * pop.problem._max_multiplier + + #unit_ref_point = (refpoint - ideal) / (nadir - ideal) + + refpoint = refpoint - ideal + norm = np.sqrt(np.sum(np.square(refpoint))) + refpoint = refpoint / norm + #print(refpoint) + self.vectors.interactive_adapt_NUMS(refpoint) + #self.vectors.add_edge_vectors() + #self.vectors.neighbouring_angles() + + def manage_preferred_solutions(self, pop: Population, preference: PreferredSolutionPreference): + if not isinstance(preference, PreferredSolutionPreference): + raise TypeError("Preference object must be an instance of BoundPreference.") + #print(preference.response) + preference = np.atleast_2d(UPEMO(preference.response)) + preference = preference[0] + #print("preference",preference) + ideal = pop.ideal_fitness_val + refpoint = preference * pop.problem._max_multiplier + + #unit_ref_point = (refpoint - ideal) / (nadir - ideal) + + refpoint = refpoint - ideal + norm = np.sqrt(np.sum(np.square(refpoint))) + refpoint = refpoint / norm + #print(refpoint) + self.vectors.interactive_adapt_NUMS(refpoint) + #self.vectors.add_edge_vectors() + #self.vectors.neighbouring_angles() \ No newline at end of file diff --git a/desdeo_emo/selection/RNSGAIII_select.py b/desdeo_emo/selection/RNSGAIII_select.py index b5f89d04..1f9248cb 100644 --- a/desdeo_emo/selection/RNSGAIII_select.py +++ b/desdeo_emo/selection/RNSGAIII_select.py @@ -2,11 +2,19 @@ # from pygmo import fast_non_dominated_sorting as nds from desdeo_tools.utilities import fast_non_dominated_sort -from typing import List +from typing import List, Union +from desdeo_emo.utilities.preference_converters import UPEMO from desdeo_emo.selection.SelectionBase import InteractiveDecompositionSelectionBase from desdeo_emo.population.Population import Population -from desdeo_emo.utilities.ReferenceVectors import ReferenceVectors - +from desdeo_tools.interaction import ( + BoundPreference, + NonPreferredSolutionPreference, + PreferredSolutionPreference, + ReferencePointPreference, + validate_ref_point_data_type, + validate_ref_point_dimensions, + validate_ref_point_with_ideal, +) class RNSGAIII_select(InteractiveDecompositionSelectionBase): """The NSGA-III selection operator. 
Code is heavily based on the version of nsga3 in @@ -22,14 +30,9 @@ class RNSGAIII_select(InteractiveDecompositionSelectionBase): """ def __init__( - self, - pop: Population, - pop_size_rp: int, - ref_points: np.array, - n_survive: int = None, - selection_type: str = None, + self, pop: Population, n_survive: int = None, selection_type: str = None ): - super().__init__(pop_size_rp, pop.problem.n_of_fitnesses, selection_type) + super().__init__(pop.pop_size, pop.problem.n_of_fitnesses, selection_type) self.worst_fitness: np.ndarray = -np.full((1, pop.fitness.shape[1]), np.inf) self.extreme_points: np.ndarray = None if n_survive is None: @@ -38,8 +41,6 @@ def __init__( selection_type = "mean" self.selection_type = selection_type self.ideal: np.ndarray = pop.ideal_fitness_val - self.ref_points = ref_points - self.pop_size_rp = pop_size_rp def do(self, pop: Population) -> List[int]: """Select individuals for mating for NSGA-III. @@ -55,7 +56,6 @@ def do(self, pop: Population) -> List[int]: List of indices of the selected individuals """ ref_dirs = self.vectors.values_planar - fitness = self._calculate_fitness(pop) # Calculating fronts and ranks # fronts, dl, dc, rank = nds(fitness) @@ -80,15 +80,6 @@ def do(self, pop: Population) -> List[int]: worst_of_front, ) - ref_dirs = ReferenceVectors( - number_of_vectors=self.pop_size_rp, - creation_type="RP_based", - ref_point=self.ref_points, - number_of_objectives=pop.problem.n_of_objectives, - ideal_vector=self.ideal, - nadir_vector=nadir_point, - ) - ref_dirs = ref_dirs.values_planar # Finding individuals in first 'n' fronts selection = np.asarray([], dtype=int) for front_id in range(len(fronts)): @@ -279,3 +270,90 @@ def _calculate_fitness(self, pop) -> np.ndarray: return pop.fitness - pop.uncertainity if self.selection_type == "robust": return pop.fitness + pop.uncertainity + + + def manage_preferences( + self, + pop: Population, + preference: Union[ + PreferredSolutionPreference, + NonPreferredSolutionPreference, + ReferencePointPreference, + BoundPreference, + None, + ], + ): + + if preference is None: + return self.adapt_RVs(pop.fitness) + if self._interaction_request_id != preference.request_id: + raise ValueError("Wrong request object provided. Request IDs don't match.") + if self.interaction_type == "Reference point": + return self.manage_reference_point(pop, preference) + if self.interaction_type == "Preferred solutions": + return self.manage_preferred_solutions(pop, preference) + if self.interaction_type == "Non-preferred solutions": + return self.manage_non_preferred_solutions(pop, preference) + if self.interaction_type == "Preferred ranges": + return self.manage_preferred_ranges(pop, preference) + raise ValueError("Interaction type not set.") + + def manage_reference_point( + self, pop: Population, preference: ReferencePointPreference + ): + if not isinstance(preference, ReferencePointPreference): + raise TypeError( + "Preference object must be an instance of ReferencePointPreference." 
+ ) + ideal = pop.ideal_fitness_val + refpoint = preference.response.values * pop.problem._max_multiplier + + #unit_ref_point = (refpoint - ideal) / (nadir - ideal) + + refpoint = refpoint - ideal + norm = np.sqrt(np.sum(np.square(refpoint))) + refpoint = refpoint / norm + #print(refpoint) + self.vectors.interactive_adapt_RNSGAIII(refpoint) + #self.vectors.add_edge_vectors() + #self.vectors.neighbouring_angles() + + def manage_preferred_ranges(self, pop: Population, preference: BoundPreference): + if not isinstance(preference, BoundPreference): + raise TypeError("Preference object must be an instance of BoundPreference.") + + preference = np.atleast_2d(UPEMO(preference.response.T)) + preference = preference[0] + ideal = pop.ideal_fitness_val + refpoint = preference * pop.problem._max_multiplier + + #unit_ref_point = (refpoint - ideal) / (nadir - ideal) + + refpoint = refpoint - ideal + norm = np.sqrt(np.sum(np.square(refpoint))) + refpoint = refpoint / norm + #print(refpoint) + self.vectors.interactive_adapt_RNSGAIII(refpoint) + #self.vectors.add_edge_vectors() + #self.vectors.neighbouring_angles() + + def manage_preferred_solutions(self, pop: Population, preference: PreferredSolutionPreference): + if not isinstance(preference, PreferredSolutionPreference): + raise TypeError("Preference object must be an instance of BoundPreference.") + + #print(preference.response) + preference = np.atleast_2d(UPEMO(preference.response)) + preference = preference[0] + #print(preference) + ideal = pop.ideal_fitness_val + refpoint = preference * pop.problem._max_multiplier + + #unit_ref_point = (refpoint - ideal) / (nadir - ideal) + + refpoint = refpoint - ideal + norm = np.sqrt(np.sum(np.square(refpoint))) + refpoint = refpoint / norm + #print(refpoint) + self.vectors.interactive_adapt_RNSGAIII(refpoint) + #self.vectors.add_edge_vectors() + #self.vectors.neighbouring_angles() \ No newline at end of file diff --git a/desdeo_emo/selection/SelectionBase.py b/desdeo_emo/selection/SelectionBase.py index ac8db51b..e7851233 100644 --- a/desdeo_emo/selection/SelectionBase.py +++ b/desdeo_emo/selection/SelectionBase.py @@ -87,6 +87,20 @@ def _calculate_fitness(self, pop: Population) -> np.ndarray: if self.selection_type == "robust": return pop.fitness + pop.uncertainity + + def validate_specified_solutions(indices: np.ndarray, n_solutions: int) -> None: + """Validate the Decision maker's choice of preferred/non-preferred solutions. + Args: + indices (np.ndarray): Index/indices of preferred solutions specified by the Decision maker. + n_solutions (int): Number of solutions in total. + Returns: + Raises: + ValidationError: In case the preference is invalid. + """ + + x=0 + return + def set_interaction_type(self, interaction_type: str = None) -> Union[None, dict]: if interaction_type is None: return self.allowable_interaction_types @@ -162,6 +176,9 @@ def request_preferred_solutions( "\tnumpy.array([2, 4, 5, 16]), for choosing the solutions with indices 2, 4, 5, and 16.\n" "The reference vectors will be focused around the chosen preferred solutions." ) + def validator(indices: np.ndarray, n_solutions: int): + x=0 + return self._interaction_request_id = np.random.randint(0, 1e9) @@ -170,6 +187,7 @@ def request_preferred_solutions( message=message, interaction_priority="recommended", request_id=self._interaction_request_id, + preference_validator= validator, ) def request_non_preferred_solutions( @@ -271,7 +289,7 @@ def manage_preferred_solutions( "Preference object must be an instance of PreferredSolutionPreference." 
) self.vectors.interactive_adapt_1( - z=pop.objectives[preference.response], + z=preference.response, n_solutions=np.shape(pop.objectives)[0], ) self.vectors.add_edge_vectors() diff --git a/desdeo_emo/selection/__init__.py b/desdeo_emo/selection/__init__.py index b6bdc88b..c6f9771b 100644 --- a/desdeo_emo/selection/__init__.py +++ b/desdeo_emo/selection/__init__.py @@ -1,9 +1,10 @@ """This module provides implementations of various selection operators. """ -__all__ = ["APD_Select", "NSGAIII_select", "TournamentSelection", "MOEAD_select", "RNSGAIII_select"] +__all__ = ["APD_Select", "NSGAIII_select", "TournamentSelection", "MOEAD_select", "RNSGAIII_select", "NUMS_select"] from desdeo_emo.selection.APD_Select_constraints import APD_Select from desdeo_emo.selection.NSGAIII_select import NSGAIII_select from desdeo_emo.selection.TournamentSelection import TournamentSelection from desdeo_emo.selection.MOEAD_select import MOEAD_select from desdeo_emo.selection.RNSGAIII_select import RNSGAIII_select +from desdeo_emo.selection.NUMS_select import NUMS_select diff --git a/desdeo_emo/utilities/ReferenceVectors.py b/desdeo_emo/utilities/ReferenceVectors.py index 243499d0..53e8fafe 100644 --- a/desdeo_emo/utilities/ReferenceVectors.py +++ b/desdeo_emo/utilities/ReferenceVectors.py @@ -144,7 +144,7 @@ def line_plane_intersection(l0, l1, p0, p_no, epsilon=1e-6): return ref_proj -def get_ref_dirs_from_points(ref_point, ref_dirs, mu=0.1): +def get_ref_dirs_from_points(ref_point, ref_dirs, n_obj, mu=0.1): """ This function takes user specified reference points, and creates smaller sets of equidistant Das-Dennis points around the projection of user points on the Das-Dennis hyperplane @@ -154,7 +154,7 @@ def get_ref_dirs_from_points(ref_point, ref_dirs, mu=0.1): :return: Set of reference points """ - n_obj = ref_point.shape[1] + #n_obj = ref_point.shape[1] #print(ref_dirs) @@ -187,7 +187,7 @@ def get_ref_dirs_from_points(ref_point, ref_dirs, mu=0.1): :, None] val.extend(ref_dir_for_aspiration_point) - val.extend(np.eye(n_obj)) # Add extreme points + #val.extend(np.eye(n_obj)) # Add extreme points return np.array(val) def denormalize(x, xl, xu): @@ -204,7 +204,7 @@ def __init__( creation_type: str = "Uniform", vector_type: str = "Spherical", ref_point: list = None, - sparse_parameter: float = 0.1, + sparse_parameter: float = 0.05, ideal_vector: np.array = None, nadir_vector: np.array = None, ): @@ -366,9 +366,10 @@ def _create(self, creation_type: str = "Uniform"): self.values = denormalize(complete_set, self.ideal_vector, self.nadir_vector) self.values_planar = np.copy(self.values) - self.number_of_vectors = number_of_vectors * num_ref_points + self.number_of_objectives + self.number_of_vectors = number_of_vectors * num_ref_points - #self.normalize() + self.normalize() + self.add_edge_vectors() return @@ -601,6 +602,58 @@ def slow_interactive_adapt(self, ref_point): self.normalize() return reached + def interactive_adapt_RNSGAIII(self, ref_point, translation_param=0.2): + #unit_ref_points = (self.ref_point - self.ideal_vector) / (self.nadir_vector - self.ideal_vector) + + complete_set = get_ref_dirs_from_points(np.array([ref_point]), self.initial_values, self.number_of_objectives, mu=translation_param) + #print(complete_set) + + + #denormalize + w = np.sqrt(sum(complete_set**2)) + + #self.values = denormalize(complete_set, ideal, nadir) + self.values = complete_set + self.values_planar = np.copy(self.values) + self.add_edge_vectors() + self.normalize() + return + + def interactive_adapt_NUMS(self, pivot_point, 
flag=1, roi_size=0.2):
+        epsilon_value = 0.0001
+        #pivot_point = ref_point/sum(ref_point)
+        new_RVs = np.copy(self.values)
+
+        if(flag): #keep the boundary
+            alpha = self.number_of_objectives/self.lattice_resolution
+            beta = 1.0 - roi_size
+        else:
+            alpha = self.number_of_objectives/self.lattice_resolution
+            beta = 1.0 - (1 - self.number_of_objectives/self.lattice_resolution) * roi_size
+
+        eta = (np.log(alpha)/np.log(beta)) - 1
+
+        for i in range(0, self.number_of_vectors):
+            if(np.sum(np.abs(pivot_point - self.initial_values_planar[i])) > epsilon_value):
+
+                norm_pivot_rv = np.linalg.norm(pivot_point - self.initial_values_planar[i])
+                temp_delta = np.zeros(self.number_of_objectives)
+                for j in range(0, self.number_of_objectives):
+                    value = pivot_point[j] * (norm_pivot_rv/(pivot_point[j] - self.initial_values_planar[i][j]))
+                    if (value > 0):
+                        temp_delta[j] = pivot_point[j] * (norm_pivot_rv/(pivot_point[j] - self.initial_values_planar[i][j]))
+
+                delta = np.min(temp_delta[np.nonzero(temp_delta)])
+                temp = delta - np.linalg.norm(pivot_point - self.initial_values_planar[i])
+                if ((temp < epsilon_value) and (flag == 0)):
+                    rho = roi_size * np.linalg.norm(pivot_point - self.initial_values_planar[i])
+                else:
+                    rho = delta - delta * (temp/delta)**(1/(eta + 1))
+                adaptation_value = (self.initial_values_planar[i] - pivot_point) / np.linalg.norm(pivot_point - self.initial_values_planar[i])
+                new_RVs[i] = pivot_point + rho * adaptation_value
+        self.values = np.copy(new_RVs)
+        self.values_planar = np.copy(new_RVs)
+
     def add_edge_vectors(self):
         """Add edge vectors to the list of reference vectors.
diff --git a/desdeo_emo/utilities/preference_converters.py b/desdeo_emo/utilities/preference_converters.py
index 96914e19..afb53e8e 100644
--- a/desdeo_emo/utilities/preference_converters.py
+++ b/desdeo_emo/utilities/preference_converters.py
@@ -18,4 +18,4 @@ def UPEMO(preference: np.ndarray):
         _type_: _description_
     """
     preference = np.atleast_2d(preference)
-    return preference.max(axis=0)
+    return preference.min(axis=0)
diff --git a/docs/notebooks/1_5.txt b/docs/notebooks/1_5.txt
new file mode 100644
index 00000000..886ac52e
--- /dev/null
+++ b/docs/notebooks/1_5.txt
@@ -0,0 +1,4 @@
+Problem DTLZ1 Objectives 5 Run 1 of 10
+Problem DTLZ1 Objectives 5 Run 1 of 10
+Problem DTLZ1 Objectives 5 Run 1 of 10
+Problem DTLZ1 Objectives 5 Run 1 of 10
diff --git a/docs/notebooks/5.txt b/docs/notebooks/5.txt
new file mode 100644
index 00000000..a9abad39
--- /dev/null
+++ b/docs/notebooks/5.txt
@@ -0,0 +1,34 @@
+Problem DTLZ1 Objectives 5 Run 1 of 10
+Problem DTLZ1 Objectives 5 Run 2 of 10
+Problem DTLZ1 Objectives 5 Run 3 of 10
+Problem DTLZ1 Objectives 5 Run 4 of 10
+Problem DTLZ1 Objectives 5 Run 5 of 10
+Problem DTLZ1 Objectives 5 Run 6 of 10
+Problem DTLZ1 Objectives 5 Run 7 of 10
+Problem DTLZ1 Objectives 5 Run 8 of 10
+Problem DTLZ1 Objectives 5 Run 9 of 10
+Problem DTLZ1 Objectives 5 Run 10 of 10
+Results for Problem DTLZ1 with 5 objectives
+Learning phase
+Ns
+1205.2 & 407.4918 & 398.3 & 96.2310 & 440.6 & 80.3308
+Decision phase
+Ns
+1866.3 & 37.4514 & 349.6 & 36.9681 & 377.5 & 2.2472
+Problem DTLZ3 Objectives 5 Run 1 of 10
+Problem DTLZ3 Objectives 5 Run 2 of 10
+Problem DTLZ3 Objectives 5 Run 3 of 10
+Problem DTLZ3 Objectives 5 Run 4 of 10
+Problem DTLZ3 Objectives 5 Run 5 of 10
+Problem DTLZ3 Objectives 5 Run 6 of 10
+Problem DTLZ3 Objectives 5 Run 7 of 10
+Problem DTLZ3 Objectives 5 Run 8 of 10
+Problem DTLZ3 Objectives 5 Run 9 of 10
+Problem DTLZ3 Objectives 5 Run 10 of 10
+Results for Problem DTLZ3 with 5
objectives +Learning phase +Ns +1599.5 & 345.5605 & 404.2 & 107.6362 & 463.5 & 54.9877 +Decision phase +Ns +1895.4 & 3.3226 & 348.7 & 21.6474 & 378.6 & 1.4967 diff --git a/docs/notebooks/7.txt b/docs/notebooks/7.txt new file mode 100644 index 00000000..fbe9f9ef --- /dev/null +++ b/docs/notebooks/7.txt @@ -0,0 +1,34 @@ +Problem DTLZ1 Objectives 7 Run 1 of 10 +Problem DTLZ1 Objectives 7 Run 2 of 10 +Problem DTLZ1 Objectives 7 Run 3 of 10 +Problem DTLZ1 Objectives 7 Run 4 of 10 +Problem DTLZ1 Objectives 7 Run 5 of 10 +Problem DTLZ1 Objectives 7 Run 6 of 10 +Problem DTLZ1 Objectives 7 Run 7 of 10 +Problem DTLZ1 Objectives 7 Run 8 of 10 +Problem DTLZ1 Objectives 7 Run 9 of 10 +Problem DTLZ1 Objectives 7 Run 10 of 10 +Results for Problem DTLZ1 with 7 objectives +Learning phase +Ns +1237.9 & 307.3807 & 101.1 & 59.6899 & 300.3 & 76.0514 +Decision phase +Ns +1267.5 & 12.3713 & 48.4 & 39.4416 & 254.0 & 1.5492 +Problem DTLZ3 Objectives 7 Run 1 of 10 +Problem DTLZ3 Objectives 7 Run 2 of 10 +Problem DTLZ3 Objectives 7 Run 3 of 10 +Problem DTLZ3 Objectives 7 Run 4 of 10 +Problem DTLZ3 Objectives 7 Run 5 of 10 +Problem DTLZ3 Objectives 7 Run 6 of 10 +Problem DTLZ3 Objectives 7 Run 7 of 10 +Problem DTLZ3 Objectives 7 Run 8 of 10 +Problem DTLZ3 Objectives 7 Run 9 of 10 +Problem DTLZ3 Objectives 7 Run 10 of 10 +Results for Problem DTLZ3 with 7 objectives +Learning phase +Ns +1508.1 & 409.6194 & 65.6 & 33.4610 & 194.3 & 91.0066 +Decision phase +Ns +1263.8 & 24.1031 & 28.8 & 20.4392 & 245.7 & 21.9456 diff --git a/docs/notebooks/9.txt b/docs/notebooks/9.txt new file mode 100644 index 00000000..64ecdd00 --- /dev/null +++ b/docs/notebooks/9.txt @@ -0,0 +1,34 @@ +Problem DTLZ1 Objectives 9 Run 1 of 10 +Problem DTLZ1 Objectives 9 Run 2 of 10 +Problem DTLZ1 Objectives 9 Run 3 of 10 +Problem DTLZ1 Objectives 9 Run 4 of 10 +Problem DTLZ1 Objectives 9 Run 5 of 10 +Problem DTLZ1 Objectives 9 Run 6 of 10 +Problem DTLZ1 Objectives 9 Run 7 of 10 +Problem DTLZ1 Objectives 9 Run 8 of 10 +Problem DTLZ1 Objectives 9 Run 9 of 10 +Problem DTLZ1 Objectives 9 Run 10 of 10 +Results for Problem DTLZ1 with 9 objectives +Learning phase +Ns +4053.6 & 753.0172 & 16.7 & 47.4406 & 560.6 & 103.1079 +Decision phase +Ns +2492.2 & 5.4000 & 11.9 & 34.3874 & 485.7 & 21.8406 +Problem DTLZ3 Objectives 9 Run 1 of 10 +Problem DTLZ3 Objectives 9 Run 2 of 10 +Problem DTLZ3 Objectives 9 Run 3 of 10 +Problem DTLZ3 Objectives 9 Run 4 of 10 +Problem DTLZ3 Objectives 9 Run 5 of 10 +Problem DTLZ3 Objectives 9 Run 6 of 10 +Problem DTLZ3 Objectives 9 Run 7 of 10 +Problem DTLZ3 Objectives 9 Run 8 of 10 +Problem DTLZ3 Objectives 9 Run 9 of 10 +Problem DTLZ3 Objectives 9 Run 10 of 10 +Results for Problem DTLZ3 with 9 objectives +Learning phase +Ns +4075.1 & 704.5970 & 136.7 & 60.7833 & 583.7 & 96.0532 +Decision phase +Ns +2472.6 & 25.2198 & 51.2 & 42.5977 & 485.4 & 15.6729 diff --git a/docs/notebooks/ExampeRNSGAIII (copy).ipynb b/docs/notebooks/ExampeRNSGAIII (copy).ipynb new file mode 100644 index 00000000..e387b763 --- /dev/null +++ b/docs/notebooks/ExampeRNSGAIII (copy).ipynb @@ -0,0 +1,1357 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Solving DTLZ1 using MOEA/D with two scalarization functions\n", + "\n", + "In this example we are going to solve the DTLZ1 benchmark problem using MOEA/D. 
To this aim, we need to import the following packages:\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "# Package for plotting the results\n", + "from typing import Dict, Literal, Tuple, Type, Union\n", + "\n", + "import plotly.graph_objects as go\n", + "import numpy as np\n", + "\n", + "# Package to create the Problem object\n", + "from desdeo_problem.testproblems.TestProblems import test_problem_builder\n", + "\n", + "# Packages for using MOEA/D and the available scalarization functions\n", + "from desdeo_emo.EAs.RNSGAIII import RNSGAIII\n", + "from desdeo_emo.EAs import RVEA, NSGAIII\n", + "\n", + "from desdeo_emo.utilities.preference_converters import UPEMO" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then, we will use the implementation of ZDT1 available in desdeo-problem." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "problem = test_problem_builder(name=\"ZDT1\")\n", + "\n", + "#multiple ref points\n", + "ref_points = np.array([[0.3, 0.4], [0.8, 0.1], [0.1, 0.8]])\n", + "\n", + "# ranges\n", + "\n", + "ranges = np.array(\n", + " [\n", + " [0.2, 0.3], # Lower\n", + " [0.5, 0.8] # Upper\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[[0.2 0.3]]\n", + "Running iteration 1\n", + "Running iteration 2\n", + "Running iteration 3\n", + "Running iteration 4\n", + "Running iteration 5\n", + "Running iteration 6\n", + "Running iteration 7\n", + "Running iteration 8\n", + "Running iteration 9\n", + "Running iteration 10\n" + ] + }, + { + "data": { + "text/html": [ + " \n", + " " + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.plotly.v1+json": { + "config": { + "plotlyServerURL": "https://plotly.com" + }, + "data": [ + { + "marker": { + "size": 1 + }, + "mode": "markers", + "type": "scatter", + "x": [ + 0.3011452483454223, + 0.31270531191545775, + 0.2977066311971161, + 0.2920029397030884, + 0.2908616832730554, + 0.298847571485183, + 0.26949188859031786, + 0.28744971808535713, + 0.2784344557912568, + 0.31619752929681244, + 0, + 0.2863217185904435, + 0.2885903427062412, + 0.276180779535968, + 0.270605177534008, + 0.2851930569920779, + 0.2672442931835724, + 0.2773107976500515, + 0.2954192527223315, + 0.3173669670577721, + 0.2897239048580536, + 0.27171524317924595, + 0.31853476451694546, + 0.319663643783288, + 0.28406069579185683, + 0.26837222162084756, + 0.3045942868324052, + 0.30574312607603793, + 0.2639334295403229, + 0.2818099936669955, + 0.2999879780959519, + 0.3022946519966077, + 0.27953965275830683, + 0.31153410942745113, + 0.27508617307246663, + 0.28068356633741975, + 0.3103831578306179, + 0.27395001879990477, + 0.31401056182050185, + 0.28293333403705756, + 0.29688293482989014, + 0.6025128998067257, + 0.2650096063942354, + 0.294327305442872, + 0.3092139431253951, + 0.3154202966377042, + 0.306899728500492, + 0.2931360108928444, + 0.30345441341954016, + 0.2661559612944148 + ], + "y": [ + 0.4512768550099589, + 0.4408609513809767, + 0.45443158479459045, + 0.45967468325275945, + 0.4607252227461937, + 0.45336579783220343, + 0.480914581973258, + 0.463884165581844, + 0.4723769884888907, + 0.4377263107855999, + 1.0000310874771006, + 0.46494217046773473, + 0.4628349562965415, + 0.4745115496036429, + 0.4798526085770868, + 0.4659983657839802, + 0.48309745562350975, 
+ 0.47344033851251105, + 0.4565052665806841, + 0.43668730168335784, + 0.4617712160365299, + 0.47878188330147436, + 0.4356597412855353, + 0.4346634211041068, + 0.4670538596037134, + 0.4819978409437935, + 0.44813887829205307, + 0.4470978672620983, + 0.48629384073694804, + 0.46918475221562567, + 0.4523170178621706, + 0.4502300951593495, + 0.4713180605875603, + 0.4418948892748467, + 0.47556296666972947, + 0.47023688627532234, + 0.4429275228183232, + 0.4766410587532617, + 0.4396822012566816, + 0.4681287016097612, + 0.45517220977764505, + 0.22381430776450756, + 0.48524556784974304, + 0.45751378478720206, + 0.443962192984911, + 0.43843348687100464, + 0.44604369033057184, + 0.4586214294652305, + 0.44917787758412187, + 0.4841336353084938 + ] + } + ], + "layout": { + "template": { + "data": { + "bar": [ + { + "error_x": { + "color": "#2a3f5f" + }, + "error_y": { + "color": "#2a3f5f" + }, + "marker": { + "line": { + "color": "#E5ECF6", + "width": 0.5 + }, + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "bar" + } + ], + "barpolar": [ + { + "marker": { + "line": { + "color": "#E5ECF6", + "width": 0.5 + }, + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "barpolar" + } + ], + "carpet": [ + { + "aaxis": { + "endlinecolor": "#2a3f5f", + "gridcolor": "white", + "linecolor": "white", + "minorgridcolor": "white", + "startlinecolor": "#2a3f5f" + }, + "baxis": { + "endlinecolor": "#2a3f5f", + "gridcolor": "white", + "linecolor": "white", + "minorgridcolor": "white", + "startlinecolor": "#2a3f5f" + }, + "type": "carpet" + } + ], + "choropleth": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "choropleth" + } + ], + "contour": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "contour" + } + ], + "contourcarpet": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "contourcarpet" + } + ], + "heatmap": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "heatmap" + } + ], + "heatmapgl": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "heatmapgl" + } + ], + "histogram": [ + { + "marker": { + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "histogram" + 
} + ], + "histogram2d": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "histogram2d" + } + ], + "histogram2dcontour": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "histogram2dcontour" + } + ], + "mesh3d": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "mesh3d" + } + ], + "parcoords": [ + { + "line": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "parcoords" + } + ], + "pie": [ + { + "automargin": true, + "type": "pie" + } + ], + "scatter": [ + { + "fillpattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + }, + "type": "scatter" + } + ], + "scatter3d": [ + { + "line": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatter3d" + } + ], + "scattercarpet": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattercarpet" + } + ], + "scattergeo": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattergeo" + } + ], + "scattergl": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattergl" + } + ], + "scattermapbox": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattermapbox" + } + ], + "scatterpolar": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterpolar" + } + ], + "scatterpolargl": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterpolargl" + } + ], + "scatterternary": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterternary" + } + ], + "surface": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "surface" + } + ], + "table": [ + { + "cells": { + "fill": { + "color": "#EBF0F8" + }, + "line": { + "color": "white" + } + }, + "header": { + "fill": { + "color": "#C8D4E3" + }, + "line": { + "color": "white" + } + }, + "type": "table" + } + ] + }, + "layout": { + "annotationdefaults": { + "arrowcolor": "#2a3f5f", + "arrowhead": 0, + "arrowwidth": 1 + }, + "autotypenumbers": "strict", + "coloraxis": { + "colorbar": { + 
"outlinewidth": 0, + "ticks": "" + } + }, + "colorscale": { + "diverging": [ + [ + 0, + "#8e0152" + ], + [ + 0.1, + "#c51b7d" + ], + [ + 0.2, + "#de77ae" + ], + [ + 0.3, + "#f1b6da" + ], + [ + 0.4, + "#fde0ef" + ], + [ + 0.5, + "#f7f7f7" + ], + [ + 0.6, + "#e6f5d0" + ], + [ + 0.7, + "#b8e186" + ], + [ + 0.8, + "#7fbc41" + ], + [ + 0.9, + "#4d9221" + ], + [ + 1, + "#276419" + ] + ], + "sequential": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "sequentialminus": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ] + }, + "colorway": [ + "#636efa", + "#EF553B", + "#00cc96", + "#ab63fa", + "#FFA15A", + "#19d3f3", + "#FF6692", + "#B6E880", + "#FF97FF", + "#FECB52" + ], + "font": { + "color": "#2a3f5f" + }, + "geo": { + "bgcolor": "white", + "lakecolor": "white", + "landcolor": "#E5ECF6", + "showlakes": true, + "showland": true, + "subunitcolor": "white" + }, + "hoverlabel": { + "align": "left" + }, + "hovermode": "closest", + "mapbox": { + "style": "light" + }, + "paper_bgcolor": "white", + "plot_bgcolor": "#E5ECF6", + "polar": { + "angularaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "bgcolor": "#E5ECF6", + "radialaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + } + }, + "scene": { + "xaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + }, + "yaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + }, + "zaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + } + }, + "shapedefaults": { + "line": { + "color": "#2a3f5f" + } + }, + "ternary": { + "aaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "baxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "bgcolor": "#E5ECF6", + "caxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + } + }, + "title": { + "x": 0.05 + }, + "xaxis": { + "automargin": true, + "gridcolor": "white", + "linecolor": "white", + "ticks": "", + "title": { + "standoff": 15 + }, + "zerolinecolor": "white", + "zerolinewidth": 2 + }, + "yaxis": { + "automargin": true, + "gridcolor": "white", + "linecolor": "white", + "ticks": "", + "title": { + "standoff": 15 + }, + "zerolinecolor": "white", + "zerolinewidth": 2 + } + } + } + } + }, + "text/html": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[[0.30114525 0.45127686]\n", + " [0.31270531 0.44086095]\n", + " [0.29770663 0.45443158]\n", + " [0.29200294 0.45967468]\n", + " [0.29086168 0.46072522]\n", + " [0.29884757 0.4533658 ]\n", + " [0.26949189 0.48091458]\n", + " [0.28744972 0.46388417]\n", + " [0.27843446 0.47237699]\n", + " [0.31619753 0.43772631]\n", + " [0. 1.00003109]\n", + " [0.28632172 0.46494217]\n", + " [0.28859034 0.46283496]\n", + " [0.27618078 0.47451155]\n", + " [0.27060518 0.47985261]\n", + " [0.28519306 0.46599837]\n", + " [0.26724429 0.48309746]\n", + " [0.2773108 0.47344034]\n", + " [0.29541925 0.45650527]\n", + " [0.31736697 0.4366873 ]\n", + " [0.2897239 0.46177122]\n", + " [0.27171524 0.47878188]\n", + " [0.31853476 0.43565974]\n", + " [0.31966364 0.43466342]\n", + " [0.2840607 0.46705386]\n", + " [0.26837222 0.48199784]\n", + " [0.30459429 0.44813888]\n", + " [0.30574313 0.44709787]\n", + " [0.26393343 0.48629384]\n", + " [0.28180999 0.46918475]\n", + " [0.29998798 0.45231702]\n", + " [0.30229465 0.4502301 ]\n", + " [0.27953965 0.47131806]\n", + " [0.31153411 0.44189489]\n", + " [0.27508617 0.47556297]\n", + " [0.28068357 0.47023689]\n", + " [0.31038316 0.44292752]\n", + " [0.27395002 0.47664106]\n", + " [0.31401056 0.4396822 ]\n", + " [0.28293333 0.4681287 ]\n", + " [0.29688293 0.45517221]\n", + " [0.6025129 0.22381431]\n", + " [0.26500961 0.48524557]\n", + " [0.29432731 0.45751378]\n", + " [0.30921394 0.44396219]\n", + " [0.3154203 0.43843349]\n", + " [0.30689973 0.44604369]\n", + " [0.29313601 0.45862143]\n", + " [0.30345441 0.44917788]\n", + " [0.26615596 0.48413364]]\n" + ] + } + ], + "source": [ + "#Outputs\n", + "UPEMO(ref_points)\n", + "UPEMO(ranges)\n", + "\n", + "print(np.atleast_2d(UPEMO(ranges)))\n", + "\n", + "evolver = RNSGAIII(problem, 50, np.atleast_2d(UPEMO(ranges)), n_iterations=10, n_gen_per_iter=100)\n", + "\n", + "evolver.start()\n", + "while evolver.continue_evolution():\n", + " evolver.iterate()\n", + " print(f\"Running iteration {evolver._iteration_counter}\")\n", + "\n", + "objectives = evolver.population.objectives\n", + "fig5 = go.Figure(data=go.Scatter(x=objectives[:,0],\n", + " y=objectives[:,1],\n", + " mode=\"markers\",\n", + " marker_size=1))\n", + "fig5.show()\n", + "\n", + "print(objectives)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "# How to run tests\n", + "\n", + "class automated_tests():\n", + " def __init__(self, maybe_some_other_params = None):\n", + " self.maybe_some_other_params = maybe_some_other_params\n", + " self.adm = \"Initialize something here\" \n", + " # I assume that the adm is this one:\n", + " # https://github.com/industrial-optimization-group/adm-emo/blob/master/adm_emo/zdt1_adm_test.ipynb\n", + " self.problem = test_problem_builder(name=\"ZDT2\") # This should be a parameter\n", + " self.EAs = {\n", + " \"RVEA\": RVEA,\n", + " \"NSGAIII\": NSGAIII,\n", + " \"RNSGAIII\": RNSGAIII\n", + " }\n", + " self.fronts = {\n", + " \"RVEA\": None,\n", + " \"NSGAIII\": None,\n", + " \"RNSGAIII\": None\n", + " }\n", + " self.composite_front = None\n", + " \n", + " def get_reference_point(self):\n", + " # something to do with self.fronts, self.composite_front, and self.adm\n", + " pass\n", + " \n", + " def get_ranges(self):\n", + " # Same as before\n", + " pass\n", + " \n", + " def iterate(self, emo_params: Dict):\n", + " emo_params_example = {\n", + " \"RVEA\": {\n", 
+ " \"interact\": True,\n", + " \"n_gen_per_iter\": 100,\n", + " \"n_iterations\" : 5\n", + " },\n", + " \"NSGAIII\": {\"blah\":20}, # The keys in the internal dict should be the same as the EA parameter names\n", + " \"RNSGAIII\": {\"others\":20}\n", + " }\n", + " preference = self.get_reference_point()\n", + " # or\n", + " preference = self.get_ranges()\n", + " preference = UPEMO(preference)\n", + " for method_name, _ in self.EAs.items():\n", + " if method_name in [\"RVEA\", \"NSGAIII\"]:\n", + " evolver = self.EAs[method_name](\n", + " self.problem,\n", + " **emo_params[method_name]\n", + " )\n", + " evolver.set_interaction_type('Reference point')\n", + " pref, plot = evolver.start()\n", + " while evolver.continue_evolution():\n", + " pref.response = pd.DataFrame(\n", + " [preference],\n", + " columns=pref.content['dimensions_data'].columns)\n", + " pref, plot = evolver.iterate(pref)\n", + " # done evolving, do archiving, use fronts and composite_front\n", + " elif method_name == \"RNSGAIII\":\n", + " evolver = self.EAs[method_name](\n", + " self.problem,\n", + " 50,\n", + " np.atleast_2d(preference),\n", + " **emo_params[method_name] #other params\n", + " )\n", + " evolver.start()\n", + " while evolver.continue_evolution():\n", + " evolver.iterate()\n", + " # done evolving, do archiving, use fronts and composite_front\n", + " def multiple_runs(self):\n", + " pass\n", + " # Define the structure of the tests: list of problems, list of adms, list of parameter values.\n", + " # Loop over these experiments by assigning self.problem, self.adm, and emo_params\n", + " # Run iterate as many times as needed\n", + " # You can also save initial population in some variable for future iterations if you wish" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we need to pass some parameters to the MOEA/D: the problem to solve, and the number of generations. The rest of the parameters will take the default values, for more information about the available options, please refer to the API documentation. 
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.14" + }, + "vscode": { + "interpreter": { + "hash": "695f87bea94b36c10be7936e9307946b8fdfe2c4db260bb09ff657dfeafbd618" + } + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/notebooks/ExampeRNSGAIII.ipynb b/docs/notebooks/ExampeRNSGAIII.ipynb index 6fab084c..1ecf4fe2 100644 --- a/docs/notebooks/ExampeRNSGAIII.ipynb +++ b/docs/notebooks/ExampeRNSGAIII.ipynb @@ -16,6 +16,8 @@ "outputs": [], "source": [ "# Package for plotting the results\n", + "from typing import Dict, Literal, Tuple, Type, Union\n", + "\n", "import plotly.graph_objects as go\n", "import numpy as np\n", "\n", @@ -24,6 +26,7 @@ "\n", "# Packages for using MOEA/D and the available scalarization functions\n", "from desdeo_emo.EAs.RNSGAIII import RNSGAIII\n", + "from desdeo_emo.EAs.NSGAIIINUMS import NSGAIIINUMS\n", "from desdeo_emo.EAs import RVEA, NSGAIII\n", "\n", "from desdeo_emo.utilities.preference_converters import UPEMO" @@ -42,17 +45,19 @@ "metadata": {}, "outputs": [], "source": [ - "problem = test_problem_builder(name=\"ZDT2\")\n", + "problem = test_problem_builder(name=\"ZDT1\")\n", + "problem.ideal_fitness = np.asarray([0] * 2)\n", + "problem.nadir_fitness = abs(np.random.normal(size=2, scale=0.15)) + 1\n", "\n", "#multiple ref points\n", - "ref_points = np.array([[0.3, 0.4], [0.8, 0.1], [0.1, 0.8]])\n", + "ref_points = np.array([[0.3, 0.4]])\n", "\n", "# ranges\n", "\n", "ranges = np.array(\n", " [\n", - " [0.3, 0.3], # Lower\n", - " [0.8, 0.8] # Upper\n", + " [0.2, 0.3], # Lower\n", + " [0.5, 0.8] # Upper\n", " ]\n", ")" ] @@ -61,140 +66,19 @@ "cell_type": "code", "execution_count": 3, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "array([0.8, 0.8])" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "#Outputs\n", - "UPEMO(ref_points)\n", - "UPEMO(ranges)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# How to run tests\n", - "\n", - "class automated_tests():\n", - " def __init__(self, maybe_some_other_params = None):\n", - " self.maybe_some_other_params = maybe_some_other_params\n", - " self.adm = \"Initialize something here\" \n", - " # I assume that the adm is this one:\n", - " # https://github.com/industrial-optimization-group/adm-emo/blob/master/adm_emo/zdt1_adm_test.ipynb\n", - " self.problem = test_problem_builder(name=\"ZDT2\") # This should be a parameter\n", - " self.EAs = {\n", - " \"RVEA\": RVEA,\n", - " \"NSGAIII\": NSGAIII,\n", - " \"RNSGAIII\": RNSGAIII\n", - " }\n", - " self.fronts = {\n", - " \"RVEA\": None,\n", - " \"NSGAIII\": None,\n", - " \"RNSGAIII\": None\n", - " }\n", - " self.composite_front = None\n", - " \n", - " def get_reference_point(self):\n", - " # something to do with self.fronts, self.composite_front, and 
self.adm\n", - " pass\n", - " \n", - " def get_ranges(self):\n", - " # Same as before\n", - " pass\n", - " \n", - " def iterate(self, emo_params: Dict):\n", - " emo_params_example = {\n", - " \"RVEA\": {\n", - " \"interact\": True,\n", - " \"n_gen_per_iter\": 100,\n", - " \"n_iterations\" : 5\n", - " },\n", - " \"NSGAIII\": {\"blah\":20}, # The keys in the internal dict should be the same as the EA parameter names\n", - " \"RNSGAIII\": {\"others\":20}\n", - " }\n", - " preference = self.get_reference_point()\n", - " # or\n", - " preference = self.get_ranges()\n", - " preference = UPEMO(preference)\n", - " for method_name, _ in self.EAs.items():\n", - " if method_name in [\"RVEA\", \"NSGAIII\"]:\n", - " evolver = self.EAs[method_name](\n", - " self.problem,\n", - " **emo_params[method_name]\n", - " )\n", - " evolver.set_interaction_type('Reference point')\n", - " pref, plot = evolver.start()\n", - " while evolver.continue_evolution()\n", - " pref.response = pd.DataFrame(\n", - " [preference],\n", - " columns=pref.content['dimensions_data'].columns)\n", - " pref, plot = evolver.iterate(pref)\n", - " # done evolving, do archiving, use fronts and composite_front\n", - " elif method_name == \"RNSGAIII\":\n", - " evolver = self.EAs[method_name](\n", - " self.problem,\n", - " 50,\n", - " np.atleast_2d(preference),\n", - " **emo_params[method_name] #other params\n", - " )\n", - " evolver.start()\n", - " while evolver.continue_evolution():\n", - " evolver.iterate()\n", - " # done evolving, do archiving, use fronts and composite_front\n", - " def multiple_runs(self):\n", - " # Define the structure of the tests: list of problems, list of adms, list of parameter values.\n", - " # Loop over these experiments by assigning self.problem, self.adm, and emo_params\n", - " # Run iterate as many times as needed\n", - " # You can also save initial population in some variable for future iterations if you wish" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we need to pass some parameters to the MOEA/D: the problem to solve, and the number of generations. The rest of the parameters will take the default values, for more information about the available options, please refer to the API documentation. 
" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "evolver = RNSGAIII(problem, 50, np.atleast_2d(UPEMO(ranges)), n_iterations=10, n_gen_per_iter=30)" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Running iteration 1\n", - "Running iteration 2\n", - "Running iteration 3\n", - "Running iteration 4\n", - "Running iteration 5\n", - "Running iteration 6\n", - "Running iteration 7\n", - "Running iteration 8\n", - "Running iteration 9\n", - "Running iteration 10\n" + "Please provide a reference point worse than the ideal point:\n", + "\n", + "f1 0.0\n", + "f2 0.0\n", + "Name: ideal, dtype: object\n", + "The reference point will be used to focus the reference vectors towards the preferred region.\n", + "If a reference point is not provided, the reference vectors are spread uniformly in the objective space.\n", + "[0.6 0.8]\n" ] }, { @@ -287,7 +171,7 @@ "data": { "application/vnd.plotly.v1+json": { "config": { - "plotlyServerURL": "https://plot.ly" + "plotlyServerURL": "https://plotly.com" }, "data": [ { @@ -297,119 +181,112 @@ "mode": "markers", "type": "scatter", "x": [ - 0.5593318714295057, - 0.5808188927136447, - 0.610973151263948, - 0.5329624576195996, - 0.6091507361644628, - 0.5492705073515463, - 0.5370549757721967, - 0.6053129921796643, - 0.5651018374655041, - 0.5960551305858404, - 0.5788055878106991, - 0.5309557292833357, - 0.6072006287947866, - 0.5730493593308343, - 0.5513512863540656, - 0.5349979056318265, - 0.5940354940930149, - 0.5768145655245105, - 0.586552630076195, - 0.5432104085415947, - 0.5184132102669012, - 0.5826169326429322, - 0.5473914813868974, - 0.599716112733096, - 0.5902918080850285, - 0.553263786626874, - 0.5883683355206103, - 0.5206006767747389, - 0.5390998906983901, - 0.5632151332252286, - 0.5672043260377828, - 0.569195780010813, - 0.5551738122180687, - 0.5979372300395939, - 0.5453488940185817, + 0.35532120068289164, + 0.3113904642132602, + 0.33424709034003636, + 0.31979711934602967, + 0.30799694705966874, + 0.3271323976530837, + 0.3392287329450934, + 0.32955933201333115, + 0.3276892360550757, + 0.3156994457214237, + 0.3373209299704781, + 0.32081171965512895, + 0.3186400862219846, + 0.3413478112737472, + 1, + 0.3216646919994997, + 0.33010264195953215, + 0.3234351577361826, + 0.3311533801239209, + 0.3305590277017084, + 0.3598694569140233, + 0.32913594745711006, + 0.3365014780260988, + 0.31372905794848815, + 0.3226292726252054, + 0.3490724120412126, + 0.3025762449562491, + 0.36717654480350304, + 0.3254786641980508, + 0.32602506984003177, + 0.3349239110214327, + 0.31721918012673617, + 0.33288136825853526, + 0.32861506943217883, + 0.33339256812279217, + 0.33224546010133005, + 0.349592308074397, + 0.3356773964967832, + 0.3241133472755245, + 0.3440078253402525, + 0.3381999226172209, + 0.32817256863048677, 0, - 0.5750207695301554, - 0.5225886466355696, - 0.5845995471499794, - 0.6128992476396417, - 0.5921676239050236, - 0.5246289885561629, - 0.5710348807898037, - 0.8974795503458932, - 0.5413530681437971, - 0.5572639863324738, - 0.6015598115321903, - 0.5266971514340374, - 0.549252335948469, - 0.6146772173093021, - 0.6035017119308279, - 0.5613652722997271, - 0.5288465209970012 + 0.32482894968857534, + 0.3402600337838854, + 0.333023423593726, + 0.3525203528334348, + 0.32662238234794155, + 0.3426931940866978, + 0.345520343826107 ], "y": [ - 0.6882276170299813, - 0.6634122048283481, - 0.6275293938977767, - 
0.7168754142501211, - 0.6300240631442849, - 0.6991927265353892, - 0.7123471974577441, - 0.6344817794658645, - 0.6813918820384407, - 0.645595348766057, - 0.6656928004915234, - 0.7189485248182573, - 0.6321025724691882, - 0.6725110820541904, - 0.6968433354695085, - 0.7144146468680125, - 0.6479060043878057, - 0.6679949093631204, - 0.6568338676026533, - 0.7057919409067802, - 0.7320338459702748, - 0.6612407564094845, - 0.7012001773799984, - 0.6411263598406103, - 0.6524967545341488, - 0.6946592260227078, - 0.6545420595475888, - 0.7298470868763806, - 0.7102321551889877, - 0.6836766073875403, - 0.6792400717319647, - 0.6770408318839835, - 0.6924052830240824, - 0.6433194611936177, - 0.7034692543740922, - 1.0004383241296866, - 0.6701490310880968, - 0.7275655657996962, - 0.6589535338544629, - 0.6253125067445532, - 0.6501292479583048, - 0.7254184680416027, - 0.6744624883605834, - 0.19620282546915788, - 0.70787955556073, - 0.6901314008826267, - 0.6388245423806503, - 0.7232891863967473, - 0.6994228055967259, - 0.623158269850505, - 0.6367543235216215, - 0.6855960409677042, - 0.7211274029184701 + 0.40430640422743747, + 0.4423545247289378, + 0.4222819107028718, + 0.43491703767972306, + 0.44542842330090476, + 0.4283273969828645, + 0.41799017720218323, + 0.4262592540440862, + 0.4279414872715979, + 0.43846998463944276, + 0.41959203571960885, + 0.4339694623950303, + 0.43599124789572963, + 0.4161655954088418, + 0.00023347801386454514, + 0.433154861876285, + 0.42582501638187975, + 0.43165195293905473, + 0.42491096231556635, + 0.42535281631031907, + 0.40046264761684847, + 0.4266963569789766, + 0.4203075702910921, + 0.44017412701278774, + 0.43235821056315354, + 0.41089887828782906, + 0.4502982907359005, + 0.39441375512015214, + 0.4298523344792014, + 0.42930692068286386, + 0.42162471707092675, + 0.4371751514310889, + 0.42336539955798813, + 0.42705832123806614, + 0.42286972791360206, + 0.42390124550391983, + 0.40914960131902606, + 0.4209887995548887, + 0.4309773667749471, + 0.41384560887124255, + 0.41878213833098926, + 0.42748066196826606, + 1.0005015133537376, + 0.43044603618647426, + 0.4170669873324351, + 0.42332671493149576, + 0.4066039907686427, + 0.42880792895733916, + 0.41493101599683635, + 0.4125417007682344 ] } ], "layout": { - "autosize": true, "template": { "data": { "bar": [ @@ -1225,30 +1102,13 @@ "zerolinewidth": 2 } } - }, - "xaxis": { - "autorange": true, - "range": [ - -0.05510839344229169, - 0.9525879437881849 - ], - "type": "linear" - }, - "yaxis": { - "autorange": true, - "range": [ - 0.14021174644848816, - 1.0564294031503563 - ], - "type": "linear" } } }, - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAyAAAAHCCAYAAAAXY63IAAAgAElEQVR4Xu3dTYylZ3km4K/cVmj+bZMIB2vAISiKQZZmsoAEGYlsEJuAxSKwiWQBQmbhDSz4k1ggGYYFbLzAQgOylA2wQMZsEBtAWExggSJZxEwgxpGCBRPZ+AdjjyDu6a+cqlQ35e5TdZ/66r3PuZosYvu8z3nOdZ+237vrb+fc+V+TXwQIECBAgAABAgQIEFhAYEcBWUDZUxAgQIAAAQIECBAgsCuggHgjECBAgAABAgQIECCwmIACshi1JyJAgAABAgQIECBAQAHxHiBAgAABAgQIECBAYDEBBWQxak9EgAABAgQIECBAgIAC4j1AgAABAgQIECBAgMBiAgrIYtSeiAABAgQIECBAgAABBcR7gAABAgQIECBAgACBxQQUkMWoPREBAgQIECBAgAABAgqI9wABAgQIECBAgAABAosJKCCLUXsiAgQIECBAgAABAgQUEO8BAgQIECBAgAABAgQWE1BAFqP2RAQIECBAgAABAgQIKCDeAwQIECBAgAABAgQILCaggCxG7YkIECBAgAABAgQIEFBAvAcIECBAgAABAgQIEFhMQAFZjNoTESBAgAABAgQIECCggHgPECBAgAABAgQIECCwmIACshi1JyJAgAABAgQIECBAQAHxHiBAgAABAgQIECBAYDEBBWQxak9EgAABAgQIECBAgIAC4j1AgAABAgQIECBAgMBiAgrIYtSeiAABAgQIECBAgAABBcR7gAABAgQIECBAgACBxQQUkMWoPREBAgQIECBAgAABAgqI9wABAgQIECBAgAABAosJKCCLUXsiAgQIECBAgAABAgQUEO8BAgQIECBAgAABAgQWE1BAFqP2RAQIECBAgAABAgQIKCDeAwQIECBAgAABAgQILCaggCxG7YkIECBAgAABAgQIEFBAvAcIECBAgAABAgQIEFhMQAFZjNoTESBAgAABAgQIECCggHgPECBAgAABAgQIECCwmIACshi1JyJAgAABAgQIECBAQAHxHiBAgAABAgQIECBAYDEBBWQxak9EgAABAgQIECBAgIAC4j1AgAABAgQIECBAgMBiAgrIYtSeiAABAgQIECBAgAABBcR7gAABAgQIECBAgACBxQQUkMWoPREBAgQIECBAgAABAgqI9wABAgQIECBAgAABAosJKCCLUXsiAgQIECBAgAABAgQUEO8BAgQIECBAgAABAgQWE1BAFqP2RAQIECBAgAABAgQIKCDeAwQIECBAgAABAgQILCaggCxG7YkIECBAgAABAgQIEFBAvAcIECBAgAABAgQIEFhMQAFZjNoTESBAgAABAgQIECCggHgPECBAgAABAgQIECCwmIACshi1JyJAgAABAgQIECBAQAHxHiBAgAABAgQIECBAYDEBBWQxak9EgAABAgQIECBAgIAC4j1AgAABAgQIECBAgMBiAgrIYtSeiAABAgQIECBAgAABBcR7gAABAgQIECBAgACBxQQUkMWoPREBAgQIECBAgAABAgqI9wABAgQIECBAgAABAosJKCCLUXsiAgQIECBAgAABAgQUEO8BAgQIECBAgAABAgQWE1BAFqP2RAQIECBAgAABAgQIKCDeAwQIECBAgAABAgQILCaggCxG7YkIECBAgAABAgQIEFBAvAcIECBAgAABAgQIEFhMQAFZjNoTESBAgAABAgQIECCggHgPECBAgAABAgQIECCwmIACshi1JyJAgAABAgQIECBAQAHxHiBAgAABAgQIECBAYDEBBWQxak9EgAABAgQIECBAgIAC4j1AgAABAgQIECBAgMBiAgrIYtSeiAABAgQIECBAgAABBcR7gAABAgQIECBAgACBxQQUkMWoPREBAgQIECBAgAABAgqI9wABAgQIECBAgAABAosJKCCLUXsiAgQIECBAgAABAgQUEO8BAgQIECBAgAABAgQWE1BAFqP2RAQIECBAgAABAgQIKCDeAwQIECBAgAABAgQILCaggCxG7YkIECBAgAABAgQIEFBAvAcIECBAgAABAgQIEFhMQAFZjNoTESBAgAABAgQIECCggHgPECBAgAABAgQIECCwmIACshi1JyJAgAABAgQIECBAQAHxHiBAgAABAgQIECBAYDEBBWQxak9EgAABAgQIECBAgIAC4j1AgAABAgQIECBAgMBiAgrIYtSeiAABAgQIECBAgAABBcR7gAABAgQIECBAgACBxQQUkMWoPREBAgQIECBAgAABAgqI9wABAgQIECBAgAABAosJKCCLUXsiAgQIECBAgAABAgQUEO8BAgQIECBAgAABAgQWE1BAFqP2RAQIECBAgAABAgQIKCDeAwQIECBAgAABAgQILCaggCxG7YkIECBAgAABAgQIEFBAvAcIECBAgAABAgQIEFhMQAFZjNoTESBAgAABAgQIECCggHgPECBAgAABAgQIECCwmIACshi1JyJAgAABAgQIECBAQAEJ3wMPPfxUOGE9x1/xsufvDhpln/W8KlNWEXjx86+cpp2d6Ynf/HaVh3vMBgm86Hz2V5zP/nHZb1Cqq72UF569crryzM702JN+368mtjmPkv0YWe7du8bYpm8LBSTMbJQLvwISBll8XAEpDi9cXQEJAYuPu4QWhxeuLvsQcE3HFZAMUgHJ/Ib5iIMCEgZZfFwBKQ4vXF0BCQGLj7uEFocXri77EHBNxxWQDFIByfwUkNDP8VxAAckNWycoIK3J5Xu7hOaGrRNkP0ZyCkiWgwKS+SkgoZ/juYACkhu2TlBAWpPL93YJzQ1bJ8h+jOQUkCwHBSTzU0BCP8dzAQUkN2ydoIC0Jpfv7RKaG7ZOkP0YySkgWQ4KSOangIR+jucCCkhu2DpBAWlNLt/bJTQ3bJ0g+zGSU0CyHBSQzE8BCf0czwUUkNywdYIC0ppcvrdLaG7YOkH2YySngGQ5KCCZnwIS+jmeCygguWHrBAWkNbl8b5fQ3LB1guzHSE4ByXJQQDI/BST0czwXUEByw9YJCkhrcvneLqG5YesE2Y+RnAKS5aCAZH4KSOjneC6ggOSGrRMUkNbk8r1dQnPD1gmyHyM5BSTLQQHJ/BSQ0M/xXEAByQ1bJyggrcnle7uE5oatE2Q/RnIKSJaDArKi3x1f+Or0la9/a/ru3XdccOKhh59accLJPsxPQj9Z35GnKyAjp3OyuykgJ+s78nSX0JHTOdndZH+yvqtOV0BWlTr8cQrIZfzu/sa908f+5//afdQ1V71YAcneb06fgIACcgKoJSMVkJKgTmBNl9ATQC0ZKfsxglJAshwUkBX9fARkRSgPW1xAAVmcfJgnVECGiWLxRVxCFycf5gllP0YUCkiWgwKyop8CsiKUhy0uoIAsTj7MEyogw0Sx+CIuoYuTD/OEsh8jCgUky0EBWdHvuQrIfzxzbsUJJ/uwn/7Ls3u85k93TvaJTB9OYOc/Iz83xltxOJ9NXkj2m5zupV/bnP38W3+Q/wRtbxCn8MplfwrohzzlmSvct5IkFJAV9Z6rgPzy0adXnHCyD3
v5VWd3n2CUfU721Zp+UOBFz7vy/E1kZ/r1078Fs2UCLzyf/RXns39C9luW/DS94A+unM6cOZ/9U37fb1v4sh8j8b171xjb9G2hgKyYmU/BWhHKwxYX8ClYi5MP84Q+BWuYKBZfxKfhLE4+zBPKfowofApWloMCsqKfArIilIctLqCALE4+zBMqIMNEsfgiLqGLkw/zhLIfIwoFJMtBAbmM38Fvw7v30Le95Y3Tpz76vt2/9HNAsjeg07mAApIbtk5QQFqTy/d2Cc0NWyfIfozkFJAsBwUk81NAQj/HcwEFJDdsnaCAtCaX7+0Smhu2TpD9GMkpIFkOCkjmp4CEfo7nAgpIbtg6QQFpTS7f2yU0N2ydIPsxklNAshwUkMxPAQn9HM8FFJDcsHWCAtKaXL63S2hu2DpB9mMkp4BkOSggmZ8CEvo5ngsoILlh6wQFpDW5fG+X0NywdYLsx0hOAclyUEAyPwUk9HM8F1BAcsPWCQpIa3L53i6huWHrBNmPkZwCkuWggGR+Ckjo53guoIDkhq0TFJDW5PK9XUJzw9YJsh8jOQUky0EByfwUkNDP8VxAAckNWycoIK3J5Xu7hOaGrRNkP0ZyCkiWgwKS+SkgoZ/juYACkhu2TlBAWpPL93YJzQ1bJ8h+jOQUkCwHBSTzU0BCP8dzAQUkN2ydoIC0Jpfv7RKaG7ZOkP0YySkgWQ4KSOangIR+jucCCkhu2DpBAWlNLt/bJTQ3bJ0g+zGSU0CyHBSQzE8BCf0czwUUkNywdYIC0ppcvrdLaG7YOkH2YySngGQ5KCCZnwIS+jmeCygguWHrBAWkNbl8b5fQ3LB1guzHSE4ByXJQQDI/BST0czwXUEByw9YJCkhrcvneLqG5YesE2Y+RnAKS5aCAZH4KSOjneC6ggOSGrRMUkNbk8r1dQnPD1gmyHyM5BSTLQQHJ/BSQ0M/xXEAByQ1bJyggrcnle7uE5oatE2Q/RnIKSJaDApL5KSChn+O5gAKSG7ZOUEBak8v3dgnNDVsnyH6M5BSQLAcFJPNTQEI/x3MBBSQ3bJ2ggLQml+/tEpobtk6Q/RjJKSBZDgpI5qeAhH6O5wIKSG7YOkEBaU0u39slNDdsnSD7MZJTQLIcFJDMTwEJ/RzPBRSQ3LB1ggLSmly+t0tobtg6QfZjJKeAZDkoIJmfAhL6OZ4LKCC5YesEBaQ1uXxvl9DcsHWC7MdITgHJclBAMj8FJPRzPBdQQHLD1gkKSGty+d4uoblh6wTZj5GcApLloIBkfgpI6Od4LqCA5IatExSQ1uTyvV1Cc8PWCbIfIzkFJMtBAcn8FJDQz/FcQAHJDVsnKCCtyeV7u4Tmhq0TZD9GcgpIloMCkvkpIKGf47mAApIbtk5QQFqTy/d2Cc0NWyfIfozkFJAsBwUk81NAQj/HcwEFJDdsnaCAtCaX7+0Smhu2TpD9GMkpIFkOCkjmp4CEfo7nAgpIbtg6QQFpTS7f2yU0N2ydIPsxklNAshwUkMxPAQn9HM8FFJDcsHWCAtKaXL63S2hu2DpB9mMkp4BkOSggmZ8CEvo5ngsoILlh6wQFpDW5fG+X0NywdYLsx0hOAclyUEAyPwUk9HM8F1BAcsPWCQpIa3L53i6huWHrBNmPkZwCkuWggGR+Ckjo53guoIDkhq0TFJDW5PK9XUJzw9YJsh8jOQUky0EByfwUkNDP8VxAAckNWycoIK3J5Xu7hOaGrRNkP0ZyCkiWgwKS+SkgoZ/juYACkhu2TlBAWpPL93YJzQ1bJ8h+jOQUkCwHBSTzU0BCP8dzAQUkN2ydoIC0Jpfv7RKaG7ZOkP0YySkgWQ4KSOangIR+jucCCkhu2DpBAWlNLt/bJTQ3bJ0g+zGSU0CyHBSQzE8BCf0czwUUkNywdYIC0ppcvrdLaG7YOkH2YySngGQ5KCCZnwIS+jmeCygguWHrBAWkNbl8b5fQ3LB1guzHSE4ByXJQQDI/BST0czwXUEByw9YJCkhrcvneLqG5YesE2Y+RnAKS5aCAZH4KSOjneC6ggOSGrRMUkNbk8r1dQnPD1gmyHyM5BSTLQQHJ/BSQ0M/xXEAByQ1bJyggrcnle7uE5oatE2Q/RnIKSJaDApL5KSChn+O5gAKSG7ZOUEBak8v3dgnNDVsnyH6M5BSQLAcFJPNTQEI/x3MBBSQ3bJ2ggLQml+/tEpobtk6Q/RjJKSBZDgpI5qeAhH6O5wIKSG7YOkEBaU0u39slNDdsnSD7MZJTQLIcFJDMTwEJ/RzPBRSQ3LB1ggLSmly+t0tobtg6QfZjJKeAZDkoIJmfAhL6OZ4LKCC5YesEBaQ1uXxvl9DcsHWC7MdITgHJclBAMj8FJPRzPBdQQHLD1gkKSGty+d4uoblh6wTZj5GcApLloIBkfgpI6Od4LqCA5IatExSQ1uTyvV1Cc8PWCbIfIzkFJMtBAcn8FJDQz/FcQAHJDVsnKCCtyeV7u4Tmhq0TZD9GcgpIloMCkvkpIKGf47mAApIbtk5QQFqTy/d2Cc0NWyfIfozkFJAsBwUk81NAQj/HcwEFJDdsnaCAtCaX7+0Smhu2TpD9GMkpIFkOCkjmp4CEfo7nAgpIbtg6QQFpTS7f2yU0N2ydIPsxklNAshy2soC8/ZaPTT998Oe7cq+5/rrpa3fdfknFN9182/TIo0/sP+ZH375r//9/6OGnsgTWdHrvN8Io+6zpZRmzgoACsgLShj5EAdnQYFd4WS6hKyBt6ENkP0awCkiWw9YVkHd/4NPTw488vl865jLysmteMn3xsx86VPLif37x+VEu/ApI9huh+bQC0pxetrsCkvk1n3YJbU4v2132md+6TisgmeTWFZD5oxkfvPWd081vvWlX7u5v3Dt95s4vT9+9+45DJefH/+3f/PV023vesfvP7/jCV6evfP1b+49XQLI3oNO5gAKSG7ZOUEBak8v3dgnNDVsnyH6M5BSQLIetKiD33f/A9K73f2L60uc+Pt14w6t35Q77ewdJP/LJz0/3fPN709ve8sbpUx993zR/ROS1f/aq3f9//qWAZG9Ap3MBBSQ3bJ2ggLQml+/tEpobtk6Q/RjJKSBZDgrIIaXkIOleQbnmqhfvfx3Iwa8BeebcuSyBNZ2+Ymdnd9Io+6zpZRmzgsDO9Gz2587/z6/tEpD9duV98NXKXvb+nX+674G9e9fpbtH77ArIZQrI6958y3T7h9+7/ylbex8R2Sshv3jk6SHSv/aas7t7jLLPEChbssSLnn9mms4X0F//5ndb8oq9zD2BF57Pfv6P4BOy37o3xQvOnpmuPLMzPf6k3/fbFr7sx0h87941xjZ9W2xVAZnjOcrXgKzyKVs+BavvTb9pG/sUrE1LdPXX41OwVrfatEf6NJxNS3T11yP71a1O8pE+BSvT3boCcrnvgjV/jcf8a+9b884fAXnDX9yw/12y5o+A3PuD+3wReva+c3qNAgrIGjHLRikgZYGtcV2X0DVilo2S/RiBKSBZDltXQGauS/0ckIsLyPz4uYTs/Zq/FuTgd8zyEZDsDeh0LqCA5IatExSQ1uTyvV1Cc
8PWCbIfIzkFJMthKwtIRnbhaQVknZpmHUdAATmO2macUUA2I8fjvAqX0OOobcYZ2Y+RowKS5aCAZH6+DW/o53guoIDkhq0TFJDW5PK9XUJzw9YJsh8jOQUky0EByfwUkNDP8VxAAckNWycoIK3J5Xu7hOaGrRNkP0ZyCkiWgwKS+SkgoZ/juYACkhu2TlBAWpPL93YJzQ1bJ8h+jOQUkCwHBSTzU0BCP8dzAQUkN2ydoIC0Jpfv7RKaG7ZOkP0YySkgWQ4KSOangIR+jucCCkhu2DpBAWlNLt/bJTQ3bJ0g+zGSU0CyHBSQzE8BCf0czwUUkNywdYIC0ppcvrdLaG7YOkH2YySngGQ5KCCZnwIS+jmeCygguWHrBAWkNbl8b5fQ3LB1guzHSE4ByXJQQDI/BST0czwXUEByw9YJCkhrcvneLqG5YesE2Y+RnAKS5aCAZH4KSOjneC6ggOSGrRMUkNbk8r1dQnPD1gmyHyM5BSTLQQHJ/BSQ0M/xXEAByQ1bJyggrcnle7uE5oatE2Q/RnIKSJaDApL5KSChn+O5gAKSG7ZOUEBak8v3dgnNDVsnyH6M5BSQLAcFJPNTQEI/x3MBBSQ3bJ2ggLQml+/tEpobtk6Q/RjJKSBZDgpI5qeAhH6O5wIKSG7YOkEBaU0u39slNDdsnSD7MZJTQLIcFJDMTwEJ/RzPBRSQ3LB1ggLSmly+t0tobtg6QfZjJKeAZDkoIJmfAhL6OZ4LKCC5YesEBeRoyX3rO1dMf/WGc9PZs+eOdnDAR7uEDhjKQivJfiHoyzyNApLloIBkfgpI6Od4LqCA5IatExSQoyf3vX/YmV79J9N01Uun6iLiEnr07DflhOzHSFIByXJQQDI/BST0czwXUEByw9YJCsjxkvvVo9P0/LM70w//cZpu+PNz09VXHW/OaZ5yCT1N/dN9btmfrv/esysgWQ4KSOangIR+jucCCkhu2DpBAcmTu//HO+fLyLnppedLSFMRcQnNs2+dIPsxklNAshwUkMxPAQn9HM8FFJDcsHWCArK+5H7xy53pgZ/1fETEJXR92bdNkv0YiSkgWQ4KSOangIR+jucCCkhu2DpBAVl/cvNHRJ5+ei4iY3+NiEvo+rNvmSj7MZJSQLIcFJDMTwEJ/RzPBRSQ3LB1ggJyMsk9/fTO9Ohj0+5HROYvWL/25eN91yyX0JPJvmGq7MdISQHJclBAMj8FJPRzPBdQQHLD1gkKyMknN39EZP517bVjfbG6S+jJZz/qM8h+jGQUkCwHBSTzU0BCP8dzAQUkN2ydoIAsl9z8NSK/+tX83bPOTddfv9zzPtczuYSefgantYHsT0v+wudVQLIcFJDMTwEJ/RzPBRSQ3LB1ggKyfHIPPjhND/1iZ3rtDfPPEjm9T81yCV0++1GeUfZjJKGAZDkoIJmfAhL6OZ4LKCC5YesEBeT0kvvOd6bpxS/dmV7xx6fzNSIuoaeX/Wk/s+xPO4Fnn18ByXJQQDI/BST0czwXUEByw9YJCsjpJzd/RORn/3rF9FdvOLfoT1Z3CT397E9rA9mflvyFz6uAZDkoIJmfAhL6OZ4LKCC5YesEBWSc5L73Dzu73zHrqpcu8+17XULHyX7pTWS/tPjhz6eAZDkoIJmfAhL6OZ4LKCC5YesEBWSs5H716PxF6jvTD/9xmt74lyf79SEuoWNlv+Q2sl9S+7mfSwHJclBAMj8FJPRzPBdQQHLD1gkKyLjJfes7V+x+kfr/+O8nU0RcQsfN/qQ3k/1JC682XwFZzem5HqWAZH4KSOjneC6ggOSGrRMUkLGTmz8i8ovz3zHr6qvX/6lZLqFjZ3+S28n+JHVXn62ArG512CMVkMxPAQn9HM8FFJDcsHWCAtKR3PyT1e//8bO7rusjIi6hHdmfxJayPwnVo89UQI5udvCEApL5KSChn+O5gAKSG7ZOUEC6kpuLyPz1Iev4GSIuoV3Zr3Nb2a9T8/izFJDj280nFZDMTwEJ/RzPBRSQ3LB1ggLSmdz8U9Ufe2yannlmmm748+N9jYhLaGf269ha9utQzGcoIJmhApL5KSChn+O5gAKSG7ZOUEBak3t27/lrRL7/gzPTG17/H9PVVx3ttbiEHs1rkx4t+zHSVECyHBSQzE8BCf0czwUUkNywdYIC0prchXvv/QyR5z3v3MpFxCV0M7I/zquQ/XHU1n9GAclMFZDMTwEJ/RzPBRSQ3LB1ggLSmtzv7z1/fcgv/32afvPkNP3J9Zf/YYYuoZuT/VFfieyPKnYyj1dAMlcFJPNTQEI/x3MBBSQ3bJ2ggLQm99x7r/ods1xCNy/7VV+R7FeVOtnHKSCZrwKS+SkgoZ/juYACkhu2TlBAWpO7/N573zHruuum6VX/7fe/UN0l9PKGm/oI2Y+RrAKS5aCAZH4KSOjneC6ggOSGrRMUkNbkVt97/o5ZT/+/c9NTv9m54DtmuYSubrhpj5T9GIkqIFkOCkjmp4CEfo7nAgpIbtg6QQFpTe7oez/62M70s59N0x//8TRd+/Jzk0vo0Q035YTsx0hSAclyUEAyPwUk9HM8F1BAcsPWCQpIa3LH3/vBB6fpqfNfsP6Ka89Mr7zu/M8TefK3xx/mZKWAAjJGbApIloMCkvkpIKGf47mAApIbtk5QQFqTy/f+2b9cOf3RH+1MV/7B76azZ4/3wwzzLUw4DQEF5DTUf/85FZAsBwUk81NAQj/HcwEFJDdsnaCAtCaX7z1fQh97fGf60f/53fTk+W/d+8a/VEJy1Y4JCsgYOSkgWQ4KSOangIR+jucCCkhu2DpBAWlNLt/74CV0/o5Z//v7z36R+vz1IX5ttoACMka+CkiWgwKS+SkgoZ/juYACkhu2TlBAWpPL9z7sEjp/fcj//fcrple+UhHJhcedoICMkY0CkuWggGR+Ckjo53guoIDkhq0TFJDW5PK9L3UJ/dWj0/STnygiufKYExSQMXJRQLIcFJDMTwEJ/RzPBRSQ3LB1ggLSmly+9yqX0Pt/vDPNZcTXh+TeI01YJfuR9t3UXRSQLFkFJPNTQEI/x3MBBSQ3bJ2ggLQml++96iV0/voQ3yUr9x5pwqrZj7TzJu6igGSpKiCZnwIS+jmeCygguWHrBAWkNbl8b5fQ3LB1guzHSE4ByXLYygLy9ls+Nv30wZ/vyr3m+uumr911+2UVX/fmW/Yfc+vfvW267T3v2P3rhx5+6rJnl3jA3m+EUfZZ4jV7jmcFFJDtfScoINubvUuo7P0QytN9Dyggmf/WFZB3f+DT08OPPL5fOuYy8rJrXjJ98bMfOlTyvvsfmN71/k9MB0vHwQeOcuFXQLLfCM2nFZDm9LLdFZDMr/m0AtKcXra77DO/dZ1WQDLJrSsgb7r5tumDt75zuvmtN+3K3f2Ne6fP3Pnl6bt333Go5FxYXv6HV0+f+uj7Dv3nCkj2BnQ6F1BAcsPWCQpIa3L53i6huWHrBNmPkZwCkuWwVQVk76MZX/rcx6cbb3j1rtxhf+8g6fypV9dc9eLpkUef2P/bB88rINkb0OlcQAHJDVsnKCCtyeV7u4Tm
hq0TZD9GcgpIloMC8p+fYnWwVOyR7pWT2z/83v2PmHzkk5+f7vnm96YfffuuTN5pAgQIECBAgAABAlsooICsUEAuLifzR0X2SomPgGzh75rBXrKPgAwWyILr+AjIgtiDPZU/BR8skAXXkf2C2Jd4Kh8ByXLYqgIyUx31a0AOlo09agUke9M5vV4BBWS9nk3TFJCmtNa7q0voej2bpsl+jLQUkCyHrSsgl/suWPN3xZp/7X1r3vnxP3ng3/a/SH3+FKx7f3Df/l/7CEj2BnQ6F1BAcsPWCQpIa3L53i6huWHrBNmPkZwCkuWwdenFbmYAAB0sSURBVAVk5rrUzwG5uIDMj59LyPd/eP+u9PwF6Qe/Y5YCkr0Bnc4FFJDcsHWCAtKaXL63S2hu2DpB9mMkp4BkOWxlAcnILjytgKxT06zjCCggx1HbjDMKyGbkeJxX4RJ6HLXNOCP7MXJUQLIcFJDMz09CD/0czwUUkNywdYIC0ppcvrdLaG7YOkH2YySngGQ5KCCZnwIS+jmeCygguWHrBAWkNbl8b5fQ3LB1guzHSE4ByXJQQDI/BST0czwXUEByw9YJCkhrcvneLqG5YesE2Y+RnAKS5aCAZH4KSOjneC6ggOSGrRMUkNbk8r1dQnPD1gmyHyM5BSTLQQHJ/BSQ0M/xXEAByQ1bJyggrcnle7uE5oatE2Q/RnIKSJaDApL5KSChn+O5gAKSG7ZOUEBak8v3dgnNDVsnyH6M5BSQLAcFJPNTQEI/x3MBBSQ3bJ2ggLQml+/tEpobtk6Q/RjJKSBZDgpI5qeAhH6O5wIKSG7YOkEBaU0u39slNDdsnSD7MZJTQLIcFJDMTwEJ/RzPBRSQ3LB1ggLSmly+t0tobtg6QfZjJKeAZDkoIJmfAhL6OZ4LKCC5YesEBaQ1uXxvl9DcsHWC7MdITgHJclBAMj8FJPRzPBdQQHLD1gkKSGty+d4uoblh6wTZj5GcApLloIBkfgpI6Od4LqCA5IatExSQ1uTyvV1Cc8PWCbIfIzkFJMtBAcn8FJDQz/FcQAHJDVsnKCCtyeV7u4Tmhq0TZD9GcgpIloMCkvkpIKGf47mAApIbtk5QQFqTy/d2Cc0NWyfIfozkFJAsBwUk81NAQj/HcwEFJDdsnaCAtCaX7+0Smhu2TpD9GMkpIFkOCkjmp4CEfo7nAgpIbtg6QQFpTS7f2yU0N2ydIPsxklNAshwUkMxPAQn9HM8FFJDcsHWCAtKaXL63S2hu2DpB9mMkp4BkOSggmZ8CEvo5ngsoILlh6wQFpDW5fG+X0NywdYLsx0hOAclyUEAyPwUk9HM8F1BAcsPWCQpIa3L53i6huWHrBNmPkZwCkuWggGR+Ckjo53guoIDkhq0TFJDW5PK9XUJzw9YJsh8jOQUky0EByfwUkNDP8VxAAckNWycoIK3J5Xu7hOaGrRNkP0ZyCkiWgwKS+SkgoZ/juYACkhu2TlBAWpPL93YJzQ1bJ8h+jOQUkCwHBSTzU0BCP8dzAQUkN2ydoIC0Jpfv7RKaG7ZOkP0YySkgWQ4KSOangIR+jucCCkhu2DpBAWlNLt/bJTQ3bJ0g+zGSU0CyHBSQzE8BCf0czwUUkNywdYIC0ppcvrdLaG7YOkH2YySngGQ5KCCZnwIS+jmeCygguWHrBAWkNbl8b5fQ3LB1guzHSE4ByXJQQDI/BST0czwXUEByw9YJCkhrcvneLqG5YesE2Y+RnAKS5aCAZH4KSOjneC6ggOSGrRMUkNbk8r1dQnPD1gmyHyM5BSTLQQHJ/BSQ0M/xXEAByQ1bJyggrcnle7uE5oatE2Q/RnIKSJaDApL5KSChn+O5gAKSG7ZOUEBak8v3dgnNDVsnyH6M5BSQLAcFJPNTQEI/x3MBBSQ3bJ2ggLQml+/tEpobtk6Q/RjJKSBZDgpI5qeAhH6O5wIKSG7YOkEBaU0u39slNDdsnSD7MZJTQLIcFJDMTwEJ/RzPBRSQ3LB1ggLSmly+t0tobtg6QfZjJKeAZDkoIJmfAhL6OZ4LKCC5YesEBaQ1uXxvl9DcsHWC7MdITgHJclBAMj8FJPRzPBdQQHLD1gkKSGty+d4uoblh6wTZj5GcApLloIBkfgpI6Od4LqCA5IatExSQ1uTyvV1Cc8PWCbIfIzkFJMtBAcn8FJDQz/FcQAHJDVsnKCCtyeV7u4Tmhq0TZD9GcgpIloMCkvkpIKGf47mAApIbtk5QQFqTy/d2Cc0NWyfIfozkFJAsBwUk81NAQj/HcwEFJDdsnaCAtCaX7+0Smhu2TpD9GMkpIFkOCkjmp4CEfo7nAgpIbtg6QQFpTS7f2yU0N2ydIPsxklNAshwUkMxPAQn9HM8FFJDcsHWCAtKaXL63S2hu2DpB9mMkp4BkOSggmZ8CEvo5ngsoILlh6wQFpDW5fG+X0NywdYLsx0hOAclyUEAyPwUk9HM8F1BAcsPWCQpIa3L53i6huWHrBNmPkZwCkuWggGR+Ckjo53guoIDkhq0TFJDW5PK9XUJzw9YJsh8jOQUky0EByfwUkNDP8VxAAckNWycoIK3J5Xu7hOaGrRNkP0ZyCkiWgwKS+SkgoZ/juYACkhu2TlBAWpPL93YJzQ1bJ8h+jOQUkCwHBSTzU0BCP8dzAQUkN2ydoIC0Jpfv7RKaG7ZOkP0YySkgWQ4KSOangIR+jucCCkhu2DpBAWlNLt/bJTQ3bJ0g+zGSU0CyHBSQzE8BCf0czwUUkNywdYIC0ppcvrdLaG7YOkH2YySngGQ5bGUBefstH5t++uDPd+Vec/1109fuun0lxTu+8NXpzr+/Z7r9w++dbn7rTbtnHnr4qZXOnvSD9n4jjLLPSb9e8/9LQAHZ3neDArK92buEyv6xJ3+7vQgDvHIFJAth6wrIuz/w6enhRx7fLx1zGXnZNS+ZvvjZD11Sci4fX/n6t6ZHHn1CAcnec06vWUABWTNo0TgFpCisNa+qgKwZtGic7McISwHJcti6AvKmm2+bPnjrO/c/gnH3N+6dPnPnl6fv3n3Hc0rulY/5Ma978y0KSPaec3rNAgrImkGLxikgRWGteVWX0DWDFo2T/RhhKSBZDltVQO67/4HpXe//xPSlz318uvGGV+/KHfb3DpIeLB/z37+4gPzikaezBNZ0+tprzu5OGmWfNb0sY1YQeNHzz0zTzs7069/8boVHe8gmCbzwfPZXnM/+CdlvUqwrvZYXnD0zXXlmZ3r8Sb/vVwLboAfJfoww9+5dY2zTt4UCckgp2Yvx4vJxWAF55ty5IVKfLyHzr1H2GQJlS5bYmZ7N/tz5//m1XQKy3668D77a3ezP/9+5Qf4btL1JLP/KZb+8+WHPuHfvGmObvi0UkEsUkPnrRb7/w/sPTfXWv3vbdNt73uGL0Pve8xu3sU/B2rhIV35BPgVrZaqNe6BPw9m4SFd+QbJfmepEH+hTsDLerSogM9VxvgbkILGvAcnecE6vX0ABWb9py0QFpCWp9e/pErp+05aJsh8jKQUky2HrCsjlvgvW/F2x5l/P9a15FZDsDef0+gUUkPWbtkxUQFqSWv+
eLqHrN22ZKPsxklJAshy2roDMXJf6OSAKSPaGcnp5AQVkefNRnlEBGSWJ5fdwCV3efJRnlP0YSSggWQ5bWUAysgtPj/KD//wgwnWm2jVLAenKa53bKiDr1Oya5RLaldc6t5X9OjWPP0sBOb7dfFIByfx8EXro53guoIDkhq0TFJDW5PK9XUJzw9YJsh8jOQUky0EByfwUkNDP8VxAAckNWycoIK3J5Xu7hOaGrRNkP0ZyCkiWgwKS+SkgoZ/juYACkhu2TlBAWpPL93YJzQ1bJ8h+jOQUkCwHBSTzU0BCP8dzAQUkN2ydoIC0Jpfv7RKaG7ZOkP0YySkgWQ4KSOangIR+jucCCkhu2DpBAWlNLt/bJTQ3bJ0g+zGSU0CyHBSQzE8BCf0czwUUkNywdYIC0ppcvrdLaG7YOkH2YySngGQ5KCCZnwIS+jmeCygguWHrBAWkNbl8b5fQ3LB1guzHSE4ByXJQQDI/BST0czwXUEByw9YJCkhrcvneLqG5YesE2Y+RnAKS5aCAZH4KSOjneC6ggOSGrRMUkNbk8r1dQnPD1gmyHyM5BSTLQQHJ/BSQ0M/xXEAByQ1bJyggrcnle7uE5oatE2Q/RnIKSJaDApL5KSChn+O5gAKSG7ZOUEBak8v3dgnNDVsnyH6M5BSQLAcFJPNTQEI/x3MBBSQ3bJ2ggLQml+/tEpobtk6Q/RjJKSBZDgpI5qeAhH6O5wIKSG7YOkEBaU0u39slNDdsnSD7MZJTQLIcFJDMTwEJ/RzPBRSQ3LB1ggLSmly+t0tobtg6QfZjJKeAZDkoIJmfAhL6OZ4LKCC5YesEBaQ1uXxvl9DcsHWC7MdITgHJclBAMj8FJPRzPBdQQHLD1gkKSGty+d4uoblh6wTZj5GcApLloIBkfgpI6Od4LqCA5IatExSQ1uTyvV1Cc8PWCbIfIzkFJMtBAcn8FJDQz/FcQAHJDVsnKCCtyeV7u4Tmhq0TZD9GcgpIloMCkvkpIKGf47mAApIbtk5QQFqTy/d2Cc0NWyfIfozkFJAsBwUk81NAQj/HcwEFJDdsnaCAtCaX7+0Smhu2TpD9GMkpIFkOCkjmp4CEfo7nAgpIbtg6QQFpTS7f2yU0N2ydIPsxklNAshwUkMxPAQn9HM8FFJDcsHWCAtKaXL63S2hu2DpB9mMkp4BkOSggmZ8CEvo5ngsoILlh6wQFpDW5fG+X0NywdYLsx0hOAclyUEAyPwUk9HM8F1BAcsPWCQpIa3L53i6huWHrBNmPkZwCkuWggGR+Ckjo53guoIDkhq0TFJDW5PK9XUJzw9YJsh8jOQUky0EByfwUkNDP8VxAAckNWycoIK3J5Xu7hOaGrRNkP0ZyCkiWgwKS+SkgoZ/juYACkhu2TlBAWpPL93YJzQ1bJ8h+jOQUkCwHBSTzU0BCP8dzAQUkN2ydoIC0Jpfv7RKaG7ZOkP0YySkgWQ4KSOangIR+jucCCkhu2DpBAWlNLt/bJTQ3bJ0g+zGSU0CyHBSQzE8BCf0czwUUkNywdYIC0ppcvrdLaG7YOkH2YySngGQ5KCCZnwIS+jmeCygguWHrBAWkNbl8b5fQ3LB1guzHSE4ByXJQQDI/BST0czwXUEByw9YJCkhrcvneLqG5YesE2Y+RnAKS5aCAZH4KSOjneC6ggOSGrRMUkNbk8r1dQnPD1gmyHyM5BSTLQQHJ/BSQ0M/xXEAByQ1bJyggrcnle7uE5oatE2Q/RnIKSJaDApL5KSChn+O5gAKSG7ZOUEBak8v3dgnNDVsnyH6M5BSQLAcFJPNTQEI/x3MBBSQ3bJ2ggLQml+/tEpobtk6Q/RjJKSBZDgpI5qeAhH6O5wIKSG7YOkEBaU0u39slNDdsnSD7MZJTQLIcFJDMTwEJ/RzPBRSQ3LB1ggLSmly+t0tobtg6QfZjJKeAZDkoIJmfAhL6OZ4LKCC5YesEBaQ1uXxvl9DcsHWC7MdITgHJclBAMj8FJPRzPBdQQHLD1gkKSGty+d4uoblh6wTZj5GcApLloIBkfgpI6Od4LqCA5IatExSQ1uTyvV1Cc8PWCbIfIzkFJMtBAcn8FJDQz/FcQAHJDVsnKCCtyeV7u4Tmhq0TZD9GcgpIloMCkvkpIKGf47mAApIbtk5QQFqTy/d2Cc0NWyfIfozkFJAsBwUk81NAQj/HcwEFJDdsnaCAtCaX7+0Smhu2TpD9GMkpIFkOCkjmp4CEfo7nAgpIbtg6QQFpTS7f2yU0N2ydIPsxklNAshwUkMxPAQn9HM8FFJDcsHWCAtKaXL63S2hu2DpB9mMkp4BkOSggmZ8CEvo5ngsoILlh6wQFpDW5fG+X0NywdYLsx0hOAclyUEAyPwUk9HM8F1BAcsPWCQpIa3L53i6huWHrBNmPkZwCkuWggGR+Ckjo53guoIDkhq0TFJDW5PK9XUJzw9YJsh8jOQUky2ErC8jbb/nY9NMHf74r95rrr5u+dtftz6n47g98evr+D+/f/+cXP/6hh5/KEljT6b3fCKPss6aXZcwKAgrICkgb+hAFZEODXeFluYSugLShD5H9GMEqIFkOW1dA5kLx8COP75eOuYy87JqXTF/87IcOlXzTzbdN3737jv1/Nv/1Ta+/cfrUR9+3+/dGufArINlvhObTCkhzetnuCkjm13zaJbQ5vWx32Wd+6zqtgGSSW1dA5gLxwVvfOd381pt25e7+xr3TZ+788gUl41KkH/nk56d/+ud/3S8wCkj2BnQ6F1BAcsPWCQpIa3L53i6huWHrBNmPkZwCkuWwVQXkvvsfmN71/k9MX/rcx6cbb3j1rtxhf+9SpPNHTF77Z6/a/wjIL3/1dJbAmk6//Oqzu5NG2WdNL8uYFQRedPbKadqZpl8/9bsVHu0hmyQwX0R2ZL9Jka78Wl7wvCunM2em6Ynf+H2/MtqGPFD2YwS5d+8aY5u+LRSQQ0rJc8U4f/Tjnm9+b/rRt+/af8h/PHNuiNTPXHH+FnL+1yj7DIGyJUvMF9C5gZw7N8Z7cUvYh3iZc/Y75//3jOyHyGPJJWS/pPZYzyX7MfLYu3eNsU3fFgrIigXkji98dbrz7++54KMnc9w+BavvTb9pG/sUrE1LdPXX41OwVrfatEf6NJxNS3T117Ot2T/44LNG11+/utVJPtKnYGW6W1VAZqrjfA3IYR/52GNXQLI3oNO5gAKSG7ZOUEBak8v33tZLaC7XP0H2Y2SogGQ5bF0Budx3wZq/xmP+tfeteS/+64u5FZDsDeh0LqCA5IatExSQ1uTyvV1Cc8PWCbIfIzkFJMth6wrIzHWpnwNysHDsfYH6YcS3f/i9u99JSwHJ3oBO5wIKSG7YOkEBaU0u39slNDdsnSD7MZJTQLIctrKAZGQXnlZA1qlp1nEEFJDjqG3GGQVkM3I8zqtwCT2O2mackf0YOSogWQ4KSObnIyChn+O5gAKSG7ZOUEBak8v3dgnNDVsnyH6M5BSQLAcFJPNTQEI/x3MBBSQ3bJ2ggLQml+/tEpobtk6Q/RjJKSBZDgpI5qeAhH6O5wIKSG7YOkEBaU0u39slNDdsnS
D7MZJTQLIcFJDMTwEJ/RzPBRSQ3LB1ggLSmly+t0tobtg6QfZjJKeAZDkoIJmfAhL6OZ4LKCC5YesEBaQ1uXxvl9DcsHWC7MdITgHJclBAMj8FJPRzPBdQQHLD1gkKSGty+d4uoblh6wTZj5GcApLloIBkfgpI6Od4LqCA5IatExSQ1uTyvV1Cc8PWCbIfIzkFJMtBAcn8FJDQz/FcQAHJDVsnKCCtyeV7u4Tmhq0TZD9GcgpIloMCkvkpIKGf47mAApIbtk5QQFqTy/d2Cc0NWyfIfozkFJAsBwUk81NAQj/HcwEFJDdsnaCAtCaX7+0Smhu2TpD9GMkpIFkOCkjmp4CEfo7nAgpIbtg6QQFpTS7f2yU0N2ydIPsxklNAshwUkMxPAQn9HM8FFJDcsHWCAtKaXL63S2hu2DpB9mMkp4BkOSggmZ/TBAgQIECAAAECBAgcQUABOQKWhxIgQIAAAQIECBAgkAkoIJmf0wQIECBAgAABAgQIHEFAATkClocSIECAAAECBAgQIJAJKCCZ3xCn337Lx6afPvjz3V1ec/1109fuun2IvSyxPoGjZPzuD3x6+v4P799/cu+J9eVwGpOOkv3B/e74wlenO//+nun2D793uvmtN53G6p4zFDhO9q978y37z3rr371tuu097wi3cPw0BI6a/Ztuvm165NEn9lf90bfvOo21PSeBlQUUkJWpxnzgfNl8+JHH90vH/C+tl13zkumLn/3QmAvb6sgCR814/g/Rd+++Y/955r++6fU3Tp/66PuO/NwOnK7AUbPf23YuH1/5+rd2LyQKyOlmeNxnP2r2993/wPSu939iUjqOKz7OuaNmf/F/9y8+P84rswmB/xJQQMrfDfPl8oO3vnP/Tzjv/sa902fu/PIFF9Dyl7j166cZf+STn5/+6Z//1UfGCt9Jx8l+r3zMJXT+03AFpDD48ysfNfv50vnyP7zaHzR0xn3B1kfNfn783/7NX+9/tOvgvwM2gMNL2FABBaQ42L0/8frS5z4+3XjDq3dfyWF/r/glbv3q68h4/tOx1/7Zq1xMyt5Nx8n+4ouHAlIW+n+ue5zs56yvuerFF3wazsH/NnRKbN/Wx8l+/kOme775veltb3nj7r/n/Tt/+943ja9YAWlMLfiPVPHL3crVj/Mfo4NQe/9h8vnAfW+fo2Z/2J96KiB9uT/XHyRd6g+X9v7ZwY92+b2/HdkffL8cLKD+nd+Z/zZtrYAUp33UC0rxS93a1ZOM974I2Z+Cdr59jpr9xd984OCr9nUBXe+Bo2b/XOVEAe3K/Tjlcz5zcc7KZ1/u27ixAlKe+lE/V7T85W7l+sfJ2H+ANuOtcpzsD75yF9De98FRsz8sa/l35n+U7I9aVjtFbL2JAgpIeapH/W4Z5S93K9e/XMbz5/vOv/a+/fLFf72VaBvyoo+a/cUv2wW0941w1Oznx//kgX/b/wYk8x9C3PuD+3xDksK3wFGzn3+fv+Evbtj/7peyLwx9C1dWQDYg9KN+v/ANeMlb9xIulfHBwrH3p2GHAfluSJ1vm1WzP+zVKSCdme9tfdTsD34a3vz1AAe/HXe3xPZtf9TsD/78F9lv3/ul8RUrII2p2ZkAAQIECBAgQIBAqYACUhqctQkQIECAAAECBAg0CiggjanZmQABAgQIECBAgECpgAJSGpy1CRAgQIAAAQIECDQKKCCNqdmZAAECBAgQIECAQKmAAlIanLUJECBAgAABAgQINAooII2p2ZkAAQIECBAgQIBAqYACUhqctQkQIECAAAECBAg0CiggjanZmQABAgQIECBAgECpgAJSGpy1CRAgQIAAAQIECDQKKCCNqdmZAAECBAgQIECAQKmAAlIanLUJECBAgAABAgQINAooII2p2ZkAAQIECBAgQIBAqYACUhqctQkQIECAAAECBAg0CiggjanZmQABAgQIECBAgECpgAJSGpy1CRAgQIAAAQIECDQKKCCNqdmZAAECBAgQIECAQKmAAlIanLUJECBAgAABAgQINAooII2p2ZkAAQIECBAgQIBAqYACUhqctQkQIECAAAECBAg0CiggjanZmQABAgQIECBAgECpgAJSGpy1CRAgQIAAAQIECDQKKCCNqdmZAAECBAgQIECAQKmAAlIanLUJECBAgAABAgQINAooII2p2ZkAAQIECBAgQIBAqYACUhqctQkQIECAAAECBAg0CiggjanZmQABAgQIECBAgECpgAJSGpy1CRAgQIAAAQIECDQKKCCNqdmZAAECBAgQIECAQKmAAlIanLUJECBAgAABAgQINAooII2p2ZkAAQIECBAgQIBAqYACUhqctQkQIECAAAECBAg0CiggjanZmQABAgQIECBAgECpgAJSGpy1CRAgQIAAAQIECDQKKCCNqdmZAAECBAgQIECAQKmAAlIanLUJECBAgAABAgQINAooII2p2ZkAAQIECBAgQIBAqYACUhqctQkQIECAAAECBAg0CiggjanZmQABAgQIECBAgECpgAJSGpy1CRAgQIAAAQIECDQKKCCNqdmZAAECBAgQIECAQKmAAlIanLUJECBAgAABAgQINAooII2p2ZkAAQIECBAgQIBAqYACUhqctQkQIECAAAECBAg0CiggjanZmQABAgQIECBAgECpgAJSGpy1CRAgQIAAAQIECDQKKCCNqdmZAAECBAgQIECAQKmAAlIanLUJECBAgAABAgQINAooII2p2ZkAAQIECBAgQIBAqYACUhqctQkQIECAAAECBAg0CiggjanZmQABAgQIECBAgECpgAJSGpy1CRAgQIAAAQIECDQKKCCNqdmZAAECBAgQIECAQKmAAlIanLUJECBAgAABAgQINAooII2p2ZkAAQIECBAgQIBAqYACUhqctQkQIECAAAECBAg0CiggjanZmQABAgQIECBAgECpgAJSGpy1CRAgQIAAAQIECDQKKCCNqdmZAAECBAgQIECAQKmAAlIanLUJECBAgAABAgQINAooII2p2ZkAAQIECBAgQIBAqYACUhqctQkQIECAAAECBAg0CiggjanZmQABAgQIECBAgECpgAJSGpy1CRAgQIAAAQIECDQKKCCNqdmZAAECBAgQIECAQKmAAlIanLUJECBAgAABAgQINAooII2p2ZkAAQIECBAgQIBAqcD/B/RnyIzi7L/OAAAAAElFTkSuQmCC", "text/html": [ - "
\n", + " " + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.plotly.v1+json": { + "config": { + "plotlyServerURL": "https://plotly.com" + }, + "data": [ + { + "dimensions": [ + { + "label": "0", + "values": [ + 2.5145480011594884e-33, + 0.384901918386449, + 0.24305814435560943, + 0.38085611020096105, + 0.6684784265898254, + 2.048910495236387e-17, + 2.3355490609461924e-17, + 0.37662828009974475, + 6.133887364400373e-17, + 0.38549052955946844, + 0.3019246968056084, + 0.37891017110082964, + 1.8500623569283723e-17, + 5.109613790882294e-17, + 0.8350713588396989, + 0.9763956763410305, + 0.6697213212836423, + 0.9732892539675633, + 0.3015630523799032, + 0.30094316739636734, + 5.268912276456174e-06, + 2.053187436825009e-17, + 3.3914382639756e-17, + 0.9081910176810469, + 0.8360103989920105, + 3.1334105509450554e-33, + 0.30099851668558164, + 1.4829450683445403e-17, + 1.5128524052843544e-19, + 4.101832928744045e-17, + 0.33584166546377897, + 1.8578357055198296e-17, + 5.963240120313745e-17, + 9.708229893786912e-18, + 0.3819168996022643, + 9.163104293497902e-34, + 0.34135444236234597, + 0.3398463959334143, + 4.109749651827473e-17, + 0.3831588744478954, + 0.9095154943789079, + 1.8641757316913187e-17, + 6.13035019249314e-17, + 5.566658421790186e-17, + 0.5542009313254979, + 0.30526007802292965, + 3.653996915554397e-33, + 0.6677652451063221, + 3.412605036372378e-17, + 0.29602912888990957, + 0.6726047159394002, + 0.2999365745584653, + 0.6682917213794158, + 0.44354455864862663, + 1.0017719391155075, + 0.6697670498429056, + 2.3552590934991478e-17, + 5.954593424977486e-17, + 0.3058100551110263, + 3.132954363532496e-33, + 0.38183489840048307, + 0.7584320461986945, + 0.7698525157982057, + 1.4540180396866892e-17, + 0.6737675647080623, + 0.38167086530608724, + 0.33744838971468893, + 0.2385262405649654, + 0.6691400642329315, + 2.061022015438644e-17, + 1.830684230285113e-17, + 2.37558465568577e-05, + 0.9089743476184237, + 0.3329921780015298, + 0.00089851388299033, + 5.123596302226045e-17, + 0.30297085416035535, + 0.8392420911127763, + 0.3836516213396321, + 0.0001431080970221159, + 4.1093504902338105e-17, + 5.975702089684614e-17, + 0.6680918756090959, + 0.6705225306390169, + 0.3773262826933476, + 4.1092756716978843e-17, + 0.23860235314977052, + 4.70974074262358e-19, + 5.549883789838917e-17, + 0.9089827123654605, + 0.3016891368105771, + 2.5244485265298175e-18, + 0.378893713754844, + 0.6698939866183388, + 1.8534120861830296e-17, + 2.3189146072012528e-17, + 0.30044135959822593, + 1.4821730192299045e-17, + 5.954408996878002e-17, + 0.2441285243621788, + 0.5581664587389367, + 1.5675844096895604e-17, + 0.9718229237100835, + 0.7601791764686479, + 0.761342019665136, + 5.982223793519246e-19, + 0.00040118040499191935, + 0.33264046629768984, + 4.117351341199903e-17, + 1.8685347500421718e-17, + 7.1021565255823415e-40, + 0.9162243941551383, + 3.64552018097226e-33, + 1.3605834357389584e-18, + 0.8349746987207827, + 0.5545474945292206, + 0.6698188821147867, + 0.5563102256189548, + 0.9063565149831696, + 5.107092722267473e-17, + 1.5231413853562512e-21, + 0.3036757646552103, + 5.570591680339526e-17, + 0.9740410989523056, + 1.5032236650678942e-17, + 3.429027882909457e-17 + ] + }, + { + "label": "1", + "values": [ + 4.106568527203459e-17, + 0.7622303604292567, + 0, + 0.38127455398683163, + 0, + 0.33461247710979497, + 0.38142410735442944, + 0.38174407612429484, + 1.001739827135632, + 0.3797605885859188, + 0, + 0.381887688707364, + 0.3021381116933401, + 0.8344632582128669, + 0.5567211808470623, 
+ 0, + 0.670137631010382, + 0.24287459211020432, + 0.9083013536311778, + 0.30286879644746556, + 1.3752264327252823e-07, + 0.33531095467762917, + 0.553863900405709, + 0, + 0, + 5.117247769931148e-17, + 0, + 0.24218330858775353, + 7.578579162129208e-21, + 0.6698801534613736, + 0.6701445268378928, + 0.3034075958575692, + 2.5944128736492937e-19, + 1.7950109984994115e-17, + 0, + 1.496448494354064e-17, + 5.307618169764418e-07, + 0.6753498957876564, + 0.6711730524570579, + 0.3803756126410241, + 0.3019016683336644, + 0.3044430007066904, + 0, + 0.9091043108373632, + 0, + 0, + 5.967429822375647e-17, + 0.3334230101475297, + 0.5573206966691729, + 0.9115428949688953, + 0.33682647064918736, + 0, + 0, + 0.4430597979150938, + 0, + 0, + 0.3846429999472454, + 0.9724589047427071, + 0.9175567947289265, + 5.116502759348706e-17, + 0.7617015940840337, + 6.705550170932175e-05, + 0.38694749661952726, + 0.23745916629987243, + 0.3353366253022659, + 0, + 0.6710775683977888, + 0, + 0.6688236855310402, + 0.3365904384633362, + 0.2989734234490642, + 2.502384953176678e-06, + 0.30162213980593744, + 0, + 0.6693174392032448, + 0.8367467756080026, + 0, + 0, + 0.38979023971014864, + 7.234724453131023e-05, + 0.6711078644217908, + 0.9759062113002919, + 0.6688210942461738, + 0, + 0, + 0.6710956456276083, + 0.9744157299771814, + 5.682174701589799e-17, + 0.9063648055427838, + 0, + 0, + 3.3987294230764455e-17, + 0.7583833938269067, + 0, + 0.3026851639956022, + 0.3787074948982468, + 0.30203338971802496, + 4.022834703317344e-20, + 0.9724287853483459, + 0, + 0, + 1.9201823535457036e-20, + 0.00011919946024019343, + 0.38344082793201445, + 0.38056545276645976, + 6.13773213440946e-17, + 0.7618794298663307, + 0, + 0.672414502543356, + 0.3051548824270179, + 3.152102190535145e-33, + 0.3056363840751314, + 5.953586264236209e-17, + 2.7523827422438177e-21, + 0, + 0.8361812900047259, + 0, + 0, + 0, + 0.8340515364631222, + 4.0959123472578834e-17, + 0.303284338767353, + 0.9097466607054358, + 8.669625747603507e-05, + 0.24549505475611372, + 0.5600027510457513 + ] + }, + { + "label": "2", + "values": [ + 0.6706535353805869, + 0.3818598480883347, + 0.9779485842843456, + 0, + 0.3339877796054491, + 0.671891412548264, + 0.3809294677484364, + 0.7617407561923203, + 0, + 0.3734509602094207, + 0.30364247452715787, + 0.7641083696230874, + 0.91229289897544, + 0, + 0, + 0, + 0, + 0, + 0, + 1.4750969860360369e-05, + 0.3045341471438795, + 1.120110317244063e-11, + 0, + 0.30075887600397233, + 0.5584485081006062, + 0.835709981603508, + 0.9078558836625495, + 0, + 1.4883684504774466e-17, + 0.32994749250811245, + 0, + 0.3025066181281772, + 0.9738802341139242, + 0.3332758137661961, + 0.37727895876366885, + 0.2443885854102505, + 0.6724418000692121, + 0, + 0.6716854039477083, + 0.0011663730507952984, + 0, + 0, + 7.810663348072324e-21, + 0.30376999981236846, + 0, + 0, + 6.2358078210672855e-19, + 0.001002102709987248, + 0, + 0.3022066831499138, + 0, + 0, + 0.6684866128460935, + 0.4549901798651011, + 0, + 2.040490538838719e-08, + 0.7631060330643716, + 0, + 0, + 0.8355883121420828, + 0.0010316861226885947, + 0.38067024343097056, + 0.3787238874126692, + 0, + 0.672989132496112, + 0.7585705495193236, + 0.6738014448174487, + 0, + 0.33289138350240793, + 0.6675118710062506, + 0, + 0.30595826175865604, + 0, + 0.6714419851085051, + 0.667311110997135, + 0, + 0.9071172484780564, + 2.0293189762290517e-13, + 0.38472547645599264, + 0.557890045823639, + 0.335522698493594, + 0.2440739029806477, + 0, + 4.911755505094038e-05, + 0.37939339517017406, + 6.476791168348605e-07, + 0, + 
0.9280014267845312, + 0, + 2.8775481311448842e-08, + 0.30335619919292656, + 0.5565836382838734, + 0.3846165214637913, + 0.6704643077291945, + 0.9112819226885367, + 0.37822265134559496, + 0, + 0.2420581149655808, + 3.11943153884516e-11, + 0, + 0, + 3.029263135361261e-17, + 0.243235016044039, + 0, + 0.37728286461152977, + 1.0024153355128573, + 0.38447309789179523, + 0, + 2.0584988748496167e-08, + 0.30313987329480857, + 5.1477735339362194e-17, + 0.3045357960870269, + 0.9722944229113778, + 6.127041938983577e-17, + 0, + 0, + 0.33300224412368806, + 0.8345731624461845, + 0.30301073659612143, + 0.5563801008432437, + 0.6689132495902752, + 0.9088561689638509, + 0.2987200940327218, + 0, + 0.9763417466289364, + 0.8380315213615325 + ] + }, + { + "label": "3", + "values": [ + 0.6722025341910578, + 0.37233695346612056, + 0, + 0.7633107507053176, + 0.6708179715280775, + 4.9945790040522354e-09, + 0.7593823817716205, + 0, + 0, + 0.7655283387470887, + 0, + 0.38119109636732973, + 0.30418434461671756, + 0.5557266672963868, + 0, + 0.24367722436888747, + 0.33448386228900656, + 0, + 0.305432385768423, + 0, + 0.9116394956740691, + 0.6698680747165844, + 0.8347349224321696, + 0, + 0, + 0, + 0.2994468342244929, + 0, + 0.24308160188226693, + 0.6710675604751569, + 0, + 0.001069184400149319, + 0, + 0.674161902695352, + 0.7570820141123537, + 0, + 0.6743578240509494, + 0.6762901205750358, + 0, + 0.3823409016560067, + 0.30233042031703455, + 0.3029481499187936, + 0, + 0, + 0.8338762996937836, + 0.3076591097931843, + 0.9746084553401987, + 0, + 0, + 0, + 0.6695001788803909, + 0.9081956238946071, + 0.3358312549381567, + 0.44909892811022556, + 0, + 0.6717250637824351, + 0.37959527400983945, + 0.24325292632755602, + 0, + 0.5570257282713619, + 0.3796990498785911, + 0.3805894625659633, + 0.38037765960410347, + 0.9735313431881528, + 0, + 0.38024807637988184, + 0, + 0.9734453093549195, + 0, + 0.672798213990518, + 0.9085811443409636, + 0.3074245345983466, + 0, + 0, + 0.33593315772777016, + 0, + 0, + 0, + 0.0010626636932018576, + 0.8370330150795288, + 0.0015488719555464081, + 0, + 0, + 0.33314796772321037, + 0.3806472824388774, + 0.3324533840987789, + 0, + 0.30902605226935514, + 0.3022435850790924, + 0.30269103189570973, + 0.9074438252730206, + 0, + 0, + 0, + 0, + 0.38367019258157564, + 0.9079674980294694, + 0.9733626993060012, + 0, + 0, + 8.1505368939888e-08, + 0.557030773475845, + 0, + 0.3832581619201262, + 0.002328686822045946, + 0, + 0.3819288536978428, + 0.670533054330794, + 0.6695581528588456, + 0.9129671703169929, + 0.8406952171875679, + 0, + 0.2427457843582645, + 1.0008685660084196, + 0.5569032357837865, + 3.136460515277023e-05, + 0, + 0, + 0.3017744992100304, + 0, + 0.33387656923780423, + 0, + 0.30060334438580044, + 0, + 0, + 0 + ] + }, + { + "label": "4", + "values": [ + 0.3335231963802626, + 4.855121721642774e-05, + 0, + 0.3823509495710601, + 0, + 0.6710699316843917, + 0.37721170044902363, + 0.379318187562345, + 0, + 0, + 0.9077833592485152, + 0.0005958635224639075, + 0, + 0, + 0, + 0, + 0, + 1.3481713145342611e-06, + 1.6060366796663546e-06, + 0.9074498440942897, + 0.30203295377202927, + 0.668984813370741, + 0, + 0.3022549770227314, + 0, + 0.5568739934348603, + 0, + 0.9775140858404265, + 0.9742359987136806, + 0, + 0.6716960840914084, + 0.9079883111801733, + 0.24338758955113435, + 0.6725916292410457, + 0.3774357974707894, + 0.9750955426135139, + 0, + 0, + 0.33578951711765226, + 0.7602187778327066, + 0, + 0.9057942973926261, + 1.0011621712511818, + 0.3043666288208056, + 0, + 0.9099939133790967, + 0.2435231998174185, + 
0.6676897984385757, + 0.8346876870358648, + 0, + 0.0001228087496341862, + 0.3016933836356472, + 0, + 0.4517007874511679, + 0, + 0.3346086096919855, + 0.3809739611054312, + 0, + 0.30440671818497533, + 0, + 0.3809899658805493, + 0.3779752181491704, + 0, + 0, + 0, + 0.37818621484677245, + 0, + 0, + 0, + 0, + 0.30205543630523934, + 0.9083371023172905, + 0.3001238249181435, + 0.6674720853444452, + 0, + 0.5575600258341863, + 0.3036693099687446, + 0.5593151386759923, + 0.7691414660543033, + 0, + 0.6702344598038964, + 0, + 0.3340193686187243, + 0.6714695239218621, + 0.759151407294496, + 0.6691999747412044, + 0, + 0.3096956427592386, + 0.30055468729835605, + 0.30333875572746155, + 0, + 0.835297233220135, + 0.37947967639827657, + 0.33973012836017547, + 0.30465143262401706, + 0.7579459103502963, + 0, + 0, + 0.2406520818556934, + 0.9755898018468125, + 0.8358394047432979, + 0.836729705140153, + 0, + 0.3774083127048792, + 0.38012630946543186, + 0, + 0.3790476899928798, + 0.6695535762342704, + 0.3338566977322679, + 0, + 0.5584879493434204, + 0, + 0, + 0, + 0, + 0, + 0.6672422191703645, + 0, + 0, + 0, + 0.6687777133015945, + 0, + 0, + 0.23995136365499853, + 0, + 0 + ] + } + ], + "domain": { + "x": [ + 0, + 1 + ], + "y": [ + 0, + 1 + ] + }, + "name": "", + "type": "parcoords" + } + ], + "layout": { + "legend": { + "tracegroupgap": 0 + }, + "margin": { + "t": 60 + }, + "template": { + "data": { + "bar": [ + { + "error_x": { + "color": "#2a3f5f" + }, + "error_y": { + "color": "#2a3f5f" + }, + "marker": { + "line": { + "color": "#E5ECF6", + "width": 0.5 + }, + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "bar" + } + ], + "barpolar": [ + { + "marker": { + "line": { + "color": "#E5ECF6", + "width": 0.5 + }, + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "barpolar" + } + ], + "carpet": [ + { + "aaxis": { + "endlinecolor": "#2a3f5f", + "gridcolor": "white", + "linecolor": "white", + "minorgridcolor": "white", + "startlinecolor": "#2a3f5f" + }, + "baxis": { + "endlinecolor": "#2a3f5f", + "gridcolor": "white", + "linecolor": "white", + "minorgridcolor": "white", + "startlinecolor": "#2a3f5f" + }, + "type": "carpet" + } + ], + "choropleth": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "choropleth" + } + ], + "contour": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "contour" + } + ], + "contourcarpet": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "contourcarpet" + } + ], + "heatmap": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "heatmap" + } + ], + "heatmapgl": [ + { + "colorbar": { + "outlinewidth": 0, + 
"ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "heatmapgl" + } + ], + "histogram": [ + { + "marker": { + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "histogram" + } + ], + "histogram2d": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "histogram2d" + } + ], + "histogram2dcontour": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "histogram2dcontour" + } + ], + "mesh3d": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "mesh3d" + } + ], + "parcoords": [ + { + "line": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "parcoords" + } + ], + "pie": [ + { + "automargin": true, + "type": "pie" + } + ], + "scatter": [ + { + "fillpattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + }, + "type": "scatter" + } + ], + "scatter3d": [ + { + "line": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatter3d" + } + ], + "scattercarpet": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattercarpet" + } + ], + "scattergeo": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattergeo" + } + ], + "scattergl": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattergl" + } + ], + "scattermapbox": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattermapbox" + } + ], + "scatterpolar": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterpolar" + } + ], + "scatterpolargl": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterpolargl" + } + ], + "scatterternary": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterternary" + } + ], + "surface": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + 
"#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "surface" + } + ], + "table": [ + { + "cells": { + "fill": { + "color": "#EBF0F8" + }, + "line": { + "color": "white" + } + }, + "header": { + "fill": { + "color": "#C8D4E3" + }, + "line": { + "color": "white" + } + }, + "type": "table" + } + ] + }, + "layout": { + "annotationdefaults": { + "arrowcolor": "#2a3f5f", + "arrowhead": 0, + "arrowwidth": 1 + }, + "autotypenumbers": "strict", + "coloraxis": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "colorscale": { + "diverging": [ + [ + 0, + "#8e0152" + ], + [ + 0.1, + "#c51b7d" + ], + [ + 0.2, + "#de77ae" + ], + [ + 0.3, + "#f1b6da" + ], + [ + 0.4, + "#fde0ef" + ], + [ + 0.5, + "#f7f7f7" + ], + [ + 0.6, + "#e6f5d0" + ], + [ + 0.7, + "#b8e186" + ], + [ + 0.8, + "#7fbc41" + ], + [ + 0.9, + "#4d9221" + ], + [ + 1, + "#276419" + ] + ], + "sequential": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "sequentialminus": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ] + }, + "colorway": [ + "#636efa", + "#EF553B", + "#00cc96", + "#ab63fa", + "#FFA15A", + "#19d3f3", + "#FF6692", + "#B6E880", + "#FF97FF", + "#FECB52" + ], + "font": { + "color": "#2a3f5f" + }, + "geo": { + "bgcolor": "white", + "lakecolor": "white", + "landcolor": "#E5ECF6", + "showlakes": true, + "showland": true, + "subunitcolor": "white" + }, + "hoverlabel": { + "align": "left" + }, + "hovermode": "closest", + "mapbox": { + "style": "light" + }, + "paper_bgcolor": "white", + "plot_bgcolor": "#E5ECF6", + "polar": { + "angularaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "bgcolor": "#E5ECF6", + "radialaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + } + }, + "scene": { + "xaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + }, + "yaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + }, + "zaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + } + }, + "shapedefaults": { + "line": { + "color": "#2a3f5f" + } + }, + "ternary": { + "aaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "baxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "bgcolor": "#E5ECF6", + "caxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + } + }, + "title": { + "x": 0.05 + }, + "xaxis": { + "automargin": true, + "gridcolor": 
"white", + "linecolor": "white", + "ticks": "", + "title": { + "standoff": 15 + }, + "zerolinecolor": "white", + "zerolinewidth": 2 + }, + "yaxis": { + "automargin": true, + "gridcolor": "white", + "linecolor": "white", + "ticks": "", + "title": { + "standoff": 15 + }, + "zerolinecolor": "white", + "zerolinewidth": 2 + } + } + } + } + }, + "text/html": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "#problem = multiple_clutch_brakes()\n", + "problem = test_problem_builder(name=\"DTLZ2\", n_of_variables=15, n_of_objectives=5)\n", + "general_evolver = NSGAIII(problem, interact=False, n_iterations=5, n_gen_per_iter=100)\n", + "\n", + "\n", + "general_evolver.start()\n", + "while general_evolver.continue_evolution():\n", + " general_evolver.iterate()\n", + " print(f\"Running iteration {general_evolver._iteration_counter}\")\n", + "\n", + "objectives = general_evolver.population.objectives\n", + "\n", + "pcc(objectives)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "5fb4eda7", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[0.2 0.7 0. 0. 0. ]\n", + "Running iteration 1\n", + "Running iteration 2\n", + "Running iteration 3\n", + "Running iteration 4\n", + "Running iteration 5\n", + "Running iteration 6\n", + "Running iteration 7\n", + "Running iteration 8\n", + "Running iteration 9\n", + "Running iteration 10\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/giomara/EMO2022/desdeo-emo/.venv/lib/python3.8/site-packages/plotly/express/_core.py:279: FutureWarning:\n", + "\n", + "iteritems is deprecated and will be removed in a future version. Use .items instead.\n", + "\n" + ] + }, + { + "data": { + "application/vnd.plotly.v1+json": { + "config": { + "plotlyServerURL": "https://plotly.com" + }, + "data": [ + { + "dimensions": [ + { + "label": "0", + "values": [ + 0.2853507621577206, + 0.24758072939141276, + 0.2572880046552738, + 0.3702869132986019, + 0.3344286447651211, + 1.4037640420892137e-17, + 0.2961110067068138, + 0.2964406274759657, + 1.0000564815946287, + 0.24731196675703881, + 6.123550600098711e-17, + 0.25766202062983345, + 0.2572048387774734, + 0.24730366673522766, + 0.22934762670224323, + 0.2959857206150315, + 0.33437619160820453, + 0.25722186496992466, + 0.2573670212184895, + 0.2562363175771634, + 0.24718084849046273, + 0.29693893971724544, + 0.321579941666219, + 0.2565060999089882, + 0.2966269396693705, + 0.23820578726134153, + 1.7849417971402597e-17, + 0.3342354866516267, + 0.2850589755726835, + 6.12110303890707e-17, + 0.2382529654962746, + 0.24735462007230055, + 0.2856448343750128, + 0.27476837005576377, + 0.2383458738004509, + 0.24946515155025276, + 0.2969332207722239, + 0.3309883824062357, + 0.24025324042788435, + 0.25655974523858543, + 0.2006705521955799, + 0.2245630801300912, + 0.32146381963186366, + 0.25772594524212183, + 0.2976636856006247, + 0.24794971562431428, + 0.28505299636522274, + 0.24821684505204689, + 0.2551869668835031, + 0.2272085745138896, + 0.0004855918642244998, + 1.4389290020351058e-17, + 0.255531081264954, + 0.25721870347326087, + 0.24678831554153036, + 0.27430518274207627, + 0.29636679399062726, + 0.238005740645704, + 0.2557057653392873, + 0.3206089665029282, + 0.25745466736749734, + 0.25764222599201736, + 0.26987546660109485, + 0.29693980921670465, + 0.25787870705222854, + 0.2953739765246295, + 0.334569005957736, + 0.2896367145257792, + 0.3485462701464428, + 0.3162898438500313, + 6.127707338462459e-17, + 0.22802843078409787, + 1.564517379687442e-17, + 0.45278319081164936, + 0.23767066491546146 + ] + }, + { + "label": "1", + "values": [ + 0.9584436524458989, + 0.9687403883838938, + 0.9644743326994154, + 0.9290868116330947, + 0.942462702980667, + 5.960586910136201e-17, + 0.9536457025803807, + 0.9534829484944524, + 0, + 0.967975844100183, + 1.000051705416152, + 
0.9658998324794476, + 0.9645112699873462, + 0.9674838191899152, + 0.9735065916455816, + 0.9535404380285059, + 0.9423679233757862, + 0.9645238711073248, + 0.9644534730953647, + 0.9631747232403709, + 0.9689306463582682, + 0.954813664083898, + 0.9470194290777338, + 0.9614389861550299, + 0.9548368605066198, + 0.9712526957979399, + 5.857706771024034e-17, + 0.9424865911458519, + 0.9584703474751393, + 8.632800398215804e-19, + 0.9712666480260715, + 0.9675884666280126, + 0.9581701584304454, + 0.9617068620244767, + 0.973075442734336, + 0.9680895958446883, + 0.9547226376943395, + 0.9456629576113081, + 0.9710050130509977, + 0.9614021081588541, + 0.9816496064057612, + 0.9715733682819223, + 0.9429858219183687, + 0.9660102432011052, + 0.9548627500562517, + 0.9691804378739542, + 0.9585469907477039, + 0.9677998888159131, + 0.9646143042614276, + 0.9744340713555868, + 0.9962887608583049, + 5.95282252788196e-17, + 0.9624355508752993, + 0.9644806178381987, + 0.9696555948627963, + 0.9599423541271108, + 0.9551823087600696, + 0.9703661115863558, + 0.9613298363933125, + 0.9473327309125196, + 0.9647457455593049, + 0.9644002796582491, + 0.9631734401325897, + 0.9548176722571079, + 0.9662710709671309, + 0.9519314927281367, + 0.9466405825622024, + 0.9632542702066047, + 0.9367940517170279, + 0.9384425911085719, + 2.2338896427770294e-18, + 0.9713753648308859, + 5.924231652158653e-17, + 0.9092374450814482, + 0.9720763423510388 + ] + }, + { + "label": "2", + "values": [ + 0, + 0.016131608362986216, + 0.059490497872205575, + 0, + 0, + 1.0000687450813186, + 0.05861347589411669, + 1.1483891024583846e-05, + 0, + 0.05726132844037993, + 0, + 0.016891729487083847, + 0, + 0, + 0, + 0, + 0.016467487308380893, + 0.01695721134489387, + 0, + 0.00011127182889027113, + 0, + 0.016744525906103248, + 0, + 0, + 0.016767912397339126, + 0.01614765639465321, + 2.4943000105510562e-22, + 0, + 0.00011294287239147208, + 1.0255723110680377e-18, + 0.0006063880108560559, + 0, + 0.015487631102477827, + 0, + 0.0001445798587424829, + 0.016306124971616494, + 0.015468895239446941, + 4.117945876410016e-05, + 0.021045789286096794, + 0.10126388679889005, + 0.015862466350340165, + 0.01661694140742654, + 0.1024614219041982, + 0.01733557208871761, + 1.178419815812434e-05, + 0.0209100567577415, + 0.015475846742431756, + 0.015127482144002564, + 0.02836618131112169, + 0.016536427718831168, + 0, + 1.0124614132059813e-18, + 0.05945260192319412, + 0.059890733814527086, + 0, + 0, + 0, + 0.05670172245295324, + 0.020799362750851397, + 0.0003961347130760296, + 0.0014968949803228456, + 0, + 0, + 0, + 0.0003410265869724726, + 0.05906014453716297, + 0.01621684204817111, + 0.015808244313502993, + 0.01732998943875058, + 0.10383214104934725, + 1.0345717141762647e-18, + 0.05874306397205551, + 1.0006697594487615, + 1.0855374103952432e-05, + 3.2830372713013433e-06 + ] + }, + { + "label": "3", + "values": [ + 0.016154339424177676, + 0, + 7.709253413049487e-06, + 0, + 0.016305862467035647, + 0, + 0, + 0, + 0, + 1.8718812036634423e-05, + 0, + 0.016833422008928997, + 0.01664459183713012, + 7.757934467542551e-06, + 0, + 0.05875155647344817, + 0, + 0.059276902699932375, + 0.059267574606135756, + 0.10149146013949868, + 0.01630590139125997, + 5.936210061327687e-05, + 0, + 0, + 0.016902756590253552, + 0, + 1.0000633583870986, + 0, + 0, + 1.0493829440634124e-18, + 0, + 0.05686893876955573, + 5.952134404573911e-05, + 7.761209021203553e-06, + 0.015251671757052922, + 0.017460284851252707, + 0.017123979829540427, + 0.016088137474520703, + 0.015237474157315207, + 3.6120244426515672e-06, + 0, + 
0.016731209523534837, + 0, + 0, + 0.016468733302295584, + 0.015991530081861695, + 1.4663630758581508e-05, + 0.05598025206865708, + 0.017995199342371277, + 0, + 7.559968528152302e-06, + 1.0003049214964066, + 0.06936841491799235, + 0.016673947006957124, + 0.01585782769189773, + 0.059802975345613935, + 0.005144573687478253, + 1.4838189448713688e-07, + 0.10237201942989825, + 7.495287307367951e-06, + 0.058105423864155216, + 0.01665447836215329, + 4.889951783444262e-05, + 0.016983329431882715, + 4.403714869513684e-05, + 0.058216978371252164, + 0, + 2.946884565463137e-06, + 0, + 0.011103323235470293, + 1.0492215011756459e-18, + 0.016531852938565576, + 6.952725050179032e-05, + 0.01588831057876957, + 0 + ] + }, + { + "label": "4", + "values": [ + 0, + 0.016273349416177113, + 0.017290560160519188, + 0, + 0, + 0, + 0.0001595964870773995, + 0.05870383664371375, + 0, + 4.825591358483462e-05, + 0, + 0.017339537137009673, + 0.058937251452624184, + 0.057125843276912926, + 0, + 0, + 0, + 0, + 0.01696682566343196, + 0, + 0.016453161377903085, + 0.01660058646118783, + 0, + 0.10121737222031355, + 0, + 0.0004281307699378788, + 0, + 0.015623393861392292, + 0.015858987577571988, + 1.0000385441223076, + 0.01586028505478502, + 0, + 0.01689447202244262, + 0, + 0, + 0.01627718198746015, + 0, + 0, + 0, + 0, + 0.0004280407061778706, + 0.07184393849325892, + 0, + 0.059309217418494244, + 0.016494378913397785, + 1.27777243875212e-06, + 0.00014632206615216935, + 0.016813711134882142, + 0.0721469391221567, + 0.001050944341742571, + 0.11052888740261817, + 0.0004266035469331617, + 0.01751036206462492, + 0, + 0, + 0, + 0, + 0.00023310830964861482, + 0, + 0.0001700857180430078, + 2.3554149624573014e-05, + 0.05853359360157474, + 0.015472672819040025, + 0.016610500110500755, + 0.017534750056353778, + 0, + 0.0004242791565838384, + 1.4048073298326534e-07, + 0.0577516814832853, + 0.10699391288426979, + 1.0016844157836948, + 0.02973023567251369, + 0.00041043961418548515, + 0.058311631431921196, + 0.007272435645412066 + ] + } + ], + "domain": { + "x": [ + 0, + 1 + ], + "y": [ + 0, + 1 + ] + }, + "name": "", + "type": "parcoords" + } + ], + "layout": { + "legend": { + "tracegroupgap": 0 + }, + "margin": { + "t": 60 + }, + "template": { + "data": { + "bar": [ + { + "error_x": { + "color": "#2a3f5f" + }, + "error_y": { + "color": "#2a3f5f" + }, + "marker": { + "line": { + "color": "#E5ECF6", + "width": 0.5 + }, + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "bar" + } + ], + "barpolar": [ + { + "marker": { + "line": { + "color": "#E5ECF6", + "width": 0.5 + }, + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "barpolar" + } + ], + "carpet": [ + { + "aaxis": { + "endlinecolor": "#2a3f5f", + "gridcolor": "white", + "linecolor": "white", + "minorgridcolor": "white", + "startlinecolor": "#2a3f5f" + }, + "baxis": { + "endlinecolor": "#2a3f5f", + "gridcolor": "white", + "linecolor": "white", + "minorgridcolor": "white", + "startlinecolor": "#2a3f5f" + }, + "type": "carpet" + } + ], + "choropleth": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "choropleth" + } + ], + "contour": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + 
[ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "contour" + } + ], + "contourcarpet": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "contourcarpet" + } + ], + "heatmap": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "heatmap" + } + ], + "heatmapgl": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "heatmapgl" + } + ], + "histogram": [ + { + "marker": { + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "histogram" + } + ], + "histogram2d": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "histogram2d" + } + ], + "histogram2dcontour": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "histogram2dcontour" + } + ], + "mesh3d": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "mesh3d" + } + ], + "parcoords": [ + { + "line": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "parcoords" + } + ], + "pie": [ + { + "automargin": true, + "type": "pie" + } + ], + "scatter": [ + { + "fillpattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + }, + "type": "scatter" + } + ], + "scatter3d": [ + { + "line": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatter3d" + } + ], + "scattercarpet": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattercarpet" + } + ], + "scattergeo": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattergeo" + } + ], + "scattergl": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattergl" + } + ], + "scattermapbox": [ + 
{ + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattermapbox" + } + ], + "scatterpolar": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterpolar" + } + ], + "scatterpolargl": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterpolargl" + } + ], + "scatterternary": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterternary" + } + ], + "surface": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "surface" + } + ], + "table": [ + { + "cells": { + "fill": { + "color": "#EBF0F8" + }, + "line": { + "color": "white" + } + }, + "header": { + "fill": { + "color": "#C8D4E3" + }, + "line": { + "color": "white" + } + }, + "type": "table" + } + ] + }, + "layout": { + "annotationdefaults": { + "arrowcolor": "#2a3f5f", + "arrowhead": 0, + "arrowwidth": 1 + }, + "autotypenumbers": "strict", + "coloraxis": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "colorscale": { + "diverging": [ + [ + 0, + "#8e0152" + ], + [ + 0.1, + "#c51b7d" + ], + [ + 0.2, + "#de77ae" + ], + [ + 0.3, + "#f1b6da" + ], + [ + 0.4, + "#fde0ef" + ], + [ + 0.5, + "#f7f7f7" + ], + [ + 0.6, + "#e6f5d0" + ], + [ + 0.7, + "#b8e186" + ], + [ + 0.8, + "#7fbc41" + ], + [ + 0.9, + "#4d9221" + ], + [ + 1, + "#276419" + ] + ], + "sequential": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "sequentialminus": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ] + }, + "colorway": [ + "#636efa", + "#EF553B", + "#00cc96", + "#ab63fa", + "#FFA15A", + "#19d3f3", + "#FF6692", + "#B6E880", + "#FF97FF", + "#FECB52" + ], + "font": { + "color": "#2a3f5f" + }, + "geo": { + "bgcolor": "white", + "lakecolor": "white", + "landcolor": "#E5ECF6", + "showlakes": true, + "showland": true, + "subunitcolor": "white" + }, + "hoverlabel": { + "align": "left" + }, + "hovermode": "closest", + "mapbox": { + "style": "light" + }, + "paper_bgcolor": "white", + "plot_bgcolor": "#E5ECF6", + "polar": { + "angularaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "bgcolor": "#E5ECF6", + "radialaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + } + }, + "scene": { + "xaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", 
+ "zerolinecolor": "white" + }, + "yaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + }, + "zaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + } + }, + "shapedefaults": { + "line": { + "color": "#2a3f5f" + } + }, + "ternary": { + "aaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "baxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "bgcolor": "#E5ECF6", + "caxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + } + }, + "title": { + "x": 0.05 + }, + "xaxis": { + "automargin": true, + "gridcolor": "white", + "linecolor": "white", + "ticks": "", + "title": { + "standoff": 15 + }, + "zerolinecolor": "white", + "zerolinewidth": 2 + }, + "yaxis": { + "automargin": true, + "gridcolor": "white", + "linecolor": "white", + "ticks": "", + "title": { + "standoff": 15 + }, + "zerolinecolor": "white", + "zerolinewidth": 2 + } + } + } + } + }, + "text/html": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "ranges = np.array([[0.2,0.7,0.0,0.0,0.0],\n", + " [0.4,1.0,0.4,0.4,0.8]])\n", + "\n", + "print(UPEMO(ranges))\n", + "\n", + "objectives = rnsga3(ranges)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "8685bc18", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2.38205787e-01 9.71252696e-01 1.61476564e-02 0.00000000e+00\n", + " 4.28130770e-04]\n", + "[[0.2 0.7 0. 0. 0. ]\n", + " [0.4 1. 0.4 0.4 0.8]]\n", + "Running iteration 1\n", + "Running iteration 2\n", + "Running iteration 3\n", + "Running iteration 4\n", + "Running iteration 5\n", + "Running iteration 6\n", + "Running iteration 7\n", + "Running iteration 8\n", + "Running iteration 9\n", + "Running iteration 10\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/giomara/EMO2022/desdeo-emo/.venv/lib/python3.8/site-packages/plotly/express/_core.py:279: FutureWarning:\n", + "\n", + "iteritems is deprecated and will be removed in a future version. Use .items instead.\n", + "\n" + ] + }, + { + "data": { + "application/vnd.plotly.v1+json": { + "config": { + "plotlyServerURL": "https://plotly.com" + }, + "data": [ + { + "dimensions": [ + { + "label": "0", + "values": [ + 0.21956176438110242, + 0.21971731288613086, + 0.2113615369906323, + 0.25905214073087046, + 0.2034775731175876, + 0.29764647782680415, + 0.25961194138337346, + 1.2396198050791017e-17, + 0.211112795928084, + 0.24934694592926146, + 1.2970665835967682e-17, + 0.21100630145007795, + 0.2590609332308025, + 0.2034456841163183, + 0.2402962422083574, + 0.25849203831615747, + 0.19578668122924428, + 0.25925393210977954, + 6.123744844060644e-17, + 0.21956063942799042, + 0.21124864252150316, + 0.2190119939733477, + 0.2198999737173413, + 0.2981459080437536, + 0.2594337267641697, + 0.24937050420802778, + 0.2861185240058057, + 1.0001261861386666, + 0.2196469241288946, + 0.21152890030377824, + 0.2189497354844849, + 0.2617028567184007, + 0.22153365136671155, + 0.25038716016378554, + 0.22003383544512764, + 1.309827225398352e-17, + 0.33534727741670933, + 0.211551753295338, + 0.21944421104857728, + 0.21932541933049018, + 0.2530359671385349, + 0.21093454582249258, + 0.25350121033535306, + 0.22149190307372243, + 0.297244482119741, + 0.20364225395409338, + 0.2192275254957154, + 0.2977417058557551, + 0.2107633751140339, + 0.21994170123589143, + 0.2579797644692365, + 0.9983655687858187, + 0.22093579300594024, + 0.30604319728848395, + 0.34554171435821085, + 0.2189554451018077, + 0.2197911184336665, + 0.21933355599730994, + 1.3451957551056174e-17, + 0.28462468236699606, + 0.026393024475124375, + 0.2030396033324089, + 0.295177309776377, + 0.2597146851370958, + 0.2063882499276081, + 0.011442807312607118, + 0.2596568716994102, + 0.186419143556838, + 0.26393933433178746, + 0.08914598392911013, + 0.20522299058297294, + 0.24759401820833984, + 0.29802321696159473, + 0.24204820071915575, + 0.2958517104035985 + ] + }, + { + "label": "1", + "values": [ + 0.9727805999571975, + 0.9734212955131272, + 0.9769407971935656, + 0.9643458916195423, + 0.9787506694323685, + 0.9546436309871443, + 0.9652734712777375, + 5.996906028538684e-17, + 0.9750523985198007, + 0.9684188757004496, + 5.983836977509971e-17, + 0.9762778632263829, + 0.9643513461076473, + 0.9790695340024983, + 0.9708919323641556, + 0.9635325321288025, + 0.9807412801956062, + 0.9653907643752846, + 1.0000834278625041, + 0.9727813007390096, + 0.9768935989568046, + 
0.971828917820908, + 0.9748133036481409, + 0.9545179278173588, + 0.9655638976990321, + 0.9684103990292471, + 0.9583271418096392, + 0, + 0.9734367608401717, + 0.9771702915991829, + 0.9691083646651067, + 0.9650159658448196, + 0.9754032733683292, + 0.9706507729664863, + 0.9737819051958653, + 5.981833559743479e-17, + 0.9450572990335016, + 0.9770788020877462, + 0.9719631690763356, + 0.9739160983260948, + 0.9658557405785761, + 0.9759174469438464, + 0.9672853452216913, + 0.9758300143539751, + 0.9470291201881975, + 0.9803906493719577, + 0.9739011002797628, + 0.9543185868120965, + 0.974617336788706, + 0.9750767018351149, + 0.9658314089065011, + 0, + 0.9747487282221173, + 0.9505471411961895, + 0.9383741796266918, + 0.9712664022277945, + 0.9759841592714974, + 0.9710774930926425, + 5.976915887588422e-17, + 0.9583389311617284, + 0.12506464704415418, + 0.9762978221771059, + 0.9557399936437879, + 0.9680259749664089, + 0.9770010691536403, + 0.05060676515498837, + 0.9643436903236062, + 0.9968020372087697, + 0.9653532227593108, + 0.9957821025423523, + 0.9789213945170737, + 0.9683200115801922, + 0.9545756760170555, + 0.9393566587842307, + 0.9524453201775137 + ] + }, + { + "label": "2", + "values": [ + 0.07529251482319482, + 0.033244969410467264, + 0.03212518189141407, + 0, + 0.030916147441252095, + 0, + 0.03311049314124218, + 1.0000739372373713, + 0.07228000533617854, + 0, + 0, + 0, + 8.947607232491577e-05, + 0, + 0, + 0.07458621676854219, + 0, + 0.03258756928019038, + 0, + 0.07503721098503514, + 0.031999755216452305, + 0, + 0.03322054387370241, + 0, + 0, + 0, + 0, + 0, + 0.03338333881374783, + 0, + 0.11639375393492164, + 0, + 0, + 0.03151448866713879, + 0.02010222265231268, + 8.042323671703377e-22, + 0, + 0.03239809658082363, + 0, + 0, + 0, + 0, + 0, + 0, + 0.16046538749837078, + 0, + 0.03201074592317511, + 0.03223727217405764, + 0.07571739740434226, + 0.0005095580139804912, + 0.029192854975212963, + 6.737169333888148e-05, + 0.0317604707416712, + 0.03199531029859954, + 0, + 0.07405970185066328, + 0.03279698431772236, + 0.02587838158691125, + 2.7216467640832788e-24, + 0.03124498869196948, + 0.992933896147401, + 0.0779591102396623, + 0, + 8.337466014031293e-05, + 0.0015005212347923675, + 0, + 0, + 0.03219453436188099, + 0.03202674242748873, + 0, + 0, + 0.030943832163353203, + 0, + 0.03071474995748532, + 0.07638844991965055 + ] + }, + { + "label": "3", + "values": [ + 5.85480123883428e-05, + 0, + 0, + 0, + 0, + 0.016287448190070317, + 0.01663049033786591, + 0, + 4.739029848828855e-05, + 1.980290973658281e-07, + 1.0163034947118736e-18, + 0.056382929185630336, + 0.058164818784317514, + 0, + 0, + 0, + 0, + 2.2597917273363686e-07, + 0, + 0.01609401802985453, + 0.016233102236146862, + 0.10024287273914839, + 0.01683517564784808, + 5.725239589374754e-05, + 0.01644413391512346, + 0.01617058634811684, + 0, + 0, + 0.058849174434844526, + 0.0159700869043237, + 1.7073616319586808e-06, + 0.016446682329506853, + 0.0013727712774378481, + 0, + 0.05843522039984015, + 1.0000531475897354, + 6.560885644763665e-08, + 4.865379281406412e-05, + 1.1442203924771678e-10, + 0.01654026865559108, + 0, + 6.707675723332179e-05, + 0, + 0.0014211671621449614, + 0, + 0.015637071867962066, + 0.05934497892590528, + 0, + 7.134680929806383e-05, + 0.01538331205184535, + 0.01387466872177667, + 0.056857244924641, + 0, + 0.060641644026167625, + 0.016451250151822, + 0.0003361682720942325, + 0.016400907684027764, + 0.0008644688496349607, + 0, + 0.013843709998551956, + 0, + 0.01607147599273936, + 0.001501231448494786, + 0, + 0.05884993191128969, + 
0.9988025304912966, + 0.05495235996370381, + 0.060244559792969464, + 0, + 0.016635434165392197, + 1.9618114162566196e-07, + 1.8865468918144962e-09, + 5.544173424687055e-05, + 0.24597051206727774, + 0.0009011002669216128 + ] + }, + { + "label": "4", + "values": [ + 0.017203350416901995, + 0.05888040066835755, + 0.016453378705456687, + 0.0585750718274053, + 0, + 0, + 0, + 0, + 0, + 0.016464415598142698, + 1.0000669532423605, + 2.2042776226109616e-07, + 0, + 0.015991547294991113, + 5.2502343229473685e-05, + 0, + 0, + 0.01649954540964314, + 0, + 0, + 1.8883998012965762e-10, + 0, + 0.01736829716079278, + 0.01641562848518145, + 0.016945054527683315, + 0, + 0, + 0, + 0, + 0.016047237840487085, + 5.538210884767603e-05, + 0.016954458564436205, + 0.010787014685771075, + 0, + 0.01645640791756021, + 0, + 0.00016654172874537386, + 4.446266968511249e-05, + 0.1008901426660099, + 0.058885394858008255, + 0.05728319746446103, + 0.056785882952959804, + 0.016921595392075297, + 0, + 0.00011814215434268296, + 0, + 0.04766176716864365, + 0, + 0.01800986369003808, + 0.05938253468115832, + 3.974570207847624e-06, + 0.01772607856723898, + 0.016394318626145866, + 0.05664162989251643, + 0, + 0.0589212355164733, + 0.0351070807539649, + 0.09592325355621563, + 1.0005210221329346, + 1.7439549333783452e-08, + 0.05713416042494107, + 0, + 0, + 0, + 0.0011550996927919231, + 5.2847778278438194e-05, + 4.052297251523168e-05, + 0, + 0, + 0.018719101732744093, + 0, + 0.01647155111216021, + 0.01642082487427939, + 0.0017316501153644762, + 0.016600472783346306 + ] + } + ], + "domain": { + "x": [ + 0, + 1 + ], + "y": [ + 0, + 1 + ] + }, + "name": "", + "type": "parcoords" + } + ], + "layout": { + "legend": { + "tracegroupgap": 0 + }, + "margin": { + "t": 60 + }, + "template": { + "data": { + "bar": [ + { + "error_x": { + "color": "#2a3f5f" + }, + "error_y": { + "color": "#2a3f5f" + }, + "marker": { + "line": { + "color": "#E5ECF6", + "width": 0.5 + }, + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "bar" + } + ], + "barpolar": [ + { + "marker": { + "line": { + "color": "#E5ECF6", + "width": 0.5 + }, + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "barpolar" + } + ], + "carpet": [ + { + "aaxis": { + "endlinecolor": "#2a3f5f", + "gridcolor": "white", + "linecolor": "white", + "minorgridcolor": "white", + "startlinecolor": "#2a3f5f" + }, + "baxis": { + "endlinecolor": "#2a3f5f", + "gridcolor": "white", + "linecolor": "white", + "minorgridcolor": "white", + "startlinecolor": "#2a3f5f" + }, + "type": "carpet" + } + ], + "choropleth": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "choropleth" + } + ], + "contour": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "contour" + } + ], + "contourcarpet": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "contourcarpet" + } + ], + "heatmap": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + 
[ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "heatmap" + } + ], + "heatmapgl": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "heatmapgl" + } + ], + "histogram": [ + { + "marker": { + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "histogram" + } + ], + "histogram2d": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "histogram2d" + } + ], + "histogram2dcontour": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "histogram2dcontour" + } + ], + "mesh3d": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "mesh3d" + } + ], + "parcoords": [ + { + "line": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "parcoords" + } + ], + "pie": [ + { + "automargin": true, + "type": "pie" + } + ], + "scatter": [ + { + "fillpattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + }, + "type": "scatter" + } + ], + "scatter3d": [ + { + "line": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatter3d" + } + ], + "scattercarpet": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattercarpet" + } + ], + "scattergeo": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattergeo" + } + ], + "scattergl": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattergl" + } + ], + "scattermapbox": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattermapbox" + } + ], + "scatterpolar": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterpolar" + } + ], + "scatterpolargl": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterpolargl" + } + ], + "scatterternary": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": 
"" + } + }, + "type": "scatterternary" + } + ], + "surface": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "surface" + } + ], + "table": [ + { + "cells": { + "fill": { + "color": "#EBF0F8" + }, + "line": { + "color": "white" + } + }, + "header": { + "fill": { + "color": "#C8D4E3" + }, + "line": { + "color": "white" + } + }, + "type": "table" + } + ] + }, + "layout": { + "annotationdefaults": { + "arrowcolor": "#2a3f5f", + "arrowhead": 0, + "arrowwidth": 1 + }, + "autotypenumbers": "strict", + "coloraxis": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "colorscale": { + "diverging": [ + [ + 0, + "#8e0152" + ], + [ + 0.1, + "#c51b7d" + ], + [ + 0.2, + "#de77ae" + ], + [ + 0.3, + "#f1b6da" + ], + [ + 0.4, + "#fde0ef" + ], + [ + 0.5, + "#f7f7f7" + ], + [ + 0.6, + "#e6f5d0" + ], + [ + 0.7, + "#b8e186" + ], + [ + 0.8, + "#7fbc41" + ], + [ + 0.9, + "#4d9221" + ], + [ + 1, + "#276419" + ] + ], + "sequential": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "sequentialminus": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ] + }, + "colorway": [ + "#636efa", + "#EF553B", + "#00cc96", + "#ab63fa", + "#FFA15A", + "#19d3f3", + "#FF6692", + "#B6E880", + "#FF97FF", + "#FECB52" + ], + "font": { + "color": "#2a3f5f" + }, + "geo": { + "bgcolor": "white", + "lakecolor": "white", + "landcolor": "#E5ECF6", + "showlakes": true, + "showland": true, + "subunitcolor": "white" + }, + "hoverlabel": { + "align": "left" + }, + "hovermode": "closest", + "mapbox": { + "style": "light" + }, + "paper_bgcolor": "white", + "plot_bgcolor": "#E5ECF6", + "polar": { + "angularaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "bgcolor": "#E5ECF6", + "radialaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + } + }, + "scene": { + "xaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + }, + "yaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + }, + "zaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + } + }, + "shapedefaults": { + "line": { + "color": "#2a3f5f" + } + }, 
+ "ternary": { + "aaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "baxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "bgcolor": "#E5ECF6", + "caxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + } + }, + "title": { + "x": 0.05 + }, + "xaxis": { + "automargin": true, + "gridcolor": "white", + "linecolor": "white", + "ticks": "", + "title": { + "standoff": 15 + }, + "zerolinecolor": "white", + "zerolinewidth": 2 + }, + "yaxis": { + "automargin": true, + "gridcolor": "white", + "linecolor": "white", + "ticks": "", + "title": { + "standoff": 15 + }, + "zerolinecolor": "white", + "zerolinewidth": 2 + } + } + } + } + }, + "text/html": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "preferred_solution=objectives[25]\n", + "print(preferred_solution)\n", + "print(ranges)\n", + "\n", + "objectives = rnsga3(preferred_solution)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7633b90c", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.14" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/notebooks/River_Pollution.ipynb b/docs/notebooks/River_Pollution.ipynb index 5319e9ba..8fc68d1b 100644 --- a/docs/notebooks/River_Pollution.ipynb +++ b/docs/notebooks/River_Pollution.ipynb @@ -106,100 +106,45 @@ "cell_type": "code", "execution_count": 7, "metadata": {}, - "outputs": [], + "outputs": [ + { + "ename": "eaError", + "evalue": "Interaction type not set. Use the set_interaction_type() method.", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31meaError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn [7], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m pref, plot \u001b[38;5;241m=\u001b[39m \u001b[43mevolver\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mstart\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/EMO2022/desdeo-emo/desdeo_emo/EAs/BaseEA.py:84\u001b[0m, in \u001b[0;36mBaseEA.start\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 82\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m eaError(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mSelection operator not initialized.\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 83\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minteract \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minteraction_type_set_bool:\n\u001b[0;32m---> 84\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m eaError(\n\u001b[1;32m 85\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mInteraction type not set. Use the set_interaction_type() method.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 86\u001b[0m )\n\u001b[1;32m 87\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mrequests()\n", + "\u001b[0;31meaError\u001b[0m: Interaction type not set. Use the set_interaction_type() method." + ] + } + ], "source": [ "pref, plot = evolver.start()" ] }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - " f1 f2 f3 f4 f5\n", - "minimize -1 -1 -1 -1 1\n", - "ideal 6.33643 3.40833 7.49815 -0.00301699 0.013136\n", - "nadir -inf -inf -inf -inf inf\n" - ] - } - ], + "outputs": [], "source": [ "print(plot.content[\"dimensions_data\"])" ] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Please provide preferences. There is four ways to do this. 
You can either:\n", - "\n", - "\t1: Select preferred solution(s)\n", - "\t2: Select non-preferred solution(s)\n", - "\t3: Specify a reference point worse than or equal to the ideal point\n", - "\t4: Specify desired ranges for objectives.\n", - "\n", - "In case you choose \n", - "\n", - "1, please specify index/indices of preferred solutions in a numpy array (indexing starts from 0).\n", - "For example: \n", - "\tnumpy.array([1]), for choosing the solutions with index 1.\n", - "\tnumpy.array([2, 4, 5, 16]), for choosing the solutions with indices 2, 4, 5, and 16.\n", - "\n", - "2, please specify index/indices of non-preferred solutions in a numpy array (indexing starts from 0).\n", - "For example: \n", - "\tnumpy.array([3]), for choosing the solutions with index 3.\n", - "\tnumpy.array([1, 2]), for choosing the solutions with indices 1 and 2.\n", - "\n", - "3, please provide a reference point worse than or equal to the ideal point:\n", - "\n", - "f1 6.33643\n", - "f2 3.40833\n", - "f3 7.49815\n", - "f4 -0.00301699\n", - "f5 0.013136\n", - "Name: ideal, dtype: object\n", - "The reference point will be used to focus the reference vectors towards the preferred region.\n", - "If a reference point is not provided, the previous state of the reference vectors is used.\n", - "If the reference point is the same as the ideal point, the reference vectors are spread uniformly in the objective space.\n", - "\n", - "4, please specify desired lower and upper bound for each objective, starting from \n", - "the first objective and ending with the last one. Please specify the bounds as a numpy array containing \n", - "lists, so that the first item of list is the lower bound and the second the upper bound, for each \n", - "objective. \n", - "\tFor example: numpy.array([[1, 2], [2, 5], [0, 3.5]]), for problem with three objectives.\n", - "Ideal vector: \n", - "f1 6.33643\n", - "f2 3.40833\n", - "f3 7.49815\n", - "f4 -0.00301699\n", - "f5 0.013136\n", - "Name: ideal, dtype: object\n", - "Nadir vector: \n", - "f1 -inf\n", - "f2 -inf\n", - "f3 -inf\n", - "f4 -inf\n", - "f5 inf\n", - "Name: nadir, dtype: object.\n" - ] - } - ], + "outputs": [], "source": [ "print(pref[0].content['message'])" ] }, { "cell_type": "code", - "execution_count": 16, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -209,17 +154,9 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Current generation number:400. 
Is looping back recommended: Yes\n" - ] - } - ], + "outputs": [], "source": [ "pref, plot = evolver.iterate(pref[2])\n", "figure = animate_next_(\n", @@ -244,9 +181,9 @@ ], "metadata": { "kernelspec": { - "display_name": "desdeo-emo", + "display_name": "Python 3 (ipykernel)", "language": "python", - "name": "desdeo-emo" + "name": "python3" }, "language_info": { "codemirror_mode": { @@ -258,7 +195,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.7" + "version": "3.8.14" } }, "nbformat": 4, diff --git a/docs/notebooks/baseADM.py b/docs/notebooks/baseADM.py new file mode 100644 index 00000000..96c9953a --- /dev/null +++ b/docs/notebooks/baseADM.py @@ -0,0 +1,127 @@ +import numpy as np + +# import plotly.graph_objects as go + +from desdeo_emo.utilities.ReferenceVectors import ReferenceVectors +from pygmo import fast_non_dominated_sorting as nds + + +def generate_composite_front(*fronts): + + _fronts = np.vstack(fronts) + + cf = _fronts[nds(_fronts)[0][0]] + + return cf + + +def generate_composite_front_with_identity(*fronts): + # This is currently working for two fronts + # First two fronts should be the individual fronts from each algorithm to be compared + # This function counts the number of solutions provided to the composite front by each algorithm to be compared. + + first_front = np.shape(fronts[0]) + second_front = np.shape(fronts[1]) + + _fronts = np.vstack(fronts) + # print(nds(_fronts)[0][0]) + + temp = nds(_fronts)[0][0] + first_nds = temp[temp < first_front[0] - 1] + second_nds = temp[temp > first_front[0] - 1] + + # Following lines are needed since composite front is keeping all the solutions from the very beginning. + # I am finding always the newly added nondominated solutions after each iteration by each algorithm + remaining_fronts = (first_front[0]) + (second_front[0]) + remaining_nds = temp[temp > remaining_fronts] + + # print(remaining_nds) + + first = first_nds.shape[0] + second = second_nds.shape[0] + second -= remaining_nds.shape[0] + + cf = _fronts[temp] + + return first, second, cf + +def generate_composite_front_with_identity_2(*fronts): + # This is currently working for two fronts + # First two fronts should be the individual fronts from each algorithm to be compared + # This function counts the number of solutions provided to the composite front by each algorithm to be compared. + + first_front = np.shape(fronts[0]) + second_front = np.shape(fronts[1]) + third_front = np.shape(fronts[2]) + + _fronts = np.vstack(fronts) + # print(nds(_fronts)[0][0]) + + temp = nds(_fronts)[0][0] + + # Following lines are needed since composite front is keeping all the solutions from the very beginning. 
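+    # (Note: despite the comment above, which was copied from the two-front version, this variant
+    #  expects three fronts; nondominated solutions are attributed to each algorithm by their row
+    #  index in the stacked array: rows below first_front[0] came from the first algorithm, the next
+    #  block from the second, and the rest from the third.)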
+ # I am finding always the newly added nondominated solutions after each iteration by each algorithm + remaining_fronts = (first_front[0]) + (second_front[0]) + (third_front[0]) + remaining_nds = temp[temp > remaining_fronts] + + + first_nds = temp[temp < first_front[0] - 1] + second_nds = temp[((temp > first_front[0] - 1) & (temp < (first_front[0] + second_front[0] - 1)))] + third_nds = temp[temp > (first_front[0] + second_front[0] - 1)] + + + + # print(remaining_nds) + + first = first_nds.shape[0] + second = second_nds.shape[0] + third = third_nds.shape[0] + third -= remaining_nds.shape[0] + + cf = _fronts[temp] + + return first, second, third, cf + + +def translate_front(front, ideal): + translated_front = np.subtract(front, ideal) + return translated_front + + +def normalize_front(front, translated_front): + translated_norm = np.linalg.norm(translated_front, axis=1) + translated_norm = np.repeat(translated_norm, len(translated_front[0, :])).reshape( + len(front), len(front[0, :]) + ) + + translated_norm[translated_norm == 0] = np.finfo(float).eps + normalized_front = np.divide(translated_front, translated_norm) + return normalized_front + + +def assign_vectors(front, vectors: ReferenceVectors): + cosine = np.dot(front, np.transpose(vectors.values)) + if cosine[np.where(cosine > 1)].size: + cosine[np.where(cosine > 1)] = 1 + if cosine[np.where(cosine < 0)].size: + cosine[np.where(cosine < 0)] = 0 + + theta = np.arccos(cosine) # check this theta later, if needed or not + assigned_vectors = np.argmax(cosine, axis=1) + + return assigned_vectors, theta + + +class baseADM: + def __init__(self, composite_front, vectors: ReferenceVectors): + + self.composite_front = composite_front + self.vectors = vectors + self.ideal_point = composite_front.min(axis=0) + self.translated_front = translate_front(self.composite_front, self.ideal_point) + self.normalized_front = normalize_front( + self.composite_front, self.translated_front + ) + self.assigned_vectors, self.theta = assign_vectors( + self.normalized_front, self.vectors + ) diff --git a/docs/notebooks/extendedADM_NSGA_RNSGA_NUMS.py b/docs/notebooks/extendedADM_NSGA_RNSGA_NUMS.py new file mode 100644 index 00000000..196c6716 --- /dev/null +++ b/docs/notebooks/extendedADM_NSGA_RNSGA_NUMS.py @@ -0,0 +1,417 @@ +import numpy as np +import pandas as pd + +import baseADM +from baseADM import * +import generatePreference as gp + +from desdeo_problem.testproblems.TestProblems import test_problem_builder +from desdeo_emo.utilities.ReferenceVectors import ReferenceVectors + +from desdeo_emo.EAs.RVEA import RVEA as NSGAIII +from desdeo_emo.EAs.NSGAIIINUMS import NSGAIIINUMS as NUMS +from desdeo_emo.EAs.RNSGAIII import RNSGAIII + + +from desdeo_emo.utilities.preference_converters import UPEMO + +from pymoo.factory import get_problem, get_reference_directions +import rmetric as rm +from sklearn.preprocessing import Normalizer +#from pymoo.config import Configuration + +#Configuration.show_compile_hint = False + +#problem_names = ["DTLZ1", "DTLZ3"] +problem_names = ["DTLZ3"] +n_objs = np.asarray([9]) # number of objectives + +K = 10 +n_vars = K + n_objs - 1 # number of variables + +num_gen_per_iter = [200] # number of generations per iteration + +algorithms = ["iNSGAIII", "NUMS", "RNSGAIII"] # algorithms to be compared + +# the followings are for formatting results +column_names = ( + ["problem", "num_obj", "iteration", "num_gens", "reference_point"] + + [algorithm + "_R_IGD" for algorithm in algorithms] + + [algorithm + "_R_HV" for algorithm in algorithms] + + 
[algorithm + "_N_Ss" for algorithm in algorithms] + + [algorithm + "_FEs" for algorithm in algorithms] +) +excess_columns = ["_R_IGD", "_R_HV"] +data = pd.DataFrame(columns=column_names) +data_row = pd.DataFrame(columns=column_names, index=[1]) + +# ADM parameters +L = 4 # number of iterations for the learning phase +D = 3 # number of iterations for the decision phase +lattice_resolution = 5 # density variable for creating reference vectors + +total_run = 10 + +for gen in num_gen_per_iter: + for n_obj, n_var in zip(n_objs, n_vars): + for problem_name in problem_names: + #Global arrays for median and stddev + + median_ns_nsga_learning = [] + median_rm_nsga_learning = [] + median_ns_nums_learning = [] + median_rm_nums_learning = [] + median_ns_rnsga_learning = [] + median_rm_rnsga_learning = [] + + median_ns_nsga_decision = [] + median_rm_nsga_decision = [] + median_ns_nums_decision = [] + median_rm_nums_decision = [] + median_ns_rnsga_decision = [] + median_rm_rnsga_decision = [] + + for run in range(total_run): + print(f"Problem {problem_name} Objectives {n_obj} Run {run+1} of {total_run}") + + problem = test_problem_builder( + name=problem_name, n_of_objectives=n_obj, n_of_variables=n_var + ) + problem.ideal_fitness = np.asarray([0] * n_obj) + problem.nadir_fitness = abs(np.random.normal(size=n_obj, scale=0.15)) + 1 + true_nadir = np.asarray([1] * n_obj) + # initial reference point is specified randomly + response = np.random.rand(n_obj) + # run algorithms once with the randomly generated reference point + # interactive RVEA + + int_nsga = NSGAIII(problem=problem, interact=True, n_gen_per_iter=gen) + int_nums = NUMS(problem=problem, interact=True, n_gen_per_iter=gen) + int_rnsga = RNSGAIII(problem=problem, interact=True, n_gen_per_iter=gen) + + int_nsga.set_interaction_type('Reference point') + int_nums.set_interaction_type('Reference point') + int_rnsga.set_interaction_type('Reference point') + + pref_int_nsga, _ = int_nsga.start() + pref_int_nums, _ = int_nums.start() + pref_int_rnsga, _ = int_rnsga.start() + + for boundrnd in range(0, n_obj): + if response[boundrnd] < int_nsga.population.problem.ideal_fitness[boundrnd]: + response[boundrnd] = int_nsga.population.problem.nadir_fitness[boundrnd] + #print(pref_int_rvea.content["dimensions_data"].columns["ideal"]) + + pref_int_nsga.response = pd.DataFrame( + [response], + columns=pref_int_nsga.content["dimensions_data"].columns, + ) + + pref_int_nums.response = pd.DataFrame( + [response], + columns=pref_int_nums.content["dimensions_data"].columns, + ) + + pref_int_rnsga.response = pd.DataFrame( + [response], + columns=pref_int_rnsga.content["dimensions_data"].columns, + ) + + pref_int_nsga,_ = int_nsga.iterate(pref_int_nsga) + pref_int_nums,_ = int_nums.iterate(pref_int_nums) + pref_int_rnsga,_ = int_rnsga.iterate(pref_int_rnsga) + + # build initial composite front + ( + nsga_n_solutions, + nums_n_solutions, + rnsga_n_solutions, + cf, + ) = generate_composite_front_with_identity_2( + int_nsga.population.objectives, int_nums.population.objectives, int_rnsga.population.objectives + ) + + # the following two lines for getting pareto front by using pymoo framework + problemR = get_problem(problem_name.lower(), n_var, n_obj) + ref_dirs = get_reference_directions( + "das-dennis", n_obj, n_partitions=12 + ) + pareto_front = problemR.pareto_front(ref_dirs) + # creates uniformly distributed reference vectors + reference_vectors = ReferenceVectors(lattice_resolution=lattice_resolution, number_of_objectives=n_obj) + + # Arrays to store values for the 
current run + rm_nsga_learning = np.array([]) + rm_nsga_decision = np.array([]) + ns_nsga_learning = np.array([]) + ns_nsga_decision = np.array([]) + + rm_nums_learning = np.array([]) + rm_nums_decision = np.array([]) + ns_nums_learning = np.array([]) + ns_nums_decision = np.array([]) + + rm_rnsga_learning = np.array([]) + rm_rnsga_decision = np.array([]) + ns_rnsga_learning = np.array([]) + ns_rnsga_decision = np.array([]) + + + # learning phase + for i in range(L): + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + problem_name, + n_obj, + i + 1, + gen, + ] + # After this class call, solutions inside the composite front are assigned to reference vectors + base = baseADM(cf, reference_vectors) + # generates the next reference point for the next iteration in the learning phase + response = gp.generateRP4learning(base) + for boundrnd in range(0, n_obj): + if response[boundrnd] < int_nsga.population.problem.ideal_fitness[boundrnd]: + response[boundrnd] = int_nsga.population.problem.ideal_fitness[boundrnd] + + #data_row["reference_point"] = [response] + # run algorithms with the new reference point + # Interactive RVEA + pref_int_nsga.response = pd.DataFrame( + [response], + columns=pref_int_nsga.content["dimensions_data"].columns, + + ) + + pref_int_nums.response = pd.DataFrame( + [response], + columns=pref_int_nums.content["dimensions_data"].columns, + + ) + pref_int_rnsga.response = pd.DataFrame( + [response], + columns=pref_int_rnsga.content["dimensions_data"].columns, + + ) + + previous_NSGA_FEs = int_nsga._function_evaluation_count + previous_NUMS_FEs = int_nums._function_evaluation_count + previous_RNSGA_FEs = int_rnsga._function_evaluation_count + + pref_int_nsga,_ = int_nsga.iterate(pref_int_nsga) + pref_int_nums,_ = int_nums.iterate(pref_int_nums) + pref_int_rnsga,_ = int_rnsga.iterate(pref_int_rnsga) + + peritr_NSGA_FEs = ( + int_nsga._function_evaluation_count - previous_NSGA_FEs + ) + + peritr_NUMS_FEs = ( + int_nums._function_evaluation_count - previous_NUMS_FEs + ) + peritr_RNSGA_FEs = ( + int_rnsga._function_evaluation_count - previous_RNSGA_FEs + ) + # extend composite front with newly obtained solutions + ( + nsga_n_solutions, + nums_n_solutions, + rnsga_n_solutions, + cf, + ) = generate_composite_front_with_identity_2( + int_nsga.population.objectives, + int_nums.population.objectives, + int_rnsga.population.objectives, + cf, + ) + #data_row["iRVEA_N_Ss"] = [rvea_n_solutions] + #data_row["RNSGAIII_N_Ss"] = [nsga_n_solutions] + #data_row["iRVEA_FEs"] = [peritr_RVEA_FEs * n_obj] + #data_row["RNSGAIII_FEs"] = [peritr_NSGA_FEs * n_obj] + + + ns_nsga_learning = np.append(ns_nsga_learning, nsga_n_solutions) + ns_nums_learning = np.append(ns_nums_learning, nums_n_solutions) + ns_rnsga_learning = np.append(ns_rnsga_learning, rnsga_n_solutions) + + # R-metric calculation + ref_point = response.reshape(1, n_obj) + # normalize reference point + rp_transformer = Normalizer().fit(ref_point) + norm_rp = rp_transformer.transform(ref_point) + rmetric = rm.RMetric(problemR, norm_rp, pf=pareto_front) + # normalize solutions before sending r-metric + nsga_transformer = Normalizer().fit(int_nsga.population.objectives) + norm_nsga = nsga_transformer.transform( + int_nsga.population.objectives + ) + + nums_transformer = Normalizer().fit(int_nums.population.objectives) + norm_nums = nums_transformer.transform( + int_nums.population.objectives + ) + + rnsga_transformer = Normalizer().fit(int_rnsga.population.objectives) + norm_rnsga = rnsga_transformer.transform( + 
int_rnsga.population.objectives + ) + # R-metric calls for R_IGD and R_HV + + rigd_nsga, rhv_nsga = rmetric.calc(norm_nsga, others=np.vstack((norm_nums,norm_rnsga))) + rigd_nums, rhv_nums = rmetric.calc(norm_nums, others=np.vstack((norm_nsga,norm_rnsga))) + rigd_rnsga, rhv_rnsga = rmetric.calc(norm_rnsga, others=np.vstack((norm_nsga,norm_nums))) + + #data_row[ + # ["iRVEA" + excess_col for excess_col in excess_columns] + #] = [rigd_irvea, rhv_irvea] + #data_row[ + # ["RNSGAIII" + excess_col for excess_col in excess_columns] + #] = [rigd_rnsga, rhv_rnsga] + #data = data.append(data_row, ignore_index=1) + rm_nsga_learning = np.append(rm_nsga_learning, rigd_nsga) + rm_nums_learning = np.append(rm_nums_learning, rigd_nums) + rm_rnsga_learning = np.append(rm_rnsga_learning, rigd_rnsga) + + # Compute cumulative sum of the learning phase + median_ns_nsga_learning = np.append(median_ns_nsga_learning, np.sum(ns_nsga_learning)) + median_rm_nsga_learning = np.append(median_rm_nsga_learning, np.sum(rm_nsga_learning)) + median_ns_nums_learning = np.append(median_ns_nums_learning, np.sum(ns_nums_learning)) + median_rm_nums_learning = np.append(median_rm_nums_learning, np.sum(rm_nums_learning)) + median_ns_rnsga_learning = np.append(median_ns_rnsga_learning, np.sum(ns_rnsga_learning)) + median_rm_rnsga_learning = np.append(median_rm_rnsga_learning, np.sum(rm_rnsga_learning)) + + # Decision phase + # After the learning phase the reference vector which has the maximum number of assigned solutions forms ROI + max_assigned_vector = gp.get_max_assigned_vector(base.assigned_vectors) + for i in range(D): + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + problem_name, + n_obj, + L + i + 1, + gen, + ] + # since composite front grows after each iteration this call should be done for each iteration + base = baseADM(cf, reference_vectors) + # generates the next reference point for the decision phase + response = gp.generatePerturbatedRP4decision( + base, max_assigned_vector[0] + ) + data_row["reference_point"] = [response] + # run algorithms with the new reference point + pref_int_nsga.response = pd.DataFrame( + [response], + columns=pref_int_nsga.content["dimensions_data"].columns, + ) + + pref_int_nums.response = pd.DataFrame( + [response], + columns=pref_int_nums.content["dimensions_data"].columns, + + ) + pref_int_rnsga.response = pd.DataFrame( + [response], + columns=pref_int_rnsga.content["dimensions_data"].columns, + + ) + previous_NSGA_FEs = int_nsga._function_evaluation_count + previous_NUMS_FEs = int_nums._function_evaluation_count + previous_RNSGA_FEs = int_rnsga._function_evaluation_count + + pref_int_nsga,_ = int_nsga.iterate(pref_int_nsga) + pref_int_nums,_ = int_nums.iterate(pref_int_nums) + pref_int_rnsga,_ = int_rnsga.iterate(pref_int_rnsga) + + peritr_NSGA_FEs = ( + int_nsga._function_evaluation_count - previous_NSGA_FEs + ) + + peritr_NUMS_FEs = ( + int_nums._function_evaluation_count - previous_NUMS_FEs + ) + peritr_RNSGA_FEs = ( + int_rnsga._function_evaluation_count - previous_RNSGA_FEs + ) + # extend composite front with newly obtained solutions + ( + nsga_n_solutions, + nums_n_solutions, + rnsga_n_solutions, + cf, + ) = generate_composite_front_with_identity_2( + int_nsga.population.objectives, + int_nums.population.objectives, + int_rnsga.population.objectives, + cf, + ) + #data_row["iRVEA_N_Ss"] = [rvea_n_solutions] + #data_row["RNSGAIII_N_Ss"] = [nsga_n_solutions] + #data_row["iRVEA_FEs"] = [peritr_RVEA_FEs * n_obj] + #data_row["RNSGAIII_FEs"] = [peritr_NSGA_FEs * n_obj] + 
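+                    # Record how many nondominated solutions each algorithm contributed to the
+                    # composite front in this decision-phase iteration (same bookkeeping as in the
+                    # learning phase above).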
+ + ns_nsga_decision= np.append(ns_nsga_decision, nsga_n_solutions) + ns_nums_decision= np.append(ns_nums_decision, nums_n_solutions) + ns_rnsga_decision= np.append(ns_rnsga_decision, rnsga_n_solutions) + + # R-metric calculation + ref_point = response.reshape(1, n_obj) + # normalize reference point + rp_transformer = Normalizer().fit(ref_point) + norm_rp = rp_transformer.transform(ref_point) + rmetric = rm.RMetric(problemR, norm_rp, pf=pareto_front) + # normalize solutions before sending r-metric + nsga_transformer = Normalizer().fit(int_nsga.population.objectives) + norm_nsga = nsga_transformer.transform( + int_nsga.population.objectives + ) + + nums_transformer = Normalizer().fit(int_nums.population.objectives) + norm_nums = nums_transformer.transform( + int_nums.population.objectives + ) + + rnsga_transformer = Normalizer().fit(int_rnsga.population.objectives) + norm_rnsga = rnsga_transformer.transform( + int_rnsga.population.objectives + ) + # R-metric calls for R_IGD and R_HV + + rigd_nsga, rhv_nsga = rmetric.calc(norm_nsga, others=np.vstack((norm_nums,norm_rnsga))) + rigd_nums, rhv_nums = rmetric.calc(norm_nums, others=np.vstack((norm_nsga,norm_rnsga))) + rigd_rnsga, rhv_rnsga = rmetric.calc(norm_rnsga, others=np.vstack((norm_nsga,norm_nums))) + + #data_row[ + # ["iRVEA" + excess_col for excess_col in excess_columns] + #] = [rigd_irvea, rhv_irvea] + #data_row[ + # ["RNSGAIII" + excess_col for excess_col in excess_columns] + #] = [rigd_rnsga, rhv_rnsga] + #data = data.append(data_row, ignore_index=1) + rm_nsga_decision = np.append(rm_nsga_decision, rigd_nsga) + rm_nums_decision = np.append(rm_nums_decision, rigd_nums) + rm_rnsga_decision = np.append(rm_rnsga_decision, rigd_rnsga) + + #Compute median of the decision phase + median_ns_nsga_decision = np.append(median_ns_nsga_decision, np.sum(ns_nsga_decision)) + median_rm_nsga_decision = np.append(median_rm_nsga_decision, np.sum(rm_nsga_decision)) + median_ns_nums_decision = np.append(median_ns_nums_decision, np.sum(ns_nums_decision)) + median_rm_nums_decision = np.append(median_rm_nums_decision, np.sum(rm_nums_decision)) + median_ns_rnsga_decision = np.append(median_ns_rnsga_decision, np.sum(ns_rnsga_decision)) + median_rm_rnsga_decision = np.append(median_rm_rnsga_decision, np.sum(rm_rnsga_decision)) + print(f"Results for Problem {problem_name} with {n_obj} objectives") + print ("Learning phase") + print("Ns") + print (f"{np.mean(median_ns_nsga_learning):.1f} & {np.std(median_ns_nsga_learning):.4f} & {np.mean(median_ns_nums_learning):.1f} & {np.std(median_ns_nums_learning):.4f} & {np.mean(median_ns_rnsga_learning):.1f} & {np.std(median_ns_rnsga_learning):.4f}") + print("RM") + print (f"{np.mean(median_rm_nsga_learning):.4f} & {np.std(median_rm_nsga_learning):.4f} & {np.mean(median_rm_nums_learning):.4f} & {np.std(median_rm_nums_learning):.4f} & {np.mean(median_rm_rnsga_learning):.4f} & {np.std(median_rm_rnsga_learning):.4f}") + #print (f"NSGA Ns = {np.mean(median_ns_nsga_learning)} & {np.std(median_ns_nsga_learning)}") + #print (f"NSGA RM = {np.mean(median_rm_nsga_learning)} & {np.std(median_rm_nsga_learning)}") + + print("Decision phase") + print("Ns") + print (f"{np.mean(median_ns_nsga_decision):.1f} & {np.std(median_ns_nsga_decision):.4f} & {np.mean(median_ns_nums_decision):.1f} & {np.std(median_ns_nums_decision):.4f} & {np.mean(median_ns_rnsga_decision):.1f} & {np.std(median_ns_rnsga_decision):.4f}") + print("RM") + print (f"{np.mean(median_rm_nsga_decision):.4f} & {np.std(median_rm_nsga_decision):.4f} & 
{np.mean(median_rm_nums_decision):.4f} & {np.std(median_rm_nums_decision):.4f} & {np.mean(median_rm_rnsga_decision):.4f} & {np.std(median_rm_rnsga_decision):.4f}") + #print (f"NSGA Ns = {np.mean(median_ns_nsga_decision)} & {np.std(median_ns_nsga_decision)}") + #print (f"NSGA RM = {np.mean(median_rm_nsga_decision)} & {np.std(median_rm_nsga_decision)}") + + #data.to_csv(f"refpoints{run+1}.csv", index=False) diff --git a/docs/notebooks/extendedADM_NSGA_RNSGA_NUMS_PS.py b/docs/notebooks/extendedADM_NSGA_RNSGA_NUMS_PS.py new file mode 100644 index 00000000..0b10e121 --- /dev/null +++ b/docs/notebooks/extendedADM_NSGA_RNSGA_NUMS_PS.py @@ -0,0 +1,324 @@ +import numpy as np +import pandas as pd + +import baseADM +from baseADM import * +import generatePreference as gp + +from desdeo_problem.testproblems.TestProblems import test_problem_builder +from desdeo_emo.utilities.ReferenceVectors import ReferenceVectors + +from desdeo_emo.EAs.RVEA import RVEA as NSGAIII +from desdeo_emo.EAs.NSGAIIINUMS import NSGAIIINUMS as NUMS +from desdeo_emo.EAs.RNSGAIII import RNSGAIII + + +from desdeo_emo.utilities.preference_converters import UPEMO + +from pymoo.factory import get_problem, get_reference_directions +import rmetric as rm +from sklearn.preprocessing import Normalizer +#from pymoo.config import Configuration + +#Configuration.show_compile_hint = False + +#problem_names = ["DTLZ1", "DTLZ3"] +problem_names = ["DTLZ1"] +n_objs = np.asarray([7]) # number of objectives + +K = 10 +n_vars = K + n_objs - 1 # number of variables + +num_gen_per_iter = [200] # number of generations per iteration + +algorithms = ["iNSGAIII", "NUMS", "RNSGAIII"] # algorithms to be compared + +# the followings are for formatting results +column_names = ( + ["problem", "num_obj", "iteration", "num_gens", "reference_point"] + + [algorithm + "_R_IGD" for algorithm in algorithms] + + [algorithm + "_R_HV" for algorithm in algorithms] + + [algorithm + "_N_Ss" for algorithm in algorithms] + + [algorithm + "_FEs" for algorithm in algorithms] +) +excess_columns = ["_R_IGD", "_R_HV"] +data = pd.DataFrame(columns=column_names) +data_row = pd.DataFrame(columns=column_names, index=[1]) + +# ADM parameters +L = 4 # number of iterations for the learning phase +D = 3 # number of iterations for the decision phase +lattice_resolution = 5 # density variable for creating reference vectors + +total_run = 10 + +for gen in num_gen_per_iter: + for n_obj, n_var in zip(n_objs, n_vars): + for problem_name in problem_names: + #Global arrays for median and stddev + + median_ns_nsga_learning = [] + median_rm_nsga_learning = [] + median_ns_nums_learning = [] + median_rm_nums_learning = [] + median_ns_rnsga_learning = [] + median_rm_rnsga_learning = [] + + median_ns_nsga_decision = [] + median_rm_nsga_decision = [] + median_ns_nums_decision = [] + median_rm_nums_decision = [] + median_ns_rnsga_decision = [] + median_rm_rnsga_decision = [] + + for run in range(total_run): + print(f"Problem {problem_name} Objectives {n_obj} Run {run+1} of {total_run}") + + problem = test_problem_builder( + name=problem_name, n_of_objectives=n_obj, n_of_variables=n_var + ) + problem.ideal_fitness = np.asarray([0] * n_obj) + problem.nadir_fitness = abs(np.random.normal(size=n_obj, scale=0.15)) + 1 + true_nadir = np.asarray([1] * n_obj) + # initial reference point is specified randomly + response = np.random.rand(n_obj) + # run algorithms once with the randomly generated reference point + # interactive RVEA + + int_nsga = NSGAIII(problem=problem, interact=True, n_gen_per_iter=gen) + 
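# Illustrative note (not part of the original script): the three interactive EAs
# compared here are built with identical settings; NSGAIII is the RVEA class
# imported under that alias at the top of this file, NUMS is NSGAIIINUMS, and
# RNSGAIII is the reference-point-based NSGA-III variant. A hypothetical helper
# (the name build_algorithms is illustrative only) showing the shared pattern:
def build_algorithms(problem, gen):
    """Construct the three interactive EAs with the same per-iteration budget (sketch)."""
    return (
        NSGAIII(problem=problem, interact=True, n_gen_per_iter=gen),
        NUMS(problem=problem, interact=True, n_gen_per_iter=gen),
        RNSGAIII(problem=problem, interact=True, n_gen_per_iter=gen),
    )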
int_nums = NUMS(problem=problem, interact=True, n_gen_per_iter=gen) + int_rnsga = RNSGAIII(problem=problem, interact=True, n_gen_per_iter=gen) + + int_nsga.set_interaction_type('Reference point') + int_nums.set_interaction_type('Reference point') + int_rnsga.set_interaction_type('Reference point') + + pref_int_nsga, _ = int_nsga.start() + pref_int_nums, _ = int_nums.start() + pref_int_rnsga, _ = int_rnsga.start() + + for boundrnd in range(0, n_obj): + if response[boundrnd] < int_nsga.population.problem.ideal_fitness[boundrnd]: + response[boundrnd] = int_nsga.population.problem.nadir_fitness[boundrnd] + #print(pref_int_rvea.content["dimensions_data"].columns["ideal"]) + + pref_int_nsga.response = pd.DataFrame( + [response], + columns=pref_int_nsga.content["dimensions_data"].columns, + ) + + pref_int_nums.response = pd.DataFrame( + [response], + columns=pref_int_nums.content["dimensions_data"].columns, + ) + + pref_int_rnsga.response = pd.DataFrame( + [response], + columns=pref_int_rnsga.content["dimensions_data"].columns, + ) + + pref_int_nsga,_ = int_nsga.iterate(pref_int_nsga) + pref_int_nums,_ = int_nums.iterate(pref_int_nums) + pref_int_rnsga,_ = int_rnsga.iterate(pref_int_rnsga) + + # build initial composite front + ( + nsga_n_solutions, + nums_n_solutions, + rnsga_n_solutions, + cf, + ) = generate_composite_front_with_identity_2( + int_nsga.population.objectives, int_nums.population.objectives, int_rnsga.population.objectives + ) + + # the following two lines for getting pareto front by using pymoo framework + problemR = get_problem(problem_name.lower(), n_var, n_obj) + ref_dirs = get_reference_directions( + "das-dennis", n_obj, n_partitions=12 + ) + pareto_front = problemR.pareto_front(ref_dirs) + # creates uniformly distributed reference vectors + reference_vectors = ReferenceVectors(lattice_resolution=lattice_resolution, number_of_objectives=n_obj) + + # Arrays to store values for the current run + rm_nsga_learning = np.array([]) + rm_nsga_decision = np.array([]) + ns_nsga_learning = np.array([]) + ns_nsga_decision = np.array([]) + + rm_nums_learning = np.array([]) + rm_nums_decision = np.array([]) + ns_nums_learning = np.array([]) + ns_nums_decision = np.array([]) + + rm_rnsga_learning = np.array([]) + rm_rnsga_decision = np.array([]) + ns_rnsga_learning = np.array([]) + ns_rnsga_decision = np.array([]) + + + # learning phase + for i in range(L): + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + problem_name, + n_obj, + i + 1, + gen, + ] + # After this class call, solutions inside the composite front are assigned to reference vectors + base = baseADM(cf, reference_vectors) + # generates the next reference point for the next iteration in the learning phase + response = gp.preferredSolutions4learning(base) + #print(response) + #response, reference_point = gp.generateRanges4learning(base, problem.ideal, true_nadir) + int_nsga.set_interaction_type('Preferred solutions') + int_nums.set_interaction_type('Reference point') + int_rnsga.set_interaction_type('Reference point') + + pref_int_nsga, _ = int_nsga.start() + pref_int_nums, _ = int_nums.start() + pref_int_rnsga, _ = int_rnsga.start() + + #data_row["reference_point"] = [response] + # run algorithms with the new reference point + # Interactive RVEA + pref_int_nsga.response = response[0] + pref_int_nums.response = pd.DataFrame( + response, + columns=pref_int_nums.content["dimensions_data"].columns, + ) + + pref_int_rnsga.response = pd.DataFrame( + response, + columns=pref_int_rnsga.content["dimensions_data"].columns, + 
) + + previous_NSGA_FEs = int_nsga._function_evaluation_count + previous_NUMS_FEs = int_nums._function_evaluation_count + previous_RNSGA_FEs = int_rnsga._function_evaluation_count + + pref_int_nsga,_ = int_nsga.iterate(pref_int_nsga) + pref_int_nums,_ = int_nums.iterate(pref_int_nums) + pref_int_rnsga,_ = int_rnsga.iterate(pref_int_rnsga) + + peritr_NSGA_FEs = ( + int_nsga._function_evaluation_count - previous_NSGA_FEs + ) + + peritr_NUMS_FEs = ( + int_nums._function_evaluation_count - previous_NUMS_FEs + ) + peritr_RNSGA_FEs = ( + int_rnsga._function_evaluation_count - previous_RNSGA_FEs + ) + # extend composite front with newly obtained solutions + ( + nsga_n_solutions, + nums_n_solutions, + rnsga_n_solutions, + cf, + ) = generate_composite_front_with_identity_2( + int_nsga.population.objectives, + int_nums.population.objectives, + int_rnsga.population.objectives, + cf, + ) + #data_row["iRVEA_N_Ss"] = [rvea_n_solutions] + #data_row["RNSGAIII_N_Ss"] = [nsga_n_solutions] + #data_row["iRVEA_FEs"] = [peritr_RVEA_FEs * n_obj] + #data_row["RNSGAIII_FEs"] = [peritr_NSGA_FEs * n_obj] + + + ns_nsga_learning = np.append(ns_nsga_learning, nsga_n_solutions) + ns_nums_learning = np.append(ns_nums_learning, nums_n_solutions) + ns_rnsga_learning = np.append(ns_rnsga_learning, rnsga_n_solutions) + + + # Compute cumulative sum of the learning phase + median_ns_nsga_learning = np.append(median_ns_nsga_learning, np.sum(ns_nsga_learning)) + median_ns_nums_learning = np.append(median_ns_nums_learning, np.sum(ns_nums_learning)) + median_ns_rnsga_learning = np.append(median_ns_rnsga_learning, np.sum(ns_rnsga_learning)) + + # Decision phase + # After the learning phase the reference vector which has the maximum number of assigned solutions forms ROI + max_assigned_vector = gp.get_max_assigned_vector(base.assigned_vectors) + + for i in range(D): + #print("Decision phase") + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + problem_name, + n_obj, + L + i + 1, + gen, + ] + # since composite front grows after each iteration this call should be done for each iteration + base = baseADM(cf, reference_vectors) + response = gp.preferredSolutions4Decision2(base) + #print(response) + #response, reference_point = gp.generateRanges4learning(base, problem.ideal, true_nadir) + #int_nsga.set_interaction_type('Preferred solutions') + int_nums.set_interaction_type('Preferred solutions') + int_rnsga.set_interaction_type('Preferred solutions') + + pref_int_nsga, _ = int_nsga.start() + pref_int_nums, _ = int_nums.start() + pref_int_rnsga, _ = int_rnsga.start() + + #data_row["reference_point"] = [response] + # run algorithms with the new reference point + # Interactive RVEA + pref_int_nsga.response = response + pref_int_nums.response = response + + pref_int_rnsga.response = response + + previous_NSGA_FEs = int_nsga._function_evaluation_count + previous_NUMS_FEs = int_nums._function_evaluation_count + previous_RNSGA_FEs = int_rnsga._function_evaluation_count + + pref_int_nsga,_ = int_nsga.iterate(pref_int_nsga) + pref_int_nums,_ = int_nums.iterate(pref_int_nums) + pref_int_rnsga,_ = int_rnsga.iterate(pref_int_rnsga) + + peritr_NSGA_FEs = ( + int_nsga._function_evaluation_count - previous_NSGA_FEs + ) + + peritr_NUMS_FEs = ( + int_nums._function_evaluation_count - previous_NUMS_FEs + ) + peritr_RNSGA_FEs = ( + int_rnsga._function_evaluation_count - previous_RNSGA_FEs + ) + # extend composite front with newly obtained solutions + ( + nsga_n_solutions, + nums_n_solutions, + rnsga_n_solutions, + cf, + ) = 
generate_composite_front_with_identity_2( + int_nsga.population.objectives, + int_nums.population.objectives, + int_rnsga.population.objectives, + cf, + ) + ns_nsga_decision= np.append(ns_nsga_decision, nsga_n_solutions) + ns_nums_decision= np.append(ns_nums_decision, nums_n_solutions) + ns_rnsga_decision= np.append(ns_rnsga_decision, rnsga_n_solutions) + #Compute median of the decision phase + median_ns_nsga_decision = np.append(median_ns_nsga_decision, np.sum(ns_nsga_decision)) + median_ns_nums_decision = np.append(median_ns_nums_decision, np.sum(ns_nums_decision)) + median_ns_rnsga_decision = np.append(median_ns_rnsga_decision, np.sum(ns_rnsga_decision)) + + print(f"Results for Problem {problem_name} with {n_obj} objectives") + print ("Learning phase") + print("Ns") + print (f"{np.mean(median_ns_nsga_learning):.1f} & {np.std(median_ns_nsga_learning):.4f} & {np.mean(median_ns_nums_learning):.1f} & {np.std(median_ns_nums_learning):.4f} & {np.mean(median_ns_rnsga_learning):.1f} & {np.std(median_ns_rnsga_learning):.4f}") + + + print("Decision phase") + print("Ns") + print (f"{np.mean(median_ns_nsga_decision):.1f} & {np.std(median_ns_nsga_decision):.4f} & {np.mean(median_ns_nums_decision):.1f} & {np.std(median_ns_nums_decision):.4f} & {np.mean(median_ns_rnsga_decision):.1f} & {np.std(median_ns_rnsga_decision):.4f}") + + + #data.to_csv(f"refpoints{run+1}.csv", index=False) diff --git a/docs/notebooks/extendedADM_NSGA_RNSGA_NUMS_PS_5.py b/docs/notebooks/extendedADM_NSGA_RNSGA_NUMS_PS_5.py new file mode 100644 index 00000000..3169b8c1 --- /dev/null +++ b/docs/notebooks/extendedADM_NSGA_RNSGA_NUMS_PS_5.py @@ -0,0 +1,324 @@ +import numpy as np +import pandas as pd + +import baseADM +from baseADM import * +import generatePreference as gp + +from desdeo_problem.testproblems.TestProblems import test_problem_builder +from desdeo_emo.utilities.ReferenceVectors import ReferenceVectors + +from desdeo_emo.EAs.RVEA import RVEA as NSGAIII +from desdeo_emo.EAs.NSGAIIINUMS import NSGAIIINUMS as NUMS +from desdeo_emo.EAs.RNSGAIII import RNSGAIII + + +from desdeo_emo.utilities.preference_converters import UPEMO + +from pymoo.factory import get_problem, get_reference_directions +import rmetric as rm +from sklearn.preprocessing import Normalizer +#from pymoo.config import Configuration + +#Configuration.show_compile_hint = False + +problem_names = ["DTLZ1", "DTLZ3"] +#problem_names = ["DTLZ1"] +n_objs = np.asarray([5]) # number of objectives + +K = 10 +n_vars = K + n_objs - 1 # number of variables + +num_gen_per_iter = [200] # number of generations per iteration + +algorithms = ["iNSGAIII", "NUMS", "RNSGAIII"] # algorithms to be compared + +# the followings are for formatting results +column_names = ( + ["problem", "num_obj", "iteration", "num_gens", "reference_point"] + + [algorithm + "_R_IGD" for algorithm in algorithms] + + [algorithm + "_R_HV" for algorithm in algorithms] + + [algorithm + "_N_Ss" for algorithm in algorithms] + + [algorithm + "_FEs" for algorithm in algorithms] +) +excess_columns = ["_R_IGD", "_R_HV"] +data = pd.DataFrame(columns=column_names) +data_row = pd.DataFrame(columns=column_names, index=[1]) + +# ADM parameters +L = 4 # number of iterations for the learning phase +D = 3 # number of iterations for the decision phase +lattice_resolution = 5 # density variable for creating reference vectors + +total_run = 10 + +for gen in num_gen_per_iter: + for n_obj, n_var in zip(n_objs, n_vars): + for problem_name in problem_names: + #Global arrays for median and stddev + + median_ns_nsga_learning = 
[] + median_rm_nsga_learning = [] + median_ns_nums_learning = [] + median_rm_nums_learning = [] + median_ns_rnsga_learning = [] + median_rm_rnsga_learning = [] + + median_ns_nsga_decision = [] + median_rm_nsga_decision = [] + median_ns_nums_decision = [] + median_rm_nums_decision = [] + median_ns_rnsga_decision = [] + median_rm_rnsga_decision = [] + + for run in range(total_run): + print(f"Problem {problem_name} Objectives {n_obj} Run {run+1} of {total_run}") + + problem = test_problem_builder( + name=problem_name, n_of_objectives=n_obj, n_of_variables=n_var + ) + problem.ideal_fitness = np.asarray([0] * n_obj) + problem.nadir_fitness = abs(np.random.normal(size=n_obj, scale=0.15)) + 1 + true_nadir = np.asarray([1] * n_obj) + # initial reference point is specified randomly + response = np.random.rand(n_obj) + # run algorithms once with the randomly generated reference point + # interactive RVEA + + int_nsga = NSGAIII(problem=problem, interact=True, n_gen_per_iter=gen) + int_nums = NUMS(problem=problem, interact=True, n_gen_per_iter=gen) + int_rnsga = RNSGAIII(problem=problem, interact=True, n_gen_per_iter=gen) + + int_nsga.set_interaction_type('Reference point') + int_nums.set_interaction_type('Reference point') + int_rnsga.set_interaction_type('Reference point') + + pref_int_nsga, _ = int_nsga.start() + pref_int_nums, _ = int_nums.start() + pref_int_rnsga, _ = int_rnsga.start() + + for boundrnd in range(0, n_obj): + if response[boundrnd] < int_nsga.population.problem.ideal_fitness[boundrnd]: + response[boundrnd] = int_nsga.population.problem.nadir_fitness[boundrnd] + #print(pref_int_rvea.content["dimensions_data"].columns["ideal"]) + + pref_int_nsga.response = pd.DataFrame( + [response], + columns=pref_int_nsga.content["dimensions_data"].columns, + ) + + pref_int_nums.response = pd.DataFrame( + [response], + columns=pref_int_nums.content["dimensions_data"].columns, + ) + + pref_int_rnsga.response = pd.DataFrame( + [response], + columns=pref_int_rnsga.content["dimensions_data"].columns, + ) + + pref_int_nsga,_ = int_nsga.iterate(pref_int_nsga) + pref_int_nums,_ = int_nums.iterate(pref_int_nums) + pref_int_rnsga,_ = int_rnsga.iterate(pref_int_rnsga) + + # build initial composite front + ( + nsga_n_solutions, + nums_n_solutions, + rnsga_n_solutions, + cf, + ) = generate_composite_front_with_identity_2( + int_nsga.population.objectives, int_nums.population.objectives, int_rnsga.population.objectives + ) + + # the following two lines for getting pareto front by using pymoo framework + problemR = get_problem(problem_name.lower(), n_var, n_obj) + ref_dirs = get_reference_directions( + "das-dennis", n_obj, n_partitions=12 + ) + pareto_front = problemR.pareto_front(ref_dirs) + # creates uniformly distributed reference vectors + reference_vectors = ReferenceVectors(lattice_resolution=lattice_resolution, number_of_objectives=n_obj) + + # Arrays to store values for the current run + rm_nsga_learning = np.array([]) + rm_nsga_decision = np.array([]) + ns_nsga_learning = np.array([]) + ns_nsga_decision = np.array([]) + + rm_nums_learning = np.array([]) + rm_nums_decision = np.array([]) + ns_nums_learning = np.array([]) + ns_nums_decision = np.array([]) + + rm_rnsga_learning = np.array([]) + rm_rnsga_decision = np.array([]) + ns_rnsga_learning = np.array([]) + ns_rnsga_decision = np.array([]) + + + # learning phase + for i in range(L): + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + problem_name, + n_obj, + i + 1, + gen, + ] + # After this class call, solutions inside the composite 
front are assigned to reference vectors + base = baseADM(cf, reference_vectors) + # generates the next reference point for the next iteration in the learning phase + response = gp.preferredSolutions4learning(base) + #print(response) + #response, reference_point = gp.generateRanges4learning(base, problem.ideal, true_nadir) + int_nsga.set_interaction_type('Preferred solutions') + int_nums.set_interaction_type('Reference point') + int_rnsga.set_interaction_type('Reference point') + + pref_int_nsga, _ = int_nsga.start() + pref_int_nums, _ = int_nums.start() + pref_int_rnsga, _ = int_rnsga.start() + + #data_row["reference_point"] = [response] + # run algorithms with the new reference point + # Interactive RVEA + pref_int_nsga.response = response[0] + pref_int_nums.response = pd.DataFrame( + response, + columns=pref_int_nums.content["dimensions_data"].columns, + ) + + pref_int_rnsga.response = pd.DataFrame( + response, + columns=pref_int_rnsga.content["dimensions_data"].columns, + ) + + previous_NSGA_FEs = int_nsga._function_evaluation_count + previous_NUMS_FEs = int_nums._function_evaluation_count + previous_RNSGA_FEs = int_rnsga._function_evaluation_count + + pref_int_nsga,_ = int_nsga.iterate(pref_int_nsga) + pref_int_nums,_ = int_nums.iterate(pref_int_nums) + pref_int_rnsga,_ = int_rnsga.iterate(pref_int_rnsga) + + peritr_NSGA_FEs = ( + int_nsga._function_evaluation_count - previous_NSGA_FEs + ) + + peritr_NUMS_FEs = ( + int_nums._function_evaluation_count - previous_NUMS_FEs + ) + peritr_RNSGA_FEs = ( + int_rnsga._function_evaluation_count - previous_RNSGA_FEs + ) + # extend composite front with newly obtained solutions + ( + nsga_n_solutions, + nums_n_solutions, + rnsga_n_solutions, + cf, + ) = generate_composite_front_with_identity_2( + int_nsga.population.objectives, + int_nums.population.objectives, + int_rnsga.population.objectives, + cf, + ) + #data_row["iRVEA_N_Ss"] = [rvea_n_solutions] + #data_row["RNSGAIII_N_Ss"] = [nsga_n_solutions] + #data_row["iRVEA_FEs"] = [peritr_RVEA_FEs * n_obj] + #data_row["RNSGAIII_FEs"] = [peritr_NSGA_FEs * n_obj] + + + ns_nsga_learning = np.append(ns_nsga_learning, nsga_n_solutions) + ns_nums_learning = np.append(ns_nums_learning, nums_n_solutions) + ns_rnsga_learning = np.append(ns_rnsga_learning, rnsga_n_solutions) + + + # Compute cumulative sum of the learning phase + median_ns_nsga_learning = np.append(median_ns_nsga_learning, np.sum(ns_nsga_learning)) + median_ns_nums_learning = np.append(median_ns_nums_learning, np.sum(ns_nums_learning)) + median_ns_rnsga_learning = np.append(median_ns_rnsga_learning, np.sum(ns_rnsga_learning)) + + # Decision phase + # After the learning phase the reference vector which has the maximum number of assigned solutions forms ROI + max_assigned_vector = gp.get_max_assigned_vector(base.assigned_vectors) + + for i in range(D): + #print("Decision phase") + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + problem_name, + n_obj, + L + i + 1, + gen, + ] + # since composite front grows after each iteration this call should be done for each iteration + base = baseADM(cf, reference_vectors) + response = gp.preferredSolutions4Decision2(base) + #print(response) + #response, reference_point = gp.generateRanges4learning(base, problem.ideal, true_nadir) + #int_nsga.set_interaction_type('Preferred solutions') + int_nums.set_interaction_type('Preferred solutions') + int_rnsga.set_interaction_type('Preferred solutions') + + pref_int_nsga, _ = int_nsga.start() + pref_int_nums, _ = int_nums.start() + pref_int_rnsga, _ = 
int_rnsga.start() + + #data_row["reference_point"] = [response] + # run algorithms with the new reference point + # Interactive RVEA + pref_int_nsga.response = response + pref_int_nums.response = response + + pref_int_rnsga.response = response + + previous_NSGA_FEs = int_nsga._function_evaluation_count + previous_NUMS_FEs = int_nums._function_evaluation_count + previous_RNSGA_FEs = int_rnsga._function_evaluation_count + + pref_int_nsga,_ = int_nsga.iterate(pref_int_nsga) + pref_int_nums,_ = int_nums.iterate(pref_int_nums) + pref_int_rnsga,_ = int_rnsga.iterate(pref_int_rnsga) + + peritr_NSGA_FEs = ( + int_nsga._function_evaluation_count - previous_NSGA_FEs + ) + + peritr_NUMS_FEs = ( + int_nums._function_evaluation_count - previous_NUMS_FEs + ) + peritr_RNSGA_FEs = ( + int_rnsga._function_evaluation_count - previous_RNSGA_FEs + ) + # extend composite front with newly obtained solutions + ( + nsga_n_solutions, + nums_n_solutions, + rnsga_n_solutions, + cf, + ) = generate_composite_front_with_identity_2( + int_nsga.population.objectives, + int_nums.population.objectives, + int_rnsga.population.objectives, + cf, + ) + ns_nsga_decision= np.append(ns_nsga_decision, nsga_n_solutions) + ns_nums_decision= np.append(ns_nums_decision, nums_n_solutions) + ns_rnsga_decision= np.append(ns_rnsga_decision, rnsga_n_solutions) + #Compute median of the decision phase + median_ns_nsga_decision = np.append(median_ns_nsga_decision, np.sum(ns_nsga_decision)) + median_ns_nums_decision = np.append(median_ns_nums_decision, np.sum(ns_nums_decision)) + median_ns_rnsga_decision = np.append(median_ns_rnsga_decision, np.sum(ns_rnsga_decision)) + + print(f"Results for Problem {problem_name} with {n_obj} objectives") + print ("Learning phase") + print("Ns") + print (f"{np.mean(median_ns_nsga_learning):.1f} & {np.std(median_ns_nsga_learning):.4f} & {np.mean(median_ns_nums_learning):.1f} & {np.std(median_ns_nums_learning):.4f} & {np.mean(median_ns_rnsga_learning):.1f} & {np.std(median_ns_rnsga_learning):.4f}") + + + print("Decision phase") + print("Ns") + print (f"{np.mean(median_ns_nsga_decision):.1f} & {np.std(median_ns_nsga_decision):.4f} & {np.mean(median_ns_nums_decision):.1f} & {np.std(median_ns_nums_decision):.4f} & {np.mean(median_ns_rnsga_decision):.1f} & {np.std(median_ns_rnsga_decision):.4f}") + + + #data.to_csv(f"refpoints{run+1}.csv", index=False) diff --git a/docs/notebooks/extendedADM_NSGA_RNSGA_NUMS_PS_7.py b/docs/notebooks/extendedADM_NSGA_RNSGA_NUMS_PS_7.py new file mode 100644 index 00000000..918973f7 --- /dev/null +++ b/docs/notebooks/extendedADM_NSGA_RNSGA_NUMS_PS_7.py @@ -0,0 +1,324 @@ +import numpy as np +import pandas as pd + +import baseADM +from baseADM import * +import generatePreference as gp + +from desdeo_problem.testproblems.TestProblems import test_problem_builder +from desdeo_emo.utilities.ReferenceVectors import ReferenceVectors + +from desdeo_emo.EAs.RVEA import RVEA as NSGAIII +from desdeo_emo.EAs.NSGAIIINUMS import NSGAIIINUMS as NUMS +from desdeo_emo.EAs.RNSGAIII import RNSGAIII + + +from desdeo_emo.utilities.preference_converters import UPEMO + +from pymoo.factory import get_problem, get_reference_directions +import rmetric as rm +from sklearn.preprocessing import Normalizer +#from pymoo.config import Configuration + +#Configuration.show_compile_hint = False + +problem_names = ["DTLZ1", "DTLZ3"] +#problem_names = ["DTLZ1"] +n_objs = np.asarray([7]) # number of objectives + +K = 10 +n_vars = K + n_objs - 1 # number of variables + +num_gen_per_iter = [200] # number of generations per 
iteration + +algorithms = ["iNSGAIII", "NUMS", "RNSGAIII"] # algorithms to be compared + +# the followings are for formatting results +column_names = ( + ["problem", "num_obj", "iteration", "num_gens", "reference_point"] + + [algorithm + "_R_IGD" for algorithm in algorithms] + + [algorithm + "_R_HV" for algorithm in algorithms] + + [algorithm + "_N_Ss" for algorithm in algorithms] + + [algorithm + "_FEs" for algorithm in algorithms] +) +excess_columns = ["_R_IGD", "_R_HV"] +data = pd.DataFrame(columns=column_names) +data_row = pd.DataFrame(columns=column_names, index=[1]) + +# ADM parameters +L = 4 # number of iterations for the learning phase +D = 3 # number of iterations for the decision phase +lattice_resolution = 5 # density variable for creating reference vectors + +total_run = 10 + +for gen in num_gen_per_iter: + for n_obj, n_var in zip(n_objs, n_vars): + for problem_name in problem_names: + #Global arrays for median and stddev + + median_ns_nsga_learning = [] + median_rm_nsga_learning = [] + median_ns_nums_learning = [] + median_rm_nums_learning = [] + median_ns_rnsga_learning = [] + median_rm_rnsga_learning = [] + + median_ns_nsga_decision = [] + median_rm_nsga_decision = [] + median_ns_nums_decision = [] + median_rm_nums_decision = [] + median_ns_rnsga_decision = [] + median_rm_rnsga_decision = [] + + for run in range(total_run): + print(f"Problem {problem_name} Objectives {n_obj} Run {run+1} of {total_run}") + + problem = test_problem_builder( + name=problem_name, n_of_objectives=n_obj, n_of_variables=n_var + ) + problem.ideal_fitness = np.asarray([0] * n_obj) + problem.nadir_fitness = abs(np.random.normal(size=n_obj, scale=0.15)) + 1 + true_nadir = np.asarray([1] * n_obj) + # initial reference point is specified randomly + response = np.random.rand(n_obj) + # run algorithms once with the randomly generated reference point + # interactive RVEA + + int_nsga = NSGAIII(problem=problem, interact=True, n_gen_per_iter=gen) + int_nums = NUMS(problem=problem, interact=True, n_gen_per_iter=gen) + int_rnsga = RNSGAIII(problem=problem, interact=True, n_gen_per_iter=gen) + + int_nsga.set_interaction_type('Reference point') + int_nums.set_interaction_type('Reference point') + int_rnsga.set_interaction_type('Reference point') + + pref_int_nsga, _ = int_nsga.start() + pref_int_nums, _ = int_nums.start() + pref_int_rnsga, _ = int_rnsga.start() + + for boundrnd in range(0, n_obj): + if response[boundrnd] < int_nsga.population.problem.ideal_fitness[boundrnd]: + response[boundrnd] = int_nsga.population.problem.nadir_fitness[boundrnd] + #print(pref_int_rvea.content["dimensions_data"].columns["ideal"]) + + pref_int_nsga.response = pd.DataFrame( + [response], + columns=pref_int_nsga.content["dimensions_data"].columns, + ) + + pref_int_nums.response = pd.DataFrame( + [response], + columns=pref_int_nums.content["dimensions_data"].columns, + ) + + pref_int_rnsga.response = pd.DataFrame( + [response], + columns=pref_int_rnsga.content["dimensions_data"].columns, + ) + + pref_int_nsga,_ = int_nsga.iterate(pref_int_nsga) + pref_int_nums,_ = int_nums.iterate(pref_int_nums) + pref_int_rnsga,_ = int_rnsga.iterate(pref_int_rnsga) + + # build initial composite front + ( + nsga_n_solutions, + nums_n_solutions, + rnsga_n_solutions, + cf, + ) = generate_composite_front_with_identity_2( + int_nsga.population.objectives, int_nums.population.objectives, int_rnsga.population.objectives + ) + + # the following two lines for getting pareto front by using pymoo framework + problemR = get_problem(problem_name.lower(), 
n_var, n_obj) + ref_dirs = get_reference_directions( + "das-dennis", n_obj, n_partitions=12 + ) + pareto_front = problemR.pareto_front(ref_dirs) + # creates uniformly distributed reference vectors + reference_vectors = ReferenceVectors(lattice_resolution=lattice_resolution, number_of_objectives=n_obj) + + # Arrays to store values for the current run + rm_nsga_learning = np.array([]) + rm_nsga_decision = np.array([]) + ns_nsga_learning = np.array([]) + ns_nsga_decision = np.array([]) + + rm_nums_learning = np.array([]) + rm_nums_decision = np.array([]) + ns_nums_learning = np.array([]) + ns_nums_decision = np.array([]) + + rm_rnsga_learning = np.array([]) + rm_rnsga_decision = np.array([]) + ns_rnsga_learning = np.array([]) + ns_rnsga_decision = np.array([]) + + + # learning phase + for i in range(L): + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + problem_name, + n_obj, + i + 1, + gen, + ] + # After this class call, solutions inside the composite front are assigned to reference vectors + base = baseADM(cf, reference_vectors) + # generates the next reference point for the next iteration in the learning phase + response = gp.preferredSolutions4learning(base) + #print(response) + #response, reference_point = gp.generateRanges4learning(base, problem.ideal, true_nadir) + int_nsga.set_interaction_type('Preferred solutions') + int_nums.set_interaction_type('Reference point') + int_rnsga.set_interaction_type('Reference point') + + pref_int_nsga, _ = int_nsga.start() + pref_int_nums, _ = int_nums.start() + pref_int_rnsga, _ = int_rnsga.start() + + #data_row["reference_point"] = [response] + # run algorithms with the new reference point + # Interactive RVEA + pref_int_nsga.response = response[0] + pref_int_nums.response = pd.DataFrame( + response, + columns=pref_int_nums.content["dimensions_data"].columns, + ) + + pref_int_rnsga.response = pd.DataFrame( + response, + columns=pref_int_rnsga.content["dimensions_data"].columns, + ) + + previous_NSGA_FEs = int_nsga._function_evaluation_count + previous_NUMS_FEs = int_nums._function_evaluation_count + previous_RNSGA_FEs = int_rnsga._function_evaluation_count + + pref_int_nsga,_ = int_nsga.iterate(pref_int_nsga) + pref_int_nums,_ = int_nums.iterate(pref_int_nums) + pref_int_rnsga,_ = int_rnsga.iterate(pref_int_rnsga) + + peritr_NSGA_FEs = ( + int_nsga._function_evaluation_count - previous_NSGA_FEs + ) + + peritr_NUMS_FEs = ( + int_nums._function_evaluation_count - previous_NUMS_FEs + ) + peritr_RNSGA_FEs = ( + int_rnsga._function_evaluation_count - previous_RNSGA_FEs + ) + # extend composite front with newly obtained solutions + ( + nsga_n_solutions, + nums_n_solutions, + rnsga_n_solutions, + cf, + ) = generate_composite_front_with_identity_2( + int_nsga.population.objectives, + int_nums.population.objectives, + int_rnsga.population.objectives, + cf, + ) + #data_row["iRVEA_N_Ss"] = [rvea_n_solutions] + #data_row["RNSGAIII_N_Ss"] = [nsga_n_solutions] + #data_row["iRVEA_FEs"] = [peritr_RVEA_FEs * n_obj] + #data_row["RNSGAIII_FEs"] = [peritr_NSGA_FEs * n_obj] + + + ns_nsga_learning = np.append(ns_nsga_learning, nsga_n_solutions) + ns_nums_learning = np.append(ns_nums_learning, nums_n_solutions) + ns_rnsga_learning = np.append(ns_rnsga_learning, rnsga_n_solutions) + + + # Compute cumulative sum of the learning phase + median_ns_nsga_learning = np.append(median_ns_nsga_learning, np.sum(ns_nsga_learning)) + median_ns_nums_learning = np.append(median_ns_nums_learning, np.sum(ns_nums_learning)) + median_ns_rnsga_learning = 
np.append(median_ns_rnsga_learning, np.sum(ns_rnsga_learning)) + + # Decision phase + # After the learning phase the reference vector which has the maximum number of assigned solutions forms ROI + max_assigned_vector = gp.get_max_assigned_vector(base.assigned_vectors) + + for i in range(D): + #print("Decision phase") + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + problem_name, + n_obj, + L + i + 1, + gen, + ] + # since composite front grows after each iteration this call should be done for each iteration + base = baseADM(cf, reference_vectors) + response = gp.preferredSolutions4Decision2(base) + #print(response) + #response, reference_point = gp.generateRanges4learning(base, problem.ideal, true_nadir) + #int_nsga.set_interaction_type('Preferred solutions') + int_nums.set_interaction_type('Preferred solutions') + int_rnsga.set_interaction_type('Preferred solutions') + + pref_int_nsga, _ = int_nsga.start() + pref_int_nums, _ = int_nums.start() + pref_int_rnsga, _ = int_rnsga.start() + + #data_row["reference_point"] = [response] + # run algorithms with the new reference point + # Interactive RVEA + pref_int_nsga.response = response + pref_int_nums.response = response + + pref_int_rnsga.response = response + + previous_NSGA_FEs = int_nsga._function_evaluation_count + previous_NUMS_FEs = int_nums._function_evaluation_count + previous_RNSGA_FEs = int_rnsga._function_evaluation_count + + pref_int_nsga,_ = int_nsga.iterate(pref_int_nsga) + pref_int_nums,_ = int_nums.iterate(pref_int_nums) + pref_int_rnsga,_ = int_rnsga.iterate(pref_int_rnsga) + + peritr_NSGA_FEs = ( + int_nsga._function_evaluation_count - previous_NSGA_FEs + ) + + peritr_NUMS_FEs = ( + int_nums._function_evaluation_count - previous_NUMS_FEs + ) + peritr_RNSGA_FEs = ( + int_rnsga._function_evaluation_count - previous_RNSGA_FEs + ) + # extend composite front with newly obtained solutions + ( + nsga_n_solutions, + nums_n_solutions, + rnsga_n_solutions, + cf, + ) = generate_composite_front_with_identity_2( + int_nsga.population.objectives, + int_nums.population.objectives, + int_rnsga.population.objectives, + cf, + ) + ns_nsga_decision= np.append(ns_nsga_decision, nsga_n_solutions) + ns_nums_decision= np.append(ns_nums_decision, nums_n_solutions) + ns_rnsga_decision= np.append(ns_rnsga_decision, rnsga_n_solutions) + #Compute median of the decision phase + median_ns_nsga_decision = np.append(median_ns_nsga_decision, np.sum(ns_nsga_decision)) + median_ns_nums_decision = np.append(median_ns_nums_decision, np.sum(ns_nums_decision)) + median_ns_rnsga_decision = np.append(median_ns_rnsga_decision, np.sum(ns_rnsga_decision)) + + print(f"Results for Problem {problem_name} with {n_obj} objectives") + print ("Learning phase") + print("Ns") + print (f"{np.mean(median_ns_nsga_learning):.1f} & {np.std(median_ns_nsga_learning):.4f} & {np.mean(median_ns_nums_learning):.1f} & {np.std(median_ns_nums_learning):.4f} & {np.mean(median_ns_rnsga_learning):.1f} & {np.std(median_ns_rnsga_learning):.4f}") + + + print("Decision phase") + print("Ns") + print (f"{np.mean(median_ns_nsga_decision):.1f} & {np.std(median_ns_nsga_decision):.4f} & {np.mean(median_ns_nums_decision):.1f} & {np.std(median_ns_nums_decision):.4f} & {np.mean(median_ns_rnsga_decision):.1f} & {np.std(median_ns_rnsga_decision):.4f}") + + + #data.to_csv(f"refpoints{run+1}.csv", index=False) diff --git a/docs/notebooks/extendedADM_NSGA_RNSGA_NUMS_PS_9.py b/docs/notebooks/extendedADM_NSGA_RNSGA_NUMS_PS_9.py new file mode 100644 index 00000000..7743c2f1 --- /dev/null +++ 
b/docs/notebooks/extendedADM_NSGA_RNSGA_NUMS_PS_9.py @@ -0,0 +1,324 @@ +import numpy as np +import pandas as pd + +import baseADM +from baseADM import * +import generatePreference as gp + +from desdeo_problem.testproblems.TestProblems import test_problem_builder +from desdeo_emo.utilities.ReferenceVectors import ReferenceVectors + +from desdeo_emo.EAs.RVEA import RVEA as NSGAIII +from desdeo_emo.EAs.NSGAIIINUMS import NSGAIIINUMS as NUMS +from desdeo_emo.EAs.RNSGAIII import RNSGAIII + + +from desdeo_emo.utilities.preference_converters import UPEMO + +from pymoo.factory import get_problem, get_reference_directions +import rmetric as rm +from sklearn.preprocessing import Normalizer +#from pymoo.config import Configuration + +#Configuration.show_compile_hint = False + +problem_names = ["DTLZ1", "DTLZ3"] +#problem_names = ["DTLZ1"] +n_objs = np.asarray([9]) # number of objectives + +K = 10 +n_vars = K + n_objs - 1 # number of variables + +num_gen_per_iter = [200] # number of generations per iteration + +algorithms = ["iNSGAIII", "NUMS", "RNSGAIII"] # algorithms to be compared + +# the followings are for formatting results +column_names = ( + ["problem", "num_obj", "iteration", "num_gens", "reference_point"] + + [algorithm + "_R_IGD" for algorithm in algorithms] + + [algorithm + "_R_HV" for algorithm in algorithms] + + [algorithm + "_N_Ss" for algorithm in algorithms] + + [algorithm + "_FEs" for algorithm in algorithms] +) +excess_columns = ["_R_IGD", "_R_HV"] +data = pd.DataFrame(columns=column_names) +data_row = pd.DataFrame(columns=column_names, index=[1]) + +# ADM parameters +L = 4 # number of iterations for the learning phase +D = 3 # number of iterations for the decision phase +lattice_resolution = 5 # density variable for creating reference vectors + +total_run = 10 + +for gen in num_gen_per_iter: + for n_obj, n_var in zip(n_objs, n_vars): + for problem_name in problem_names: + #Global arrays for median and stddev + + median_ns_nsga_learning = [] + median_rm_nsga_learning = [] + median_ns_nums_learning = [] + median_rm_nums_learning = [] + median_ns_rnsga_learning = [] + median_rm_rnsga_learning = [] + + median_ns_nsga_decision = [] + median_rm_nsga_decision = [] + median_ns_nums_decision = [] + median_rm_nums_decision = [] + median_ns_rnsga_decision = [] + median_rm_rnsga_decision = [] + + for run in range(total_run): + print(f"Problem {problem_name} Objectives {n_obj} Run {run+1} of {total_run}") + + problem = test_problem_builder( + name=problem_name, n_of_objectives=n_obj, n_of_variables=n_var + ) + problem.ideal_fitness = np.asarray([0] * n_obj) + problem.nadir_fitness = abs(np.random.normal(size=n_obj, scale=0.15)) + 1 + true_nadir = np.asarray([1] * n_obj) + # initial reference point is specified randomly + response = np.random.rand(n_obj) + # run algorithms once with the randomly generated reference point + # interactive RVEA + + int_nsga = NSGAIII(problem=problem, interact=True, n_gen_per_iter=gen) + int_nums = NUMS(problem=problem, interact=True, n_gen_per_iter=gen) + int_rnsga = RNSGAIII(problem=problem, interact=True, n_gen_per_iter=gen) + + int_nsga.set_interaction_type('Reference point') + int_nums.set_interaction_type('Reference point') + int_rnsga.set_interaction_type('Reference point') + + pref_int_nsga, _ = int_nsga.start() + pref_int_nums, _ = int_nums.start() + pref_int_rnsga, _ = int_rnsga.start() + + for boundrnd in range(0, n_obj): + if response[boundrnd] < int_nsga.population.problem.ideal_fitness[boundrnd]: + response[boundrnd] = 
int_nsga.population.problem.nadir_fitness[boundrnd] + #print(pref_int_rvea.content["dimensions_data"].columns["ideal"]) + + pref_int_nsga.response = pd.DataFrame( + [response], + columns=pref_int_nsga.content["dimensions_data"].columns, + ) + + pref_int_nums.response = pd.DataFrame( + [response], + columns=pref_int_nums.content["dimensions_data"].columns, + ) + + pref_int_rnsga.response = pd.DataFrame( + [response], + columns=pref_int_rnsga.content["dimensions_data"].columns, + ) + + pref_int_nsga,_ = int_nsga.iterate(pref_int_nsga) + pref_int_nums,_ = int_nums.iterate(pref_int_nums) + pref_int_rnsga,_ = int_rnsga.iterate(pref_int_rnsga) + + # build initial composite front + ( + nsga_n_solutions, + nums_n_solutions, + rnsga_n_solutions, + cf, + ) = generate_composite_front_with_identity_2( + int_nsga.population.objectives, int_nums.population.objectives, int_rnsga.population.objectives + ) + + # the following two lines for getting pareto front by using pymoo framework + problemR = get_problem(problem_name.lower(), n_var, n_obj) + ref_dirs = get_reference_directions( + "das-dennis", n_obj, n_partitions=12 + ) + pareto_front = problemR.pareto_front(ref_dirs) + # creates uniformly distributed reference vectors + reference_vectors = ReferenceVectors(lattice_resolution=lattice_resolution, number_of_objectives=n_obj) + + # Arrays to store values for the current run + rm_nsga_learning = np.array([]) + rm_nsga_decision = np.array([]) + ns_nsga_learning = np.array([]) + ns_nsga_decision = np.array([]) + + rm_nums_learning = np.array([]) + rm_nums_decision = np.array([]) + ns_nums_learning = np.array([]) + ns_nums_decision = np.array([]) + + rm_rnsga_learning = np.array([]) + rm_rnsga_decision = np.array([]) + ns_rnsga_learning = np.array([]) + ns_rnsga_decision = np.array([]) + + + # learning phase + for i in range(L): + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + problem_name, + n_obj, + i + 1, + gen, + ] + # After this class call, solutions inside the composite front are assigned to reference vectors + base = baseADM(cf, reference_vectors) + # generates the next reference point for the next iteration in the learning phase + response = gp.preferredSolutions4learning(base) + #print(response) + #response, reference_point = gp.generateRanges4learning(base, problem.ideal, true_nadir) + int_nsga.set_interaction_type('Preferred solutions') + int_nums.set_interaction_type('Reference point') + int_rnsga.set_interaction_type('Reference point') + + pref_int_nsga, _ = int_nsga.start() + pref_int_nums, _ = int_nums.start() + pref_int_rnsga, _ = int_rnsga.start() + + #data_row["reference_point"] = [response] + # run algorithms with the new reference point + # Interactive RVEA + pref_int_nsga.response = response[0] + pref_int_nums.response = pd.DataFrame( + response, + columns=pref_int_nums.content["dimensions_data"].columns, + ) + + pref_int_rnsga.response = pd.DataFrame( + response, + columns=pref_int_rnsga.content["dimensions_data"].columns, + ) + + previous_NSGA_FEs = int_nsga._function_evaluation_count + previous_NUMS_FEs = int_nums._function_evaluation_count + previous_RNSGA_FEs = int_rnsga._function_evaluation_count + + pref_int_nsga,_ = int_nsga.iterate(pref_int_nsga) + pref_int_nums,_ = int_nums.iterate(pref_int_nums) + pref_int_rnsga,_ = int_rnsga.iterate(pref_int_rnsga) + + peritr_NSGA_FEs = ( + int_nsga._function_evaluation_count - previous_NSGA_FEs + ) + + peritr_NUMS_FEs = ( + int_nums._function_evaluation_count - previous_NUMS_FEs + ) + peritr_RNSGA_FEs = ( + 
int_rnsga._function_evaluation_count - previous_RNSGA_FEs + ) + # extend composite front with newly obtained solutions + ( + nsga_n_solutions, + nums_n_solutions, + rnsga_n_solutions, + cf, + ) = generate_composite_front_with_identity_2( + int_nsga.population.objectives, + int_nums.population.objectives, + int_rnsga.population.objectives, + cf, + ) + #data_row["iRVEA_N_Ss"] = [rvea_n_solutions] + #data_row["RNSGAIII_N_Ss"] = [nsga_n_solutions] + #data_row["iRVEA_FEs"] = [peritr_RVEA_FEs * n_obj] + #data_row["RNSGAIII_FEs"] = [peritr_NSGA_FEs * n_obj] + + + ns_nsga_learning = np.append(ns_nsga_learning, nsga_n_solutions) + ns_nums_learning = np.append(ns_nums_learning, nums_n_solutions) + ns_rnsga_learning = np.append(ns_rnsga_learning, rnsga_n_solutions) + + + # Compute cumulative sum of the learning phase + median_ns_nsga_learning = np.append(median_ns_nsga_learning, np.sum(ns_nsga_learning)) + median_ns_nums_learning = np.append(median_ns_nums_learning, np.sum(ns_nums_learning)) + median_ns_rnsga_learning = np.append(median_ns_rnsga_learning, np.sum(ns_rnsga_learning)) + + # Decision phase + # After the learning phase the reference vector which has the maximum number of assigned solutions forms ROI + max_assigned_vector = gp.get_max_assigned_vector(base.assigned_vectors) + + for i in range(D): + #print("Decision phase") + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + problem_name, + n_obj, + L + i + 1, + gen, + ] + # since composite front grows after each iteration this call should be done for each iteration + base = baseADM(cf, reference_vectors) + response = gp.preferredSolutions4Decision2(base) + #print(response) + #response, reference_point = gp.generateRanges4learning(base, problem.ideal, true_nadir) + #int_nsga.set_interaction_type('Preferred solutions') + int_nums.set_interaction_type('Preferred solutions') + int_rnsga.set_interaction_type('Preferred solutions') + + pref_int_nsga, _ = int_nsga.start() + pref_int_nums, _ = int_nums.start() + pref_int_rnsga, _ = int_rnsga.start() + + #data_row["reference_point"] = [response] + # run algorithms with the new reference point + # Interactive RVEA + pref_int_nsga.response = response + pref_int_nums.response = response + + pref_int_rnsga.response = response + + previous_NSGA_FEs = int_nsga._function_evaluation_count + previous_NUMS_FEs = int_nums._function_evaluation_count + previous_RNSGA_FEs = int_rnsga._function_evaluation_count + + pref_int_nsga,_ = int_nsga.iterate(pref_int_nsga) + pref_int_nums,_ = int_nums.iterate(pref_int_nums) + pref_int_rnsga,_ = int_rnsga.iterate(pref_int_rnsga) + + peritr_NSGA_FEs = ( + int_nsga._function_evaluation_count - previous_NSGA_FEs + ) + + peritr_NUMS_FEs = ( + int_nums._function_evaluation_count - previous_NUMS_FEs + ) + peritr_RNSGA_FEs = ( + int_rnsga._function_evaluation_count - previous_RNSGA_FEs + ) + # extend composite front with newly obtained solutions + ( + nsga_n_solutions, + nums_n_solutions, + rnsga_n_solutions, + cf, + ) = generate_composite_front_with_identity_2( + int_nsga.population.objectives, + int_nums.population.objectives, + int_rnsga.population.objectives, + cf, + ) + ns_nsga_decision= np.append(ns_nsga_decision, nsga_n_solutions) + ns_nums_decision= np.append(ns_nums_decision, nums_n_solutions) + ns_rnsga_decision= np.append(ns_rnsga_decision, rnsga_n_solutions) + #Compute median of the decision phase + median_ns_nsga_decision = np.append(median_ns_nsga_decision, np.sum(ns_nsga_decision)) + median_ns_nums_decision = np.append(median_ns_nums_decision, 
np.sum(ns_nums_decision)) + median_ns_rnsga_decision = np.append(median_ns_rnsga_decision, np.sum(ns_rnsga_decision)) + + print(f"Results for Problem {problem_name} with {n_obj} objectives") + print ("Learning phase") + print("Ns") + print (f"{np.mean(median_ns_nsga_learning):.1f} & {np.std(median_ns_nsga_learning):.4f} & {np.mean(median_ns_nums_learning):.1f} & {np.std(median_ns_nums_learning):.4f} & {np.mean(median_ns_rnsga_learning):.1f} & {np.std(median_ns_rnsga_learning):.4f}") + + + print("Decision phase") + print("Ns") + print (f"{np.mean(median_ns_nsga_decision):.1f} & {np.std(median_ns_nsga_decision):.4f} & {np.mean(median_ns_nums_decision):.1f} & {np.std(median_ns_nums_decision):.4f} & {np.mean(median_ns_rnsga_decision):.1f} & {np.std(median_ns_rnsga_decision):.4f}") + + + #data.to_csv(f"refpoints{run+1}.csv", index=False) diff --git a/docs/notebooks/extendedADM_NSGA_RNSGA_NUMS_Ranges.py b/docs/notebooks/extendedADM_NSGA_RNSGA_NUMS_Ranges.py new file mode 100644 index 00000000..e6c43a4d --- /dev/null +++ b/docs/notebooks/extendedADM_NSGA_RNSGA_NUMS_Ranges.py @@ -0,0 +1,398 @@ +import numpy as np +import pandas as pd + +import baseADM +from baseADM import * +import generatePreference as gp + +from desdeo_problem.testproblems.TestProblems import test_problem_builder +from desdeo_emo.utilities.ReferenceVectors import ReferenceVectors + +from desdeo_emo.EAs.RVEA import RVEA as NSGAIII +from desdeo_emo.EAs.NSGAIIINUMS import NSGAIIINUMS as NUMS +from desdeo_emo.EAs.RNSGAIII import RNSGAIII + + +from desdeo_emo.utilities.preference_converters import UPEMO + +from pymoo.factory import get_problem, get_reference_directions +import rmetric as rm +from sklearn.preprocessing import Normalizer +#from pymoo.config import Configuration + +#Configuration.show_compile_hint = False + +#problem_names = ["DTLZ1", "DTLZ3"] +problem_names = ["DTLZ3"] +n_objs = np.asarray([9]) # number of objectives + +K = 10 +n_vars = K + n_objs - 1 # number of variables + +num_gen_per_iter = [200] # number of generations per iteration + +algorithms = ["iNSGAIII", "NUMS", "RNSGAIII"] # algorithms to be compared + +# the followings are for formatting results +column_names = ( + ["problem", "num_obj", "iteration", "num_gens", "reference_point"] + + [algorithm + "_R_IGD" for algorithm in algorithms] + + [algorithm + "_R_HV" for algorithm in algorithms] + + [algorithm + "_N_Ss" for algorithm in algorithms] + + [algorithm + "_FEs" for algorithm in algorithms] +) +excess_columns = ["_R_IGD", "_R_HV"] +data = pd.DataFrame(columns=column_names) +data_row = pd.DataFrame(columns=column_names, index=[1]) + +# ADM parameters +L = 4 # number of iterations for the learning phase +D = 3 # number of iterations for the decision phase +lattice_resolution = 5 # density variable for creating reference vectors + +total_run = 10 + +for gen in num_gen_per_iter: + for n_obj, n_var in zip(n_objs, n_vars): + for problem_name in problem_names: + #Global arrays for median and stddev + + median_ns_nsga_learning = [] + median_rm_nsga_learning = [] + median_ns_nums_learning = [] + median_rm_nums_learning = [] + median_ns_rnsga_learning = [] + median_rm_rnsga_learning = [] + + median_ns_nsga_decision = [] + median_rm_nsga_decision = [] + median_ns_nums_decision = [] + median_rm_nums_decision = [] + median_ns_rnsga_decision = [] + median_rm_rnsga_decision = [] + + for run in range(total_run): + print(f"Problem {problem_name} Objectives {n_obj} Run {run+1} of {total_run}") + + problem = test_problem_builder( + name=problem_name, 
n_of_objectives=n_obj, n_of_variables=n_var + ) + problem.ideal_fitness = np.asarray([0] * n_obj) + problem.nadir_fitness = abs(np.random.normal(size=n_obj, scale=0.15)) + 1 + true_nadir = np.asarray([1] * n_obj) + # initial reference point is specified randomly + response = np.random.rand(n_obj) + # run algorithms once with the randomly generated reference point + # interactive RVEA + + int_nsga = NSGAIII(problem=problem, interact=True, n_gen_per_iter=gen) + int_nums = NUMS(problem=problem, interact=True, n_gen_per_iter=gen) + int_rnsga = RNSGAIII(problem=problem, interact=True, n_gen_per_iter=gen) + + int_nsga.set_interaction_type('Reference point') + int_nums.set_interaction_type('Reference point') + int_rnsga.set_interaction_type('Reference point') + + pref_int_nsga, _ = int_nsga.start() + pref_int_nums, _ = int_nums.start() + pref_int_rnsga, _ = int_rnsga.start() + + for boundrnd in range(0, n_obj): + if response[boundrnd] < int_nsga.population.problem.ideal_fitness[boundrnd]: + response[boundrnd] = int_nsga.population.problem.nadir_fitness[boundrnd] + #print(pref_int_rvea.content["dimensions_data"].columns["ideal"]) + + pref_int_nsga.response = pd.DataFrame( + [response], + columns=pref_int_nsga.content["dimensions_data"].columns, + ) + + pref_int_nums.response = pd.DataFrame( + [response], + columns=pref_int_nums.content["dimensions_data"].columns, + ) + + pref_int_rnsga.response = pd.DataFrame( + [response], + columns=pref_int_rnsga.content["dimensions_data"].columns, + ) + + pref_int_nsga,_ = int_nsga.iterate(pref_int_nsga) + pref_int_nums,_ = int_nums.iterate(pref_int_nums) + pref_int_rnsga,_ = int_rnsga.iterate(pref_int_rnsga) + + # build initial composite front + ( + nsga_n_solutions, + nums_n_solutions, + rnsga_n_solutions, + cf, + ) = generate_composite_front_with_identity_2( + int_nsga.population.objectives, int_nums.population.objectives, int_rnsga.population.objectives + ) + + # the following two lines for getting pareto front by using pymoo framework + problemR = get_problem(problem_name.lower(), n_var, n_obj) + ref_dirs = get_reference_directions( + "das-dennis", n_obj, n_partitions=12 + ) + pareto_front = problemR.pareto_front(ref_dirs) + # creates uniformly distributed reference vectors + reference_vectors = ReferenceVectors(lattice_resolution=lattice_resolution, number_of_objectives=n_obj) + + # Arrays to store values for the current run + rm_nsga_learning = np.array([]) + rm_nsga_decision = np.array([]) + ns_nsga_learning = np.array([]) + ns_nsga_decision = np.array([]) + + rm_nums_learning = np.array([]) + rm_nums_decision = np.array([]) + ns_nums_learning = np.array([]) + ns_nums_decision = np.array([]) + + rm_rnsga_learning = np.array([]) + rm_rnsga_decision = np.array([]) + ns_rnsga_learning = np.array([]) + ns_rnsga_decision = np.array([]) + + + # learning phase + for i in range(L): + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + problem_name, + n_obj, + i + 1, + gen, + ] + # After this class call, solutions inside the composite front are assigned to reference vectors + base = baseADM(cf, reference_vectors) + # generates the next reference point for the next iteration in the learning phase + response, reference_point = gp.generateRanges4learning(base, problem.ideal, true_nadir) + int_nsga.set_interaction_type('Preferred ranges') + int_nums.set_interaction_type('Preferred ranges') + int_rnsga.set_interaction_type('Preferred ranges') + + pref_int_nsga, _ = int_nsga.start() + pref_int_nums, _ = int_nums.start() + pref_int_rnsga, _ = 
int_rnsga.start() + + #data_row["reference_point"] = [response] + # run algorithms with the new reference point + # Interactive RVEA + pref_int_nsga.response = response + + pref_int_nums.response = response + pref_int_rnsga.response = response + + previous_NSGA_FEs = int_nsga._function_evaluation_count + previous_NUMS_FEs = int_nums._function_evaluation_count + previous_RNSGA_FEs = int_rnsga._function_evaluation_count + + pref_int_nsga,_ = int_nsga.iterate(pref_int_nsga) + pref_int_nums,_ = int_nums.iterate(pref_int_nums) + pref_int_rnsga,_ = int_rnsga.iterate(pref_int_rnsga) + + peritr_NSGA_FEs = ( + int_nsga._function_evaluation_count - previous_NSGA_FEs + ) + + peritr_NUMS_FEs = ( + int_nums._function_evaluation_count - previous_NUMS_FEs + ) + peritr_RNSGA_FEs = ( + int_rnsga._function_evaluation_count - previous_RNSGA_FEs + ) + # extend composite front with newly obtained solutions + ( + nsga_n_solutions, + nums_n_solutions, + rnsga_n_solutions, + cf, + ) = generate_composite_front_with_identity_2( + int_nsga.population.objectives, + int_nums.population.objectives, + int_rnsga.population.objectives, + cf, + ) + #data_row["iRVEA_N_Ss"] = [rvea_n_solutions] + #data_row["RNSGAIII_N_Ss"] = [nsga_n_solutions] + #data_row["iRVEA_FEs"] = [peritr_RVEA_FEs * n_obj] + #data_row["RNSGAIII_FEs"] = [peritr_NSGA_FEs * n_obj] + + + ns_nsga_learning = np.append(ns_nsga_learning, nsga_n_solutions) + ns_nums_learning = np.append(ns_nums_learning, nums_n_solutions) + ns_rnsga_learning = np.append(ns_rnsga_learning, rnsga_n_solutions) + + # R-metric calculation + ref_point = reference_point.reshape(1, n_obj) + # normalize reference point + rp_transformer = Normalizer().fit(ref_point) + norm_rp = rp_transformer.transform(ref_point) + rmetric = rm.RMetric(problemR, norm_rp, pf=pareto_front) + # normalize solutions before sending r-metric + nsga_transformer = Normalizer().fit(int_nsga.population.objectives) + norm_nsga = nsga_transformer.transform( + int_nsga.population.objectives + ) + + nums_transformer = Normalizer().fit(int_nums.population.objectives) + norm_nums = nums_transformer.transform( + int_nums.population.objectives + ) + + rnsga_transformer = Normalizer().fit(int_rnsga.population.objectives) + norm_rnsga = rnsga_transformer.transform( + int_rnsga.population.objectives + ) + # R-metric calls for R_IGD and R_HV + + rigd_nsga, rhv_nsga = rmetric.calc(norm_nsga, others=np.vstack((norm_nums,norm_rnsga))) + rigd_nums, rhv_nums = rmetric.calc(norm_nums, others=np.vstack((norm_nsga,norm_rnsga))) + rigd_rnsga, rhv_rnsga = rmetric.calc(norm_rnsga, others=np.vstack((norm_nsga,norm_nums))) + + #data_row[ + # ["iRVEA" + excess_col for excess_col in excess_columns] + #] = [rigd_irvea, rhv_irvea] + #data_row[ + # ["RNSGAIII" + excess_col for excess_col in excess_columns] + #] = [rigd_rnsga, rhv_rnsga] + #data = data.append(data_row, ignore_index=1) + rm_nsga_learning = np.append(rm_nsga_learning, rigd_nsga) + rm_nums_learning = np.append(rm_nums_learning, rigd_nums) + rm_rnsga_learning = np.append(rm_rnsga_learning, rigd_rnsga) + + # Compute cumulative sum of the learning phase + median_ns_nsga_learning = np.append(median_ns_nsga_learning, np.sum(ns_nsga_learning)) + median_rm_nsga_learning = np.append(median_rm_nsga_learning, np.sum(rm_nsga_learning)) + median_ns_nums_learning = np.append(median_ns_nums_learning, np.sum(ns_nums_learning)) + median_rm_nums_learning = np.append(median_rm_nums_learning, np.sum(rm_nums_learning)) + median_ns_rnsga_learning = np.append(median_ns_rnsga_learning, 
np.sum(ns_rnsga_learning)) + median_rm_rnsga_learning = np.append(median_rm_rnsga_learning, np.sum(rm_rnsga_learning)) + + # Decision phase + # After the learning phase the reference vector which has the maximum number of assigned solutions forms ROI + max_assigned_vector = gp.get_max_assigned_vector(base.assigned_vectors) + for i in range(D): + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + problem_name, + n_obj, + L + i + 1, + gen, + ] + # since composite front grows after each iteration this call should be done for each iteration + base = baseADM(cf, reference_vectors) + # generates the next reference point for the decision phase + response, reference_point = gp.generateRanges4decision( + base, max_assigned_vector[0], problem.ideal, true_nadir + ) + #data_row["reference_point"] = [response] + # run algorithms with the new reference point + pref_int_nsga.response = response + pref_int_nums.response = response + pref_int_rnsga.response = response + + previous_NSGA_FEs = int_nsga._function_evaluation_count + previous_NUMS_FEs = int_nums._function_evaluation_count + previous_RNSGA_FEs = int_rnsga._function_evaluation_count + + pref_int_nsga,_ = int_nsga.iterate(pref_int_nsga) + pref_int_nums,_ = int_nums.iterate(pref_int_nums) + pref_int_rnsga,_ = int_rnsga.iterate(pref_int_rnsga) + + peritr_NSGA_FEs = ( + int_nsga._function_evaluation_count - previous_NSGA_FEs + ) + + peritr_NUMS_FEs = ( + int_nums._function_evaluation_count - previous_NUMS_FEs + ) + peritr_RNSGA_FEs = ( + int_rnsga._function_evaluation_count - previous_RNSGA_FEs + ) + # extend composite front with newly obtained solutions + ( + nsga_n_solutions, + nums_n_solutions, + rnsga_n_solutions, + cf, + ) = generate_composite_front_with_identity_2( + int_nsga.population.objectives, + int_nums.population.objectives, + int_rnsga.population.objectives, + cf, + ) + #data_row["iRVEA_N_Ss"] = [rvea_n_solutions] + #data_row["RNSGAIII_N_Ss"] = [nsga_n_solutions] + #data_row["iRVEA_FEs"] = [peritr_RVEA_FEs * n_obj] + #data_row["RNSGAIII_FEs"] = [peritr_NSGA_FEs * n_obj] + + + ns_nsga_decision= np.append(ns_nsga_decision, nsga_n_solutions) + ns_nums_decision= np.append(ns_nums_decision, nums_n_solutions) + ns_rnsga_decision= np.append(ns_rnsga_decision, rnsga_n_solutions) + + # R-metric calculation + ref_point = reference_point.reshape(1, n_obj) + # normalize reference point + rp_transformer = Normalizer().fit(ref_point) + norm_rp = rp_transformer.transform(ref_point) + rmetric = rm.RMetric(problemR, norm_rp, pf=pareto_front) + # normalize solutions before sending r-metric + nsga_transformer = Normalizer().fit(int_nsga.population.objectives) + norm_nsga = nsga_transformer.transform( + int_nsga.population.objectives + ) + + nums_transformer = Normalizer().fit(int_nums.population.objectives) + norm_nums = nums_transformer.transform( + int_nums.population.objectives + ) + + rnsga_transformer = Normalizer().fit(int_rnsga.population.objectives) + norm_rnsga = rnsga_transformer.transform( + int_rnsga.population.objectives + ) + # R-metric calls for R_IGD and R_HV + + rigd_nsga, rhv_nsga = rmetric.calc(norm_nsga, others=np.vstack((norm_nums,norm_rnsga))) + rigd_nums, rhv_nums = rmetric.calc(norm_nums, others=np.vstack((norm_nsga,norm_rnsga))) + rigd_rnsga, rhv_rnsga = rmetric.calc(norm_rnsga, others=np.vstack((norm_nsga,norm_nums))) + + #data_row[ + # ["iRVEA" + excess_col for excess_col in excess_columns] + #] = [rigd_irvea, rhv_irvea] + #data_row[ + # ["RNSGAIII" + excess_col for excess_col in excess_columns] + #] = [rigd_rnsga, 
rhv_rnsga] + #data = data.append(data_row, ignore_index=1) + rm_nsga_decision = np.append(rm_nsga_decision, rigd_nsga) + rm_nums_decision = np.append(rm_nums_decision, rigd_nums) + rm_rnsga_decision = np.append(rm_rnsga_decision, rigd_rnsga) + + #Compute median of the decision phase + median_ns_nsga_decision = np.append(median_ns_nsga_decision, np.sum(ns_nsga_decision)) + median_rm_nsga_decision = np.append(median_rm_nsga_decision, np.sum(rm_nsga_decision)) + median_ns_nums_decision = np.append(median_ns_nums_decision, np.sum(ns_nums_decision)) + median_rm_nums_decision = np.append(median_rm_nums_decision, np.sum(rm_nums_decision)) + median_ns_rnsga_decision = np.append(median_ns_rnsga_decision, np.sum(ns_rnsga_decision)) + median_rm_rnsga_decision = np.append(median_rm_rnsga_decision, np.sum(rm_rnsga_decision)) + print(f"Results for Problem {problem_name} with {n_obj} objectives") + print ("Learning phase") + print("Ns") + print (f"{np.mean(median_ns_nsga_learning):.1f} & {np.std(median_ns_nsga_learning):.4f} & {np.mean(median_ns_nums_learning):.1f} & {np.std(median_ns_nums_learning):.4f} & {np.mean(median_ns_rnsga_learning):.1f} & {np.std(median_ns_rnsga_learning):.4f}") + print("RM") + print (f"{np.mean(median_rm_nsga_learning):.4f} & {np.std(median_rm_nsga_learning):.4f} & {np.mean(median_rm_nums_learning):.4f} & {np.std(median_rm_nums_learning):.4f} & {np.mean(median_rm_rnsga_learning):.4f} & {np.std(median_rm_rnsga_learning):.4f}") + #print (f"NSGA Ns = {np.mean(median_ns_nsga_learning)} & {np.std(median_ns_nsga_learning)}") + #print (f"NSGA RM = {np.mean(median_rm_nsga_learning)} & {np.std(median_rm_nsga_learning)}") + + print("Decision phase") + print("Ns") + print (f"{np.mean(median_ns_nsga_decision):.1f} & {np.std(median_ns_nsga_decision):.4f} & {np.mean(median_ns_nums_decision):.1f} & {np.std(median_ns_nums_decision):.4f} & {np.mean(median_ns_rnsga_decision):.1f} & {np.std(median_ns_rnsga_decision):.4f}") + print("RM") + print (f"{np.mean(median_rm_nsga_decision):.4f} & {np.std(median_rm_nsga_decision):.4f} & {np.mean(median_rm_nums_decision):.4f} & {np.std(median_rm_nums_decision):.4f} & {np.mean(median_rm_rnsga_decision):.4f} & {np.std(median_rm_rnsga_decision):.4f}") + #print (f"NSGA Ns = {np.mean(median_ns_nsga_decision)} & {np.std(median_ns_nsga_decision)}") + #print (f"NSGA RM = {np.mean(median_rm_nsga_decision)} & {np.std(median_rm_nsga_decision)}") + + #data.to_csv(f"refpoints{run+1}.csv", index=False) diff --git a/docs/notebooks/extendedADM_NSGAvsRVEA (copy).py b/docs/notebooks/extendedADM_NSGAvsRVEA (copy).py new file mode 100644 index 00000000..5e8e73e8 --- /dev/null +++ b/docs/notebooks/extendedADM_NSGAvsRVEA (copy).py @@ -0,0 +1,320 @@ +import numpy as np +import pandas as pd + +import baseADM +from baseADM import * +import generatePreference as gp + +from desdeo_problem.testproblems.TestProblems import test_problem_builder +from desdeo_emo.utilities.ReferenceVectors import ReferenceVectors + +from desdeo_emo.EAs.NSGAIII import NSGAIII as RVEA +from desdeo_emo.EAs.RNSGAIII import RNSGAIII +from desdeo_emo.utilities.preference_converters import UPEMO + +from pymoo.factory import get_problem, get_reference_directions +import rmetric as rm +from sklearn.preprocessing import Normalizer +#from pymoo.config import Configuration + +#Configuration.show_compile_hint = False + +problem_names = ["DTLZ1", "DTLZ3"] +n_objs = np.asarray([5, 7, 9]) # number of objectives + +K = 10 +n_vars = K + n_objs - 1 # number of variables + +num_gen_per_iter = [200] # number of generations 
per iteration + +algorithms = ["iRVEA", "RNSGAIII"] # algorithms to be compared + +# the followings are for formatting results +column_names = ( + ["problem", "num_obj", "iteration", "num_gens", "reference_point"] + + [algorithm + "_R_IGD" for algorithm in algorithms] + + [algorithm + "_R_HV" for algorithm in algorithms] + + [algorithm + "_N_Ss" for algorithm in algorithms] + + [algorithm + "_FEs" for algorithm in algorithms] +) +excess_columns = ["_R_IGD", "_R_HV"] +data = pd.DataFrame(columns=column_names) +data_row = pd.DataFrame(columns=column_names, index=[1]) + +# ADM parameters +L = 4 # number of iterations for the learning phase +D = 3 # number of iterations for the decision phase +lattice_resolution = 5 # density variable for creating reference vectors + +total_run = 5 + +for gen in num_gen_per_iter: + for n_obj, n_var in zip(n_objs, n_vars): + for problem_name in problem_names: + #Global arrays for median and stddev + median_ns_rvea_learning = [] + median_rm_rvea_learning = [] + median_ns_nsga_learning = [] + median_rm_nsga_learning = [] + + median_ns_rvea_decision = [] + median_rm_rvea_decision = [] + median_ns_nsga_decision = [] + median_rm_nsga_decision = [] + + for run in range(total_run): + print(f"Problem {problem_name} Objectives {n_obj} Run {run+1} of {total_run}") + + problem = test_problem_builder( + name=problem_name, n_of_objectives=n_obj, n_of_variables=n_var + ) + problem.ideal_fitness = np.asarray([0] * n_obj) + problem.nadir_fitness = abs(np.random.normal(size=n_obj, scale=0.15)) + 1 + true_nadir = np.asarray([1] * n_obj) + # initial reference point is specified randomly + response = np.random.rand(n_obj) + # run algorithms once with the randomly generated reference point + # interactive RVEA + int_rvea = RVEA(problem=problem, interact=True, n_gen_per_iter=gen) + int_rvea.set_interaction_type('Reference point') + pref_int_rvea, _ = int_rvea.start() + for boundrnd in range(0, n_obj): + if response[boundrnd] < int_rvea.population.problem.ideal_fitness[boundrnd]: + response[boundrnd] = int_rvea.population.problem.nadir_fitness[boundrnd] + #print(pref_int_rvea.content["dimensions_data"].columns["ideal"]) + pref_int_rvea.response = pd.DataFrame( + [response], + columns=pref_int_rvea.content["dimensions_data"].columns, + ) + pref_int_rvea,_ = int_rvea.iterate(pref_int_rvea) + #RNSGAIII + int_rnsga = RNSGAIII(problem, 50, np.atleast_2d(UPEMO(response)), n_iterations=10, n_gen_per_iter=gen) + pref_int_nsga,_ = int_rnsga.iterate() + # build initial composite front + ( + rvea_n_solutions, + nsga_n_solutions, + cf, + ) = generate_composite_front_with_identity( + int_rvea.population.objectives, int_rnsga.population.objectives + ) + # the following two lines for getting pareto front by using pymoo framework + problemR = get_problem(problem_name.lower(), n_var, n_obj) + ref_dirs = get_reference_directions( + "das-dennis", n_obj, n_partitions=12 + ) + pareto_front = problemR.pareto_front(ref_dirs) + # creates uniformly distributed reference vectors + reference_vectors = ReferenceVectors(lattice_resolution=lattice_resolution, number_of_objectives=n_obj) + + # Arrays to store values for the current run + rm_rvea_learning = np.array([]) + rm_rvea_decision = np.array([]) + ns_rvea_learning = np.array([]) + ns_rvea_decision = np.array([]) + + + rm_nsga_learning = np.array([]) + rm_nsga_decision = np.array([]) + ns_nsga_learning = np.array([]) + ns_nsga_decision = np.array([]) + + + # learning phase + for i in range(L): + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + 
problem_name, + n_obj, + i + 1, + gen, + ] + # After this class call, solutions inside the composite front are assigned to reference vectors + base = baseADM(cf, reference_vectors) + # generates the next reference point for the next iteration in the learning phase + response = gp.generateRP4learning(base) + for boundrnd in range(0, n_obj): + if response[boundrnd] < int_rvea.population.problem.ideal_fitness[boundrnd]: + response[boundrnd] = int_rvea.population.problem.ideal_fitness[boundrnd] + + data_row["reference_point"] = [response] + # run algorithms with the new reference point + # Interactive RVEA + pref_int_rvea.response = pd.DataFrame( + [response], + columns=pref_int_rvea.content["dimensions_data"].columns, + ) + #pref_int_nsga[2].response = pd.DataFrame( + # [response], + # columns=pref_int_nsga[2].content["dimensions_data"].columns, + # + # ) + previous_RVEA_FEs = int_rvea._function_evaluation_count + previous_NSGA_FEs = int_rnsga._function_evaluation_count + pref_int_rvea,_ = int_rvea.iterate(pref_int_rvea) + int_rnsga = RNSGAIII(problem, 50, np.atleast_2d(UPEMO(response)), n_iterations=10, n_gen_per_iter=gen) + pref_int_rnsga,_ = int_rnsga.iterate() + peritr_RVEA_FEs = ( + int_rvea._function_evaluation_count - previous_RVEA_FEs + ) + peritr_NSGA_FEs = ( + int_rnsga._function_evaluation_count - previous_NSGA_FEs + ) + # extend composite front with newly obtained solutions + ( + rvea_n_solutions, + nsga_n_solutions, + cf, + ) = generate_composite_front_with_identity( + int_rvea.population.objectives, + int_rnsga.population.objectives, + cf, + ) + #data_row["iRVEA_N_Ss"] = [rvea_n_solutions] + #data_row["RNSGAIII_N_Ss"] = [nsga_n_solutions] + #data_row["iRVEA_FEs"] = [peritr_RVEA_FEs * n_obj] + #data_row["RNSGAIII_FEs"] = [peritr_NSGA_FEs * n_obj] + + ns_rvea_learning = np.append(ns_rvea_learning, rvea_n_solutions) + ns_nsga_learning = np.append(ns_nsga_learning, nsga_n_solutions) + + # R-metric calculation + ref_point = response.reshape(1, n_obj) + # normalize reference point + rp_transformer = Normalizer().fit(ref_point) + norm_rp = rp_transformer.transform(ref_point) + rmetric = rm.RMetric(problemR, norm_rp, pf=pareto_front) + # normalize solutions before sending r-metric + rvea_transformer = Normalizer().fit(int_rvea.population.objectives) + norm_rvea = rvea_transformer.transform( + int_rvea.population.objectives + ) + nsga_transformer = Normalizer().fit(int_rnsga.population.objectives) + norm_rnsga = nsga_transformer.transform( + int_rnsga.population.objectives + ) + # R-metric calls for R_IGD and R_HV + rigd_irvea, rhv_irvea = rmetric.calc(norm_rvea, others=norm_rnsga) + rigd_rnsga, rhv_rnsga = rmetric.calc(norm_rnsga, others=norm_rvea) + + + #data_row[ + # ["iRVEA" + excess_col for excess_col in excess_columns] + #] = [rigd_irvea, rhv_irvea] + #data_row[ + # ["RNSGAIII" + excess_col for excess_col in excess_columns] + #] = [rigd_rnsga, rhv_rnsga] + #data = data.append(data_row, ignore_index=1) + rm_rvea_learning = np.append(rm_rvea_learning, rigd_irvea) + rm_nsga_learning = np.append(rm_nsga_learning, rigd_rnsga) + + # Compute cumulative sum of the learning phase + median_ns_rvea_learning = np.append(median_ns_rvea_learning, np.sum(ns_rvea_learning)) + median_rm_rvea_learning = np.append(median_rm_rvea_learning, np.sum(rm_rvea_learning)) + median_ns_nsga_learning = np.append(median_ns_nsga_learning, np.sum(ns_nsga_learning)) + median_rm_nsga_learning = np.append(median_rm_nsga_learning, np.sum(rm_nsga_learning)) + + # Decision phase + # After the learning phase the 
reference vector which has the maximum number of assigned solutions forms ROI + max_assigned_vector = gp.get_max_assigned_vector(base.assigned_vectors) + for i in range(D): + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + problem_name, + n_obj, + L + i + 1, + gen, + ] + # since composite front grows after each iteration this call should be done for each iteration + base = baseADM(cf, reference_vectors) + # generates the next reference point for the decision phase + response = gp.generatePerturbatedRP4decision( + base, max_assigned_vector[0] + ) + data_row["reference_point"] = [response] + # run algorithms with the new reference point + pref_int_rvea.response = pd.DataFrame( + [response], + columns=pref_int_rvea.content["dimensions_data"].columns, + ) + #pref_int_nsga[2].response = pd.DataFrame( + # [response], + # columns=pref_int_nsga[2].content["dimensions_data"].columns, + #) + int_rnsga = RNSGAIII(problem, 50, np.atleast_2d(UPEMO(response)), n_iterations=10, n_gen_per_iter=gen) + previous_RVEA_FEs = int_rvea._function_evaluation_count + previous_NSGA_FEs = int_rnsga._function_evaluation_count + pref_int_rvea,_ = int_rvea.iterate(pref_int_rvea) + pref_int_nsga,_ = int_rnsga.iterate() + peritr_RVEA_FEs = ( + int_rvea._function_evaluation_count - previous_RVEA_FEs + ) + peritr_NSGA_FEs = ( + int_rnsga._function_evaluation_count - previous_NSGA_FEs + ) + # extend composite front with newly obtained solutions + ( + rvea_n_solutions, + nsga_n_solutions, + cf, + ) = generate_composite_front_with_identity( + int_rvea.population.objectives, + int_rnsga.population.objectives, + cf, + ) + #data_row["iRVEA_N_Ss"] = [rvea_n_solutions] + #data_row["RNSGAIII_N_Ss"] = [nsga_n_solutions] + #data_row["iRVEA_FEs"] = [peritr_RVEA_FEs * n_obj] + #data_row["RNSGAIII_FEs"] = [peritr_NSGA_FEs * n_obj] + + ns_rvea_decision = np.append(ns_rvea_decision, rvea_n_solutions) + ns_nsga_decision = np.append(ns_nsga_decision, nsga_n_solutions) + + # R-metric calculation + ref_point = response.reshape(1, n_obj) + rp_transformer = Normalizer().fit(ref_point) + norm_rp = rp_transformer.transform(ref_point) + # for decision phase, delta is specified as 0.2 + rmetric = rm.RMetric(problemR, norm_rp, delta=0.2, pf=pareto_front) + # normalize solutions before sending r-metric + rvea_transformer = Normalizer().fit(int_rvea.population.objectives) + norm_rvea = rvea_transformer.transform( + int_rvea.population.objectives + ) + nsga_transformer = Normalizer().fit(int_rnsga.population.objectives) + norm_nsga = nsga_transformer.transform( + int_rnsga.population.objectives + ) + rigd_irvea, rhv_irvea = rmetric.calc(norm_rvea, others=norm_nsga) + rigd_insga, rhv_insga = rmetric.calc(norm_nsga, others=norm_rvea) + #data_row[ + # ["iRVEA" + excess_col for excess_col in excess_columns] + #] = [rigd_irvea, rhv_irvea] + #data_row[ + # ["RNSGAIII" + excess_col for excess_col in excess_columns] + #] = [rigd_insga, rhv_insga] + #data = data.append(data_row, ignore_index=1) + + rm_rvea_decision = np.append(rm_rvea_decision, rigd_irvea) + rm_nsga_decision = np.append(rm_nsga_decision, rigd_rnsga) + #Compute median of the decision phase + median_ns_rvea_decision = np.append(median_ns_rvea_decision, np.sum(ns_rvea_decision)) + median_rm_rvea_decision = np.append(median_rm_rvea_decision, np.sum(rm_rvea_decision)) + median_ns_nsga_decision = np.append(median_ns_nsga_decision, np.sum(ns_nsga_decision)) + median_rm_nsga_decision = np.append(median_rm_nsga_decision, np.sum(rm_nsga_decision)) + print(f"Results for Problem 
{problem_name} with {n_obj} objectives") + print ("Learning phase") + print("Ns") + print (f"{np.mean(median_ns_rvea_learning):.1f} & {np.std(median_ns_rvea_learning):.4f} & {np.mean(median_ns_nsga_learning):.1f} & {np.std(median_ns_nsga_learning):.4f}") + print("RM") + print (f"{np.mean(median_rm_rvea_learning):.4f} & {np.std(median_rm_rvea_learning):.4f} & {np.mean(median_rm_nsga_learning):.4f} & {np.std(median_rm_nsga_learning):.4f}") + #print (f"NSGA Ns = {np.mean(median_ns_nsga_learning)} & {np.std(median_ns_nsga_learning)}") + #print (f"NSGA RM = {np.mean(median_rm_nsga_learning)} & {np.std(median_rm_nsga_learning)}") + + print("Decision phase") + print("Ns") + print (f"{np.mean(median_ns_rvea_decision):.1f} & {np.std(median_ns_rvea_decision):.4f} & {np.mean(median_ns_nsga_decision):.1f} & {np.std(median_ns_nsga_decision):.4f}") + print("RM") + print (f"{np.mean(median_rm_rvea_decision):.4f} & {np.std(median_rm_rvea_decision):.4f} & {np.mean(median_rm_nsga_decision):.4f} & {np.std(median_rm_nsga_decision):.4f}") + #print (f"NSGA Ns = {np.mean(median_ns_nsga_decision)} & {np.std(median_ns_nsga_decision)}") + #print (f"NSGA RM = {np.mean(median_rm_nsga_decision)} & {np.std(median_rm_nsga_decision)}") + + #data.to_csv(f"refpoints{run+1}.csv", index=False) diff --git a/docs/notebooks/extendedADM_NSGAvsRVEA.py b/docs/notebooks/extendedADM_NSGAvsRVEA.py new file mode 100644 index 00000000..2258ef31 --- /dev/null +++ b/docs/notebooks/extendedADM_NSGAvsRVEA.py @@ -0,0 +1,329 @@ +import numpy as np +import pandas as pd + +import baseADM +from baseADM import * +import generatePreference as gp + +from desdeo_problem.testproblems.TestProblems import test_problem_builder +from desdeo_emo.utilities.ReferenceVectors import ReferenceVectors + +from desdeo_emo.EAs.RVEA import RVEA +from desdeo_emo.EAs.NSGAIII import NSGAIII +from desdeo_emo.utilities.preference_converters import UPEMO + +from pymoo.factory import get_problem, get_reference_directions +import rmetric as rm +from sklearn.preprocessing import Normalizer +#from pymoo.config import Configuration + +#Configuration.show_compile_hint = False + +problem_names = ["DTLZ1", "DTLZ3"] +n_objs = np.asarray([9]) # number of objectives + +K = 10 +n_vars = K + n_objs - 1 # number of variables + +num_gen_per_iter = [200] # number of generations per iteration + +algorithms = ["iRVEA", "iNSGAIII"] # algorithms to be compared + +# the followings are for formatting results +column_names = ( + ["problem", "num_obj", "iteration", "num_gens", "reference_point"] + + [algorithm + "_R_IGD" for algorithm in algorithms] + + [algorithm + "_R_HV" for algorithm in algorithms] + + [algorithm + "_N_Ss" for algorithm in algorithms] + + [algorithm + "_FEs" for algorithm in algorithms] +) +excess_columns = ["_R_IGD", "_R_HV"] +data = pd.DataFrame(columns=column_names) +data_row = pd.DataFrame(columns=column_names, index=[1]) + +# ADM parameters +L = 4 # number of iterations for the learning phase +D = 3 # number of iterations for the decision phase +lattice_resolution = 5 # density variable for creating reference vectors + +total_run = 10 + +for gen in num_gen_per_iter: + for n_obj, n_var in zip(n_objs, n_vars): + for problem_name in problem_names: + #Global arrays for median and stddev + median_ns_rvea_learning = [] + median_rm_rvea_learning = [] + median_ns_nsga_learning = [] + median_rm_nsga_learning = [] + + median_ns_rvea_decision = [] + median_rm_rvea_decision = [] + median_ns_nsga_decision = [] + median_rm_nsga_decision = [] + + for run in range(total_run): + 
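Editor's note (annotation, not part of the patch): every run below repeats the same skeleton, namely a random initial reference point, one run of each EA, a composite front, then L learning iterations followed by D decision iterations. The per-objective bound check that follows a few lines down can be written compactly; this is a self-contained sketch of the learning-phase variant only (components below the ideal reset to the ideal), with illustrative stand-ins for the ideal_fitness/nadir_fitness values the script sets itself.

import numpy as np

# Illustrative stand-ins for problem.ideal_fitness / problem.nadir_fitness as set in the script.
ideal = np.zeros(5)
nadir = np.ones(5)

response = np.random.rand(5)                 # random reference point, as in the script
response = np.clip(response, ideal, nadir)   # compact form of the learning-phase boundrnd loop
print(response)

Note that the initial bound check in these scripts resets out-of-range components to the nadir rather than the ideal, so the clip above matches only the learning-phase behaviour.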
print(f"Problem {problem_name} Objectives {n_obj} Run {run+1} of {total_run}") + + problem = test_problem_builder( + name=problem_name, n_of_objectives=n_obj, n_of_variables=n_var + ) + problem.ideal_fitness = np.asarray([0] * n_obj) + problem.nadir_fitness = abs(np.random.normal(size=n_obj, scale=0.15)) + 1 + true_nadir = np.asarray([1] * n_obj) + # initial reference point is specified randomly + response = np.random.rand(n_obj) + # run algorithms once with the randomly generated reference point + # interactive RVEA + int_rvea = RVEA(problem=problem, interact=True, n_gen_per_iter=gen) + int_nsga = NSGAIII(problem=problem, interact=True, n_gen_per_iter=gen) + int_rvea.set_interaction_type('Reference point') + int_nsga.set_interaction_type('Reference point') + pref_int_rvea, _ = int_rvea.start() + pref_int_nsga, _ = int_nsga.start() + for boundrnd in range(0, n_obj): + if response[boundrnd] < int_rvea.population.problem.ideal_fitness[boundrnd]: + response[boundrnd] = int_rvea.population.problem.nadir_fitness[boundrnd] + #print(pref_int_rvea.content["dimensions_data"].columns["ideal"]) + pref_int_rvea.response = pd.DataFrame( + [response], + columns=pref_int_rvea.content["dimensions_data"].columns, + ) + pref_int_nsga.response = pd.DataFrame( + [response], + columns=pref_int_nsga.content["dimensions_data"].columns, + ) + pref_int_rvea,_ = int_rvea.iterate(pref_int_rvea) + pref_int_nsga,_ = int_nsga.iterate(pref_int_nsga) + + # build initial composite front + ( + rvea_n_solutions, + nsga_n_solutions, + cf, + ) = generate_composite_front_with_identity( + int_rvea.population.objectives, int_nsga.population.objectives + ) + # the following two lines for getting pareto front by using pymoo framework + problemR = get_problem(problem_name.lower(), n_var, n_obj) + ref_dirs = get_reference_directions( + "das-dennis", n_obj, n_partitions=12 + ) + pareto_front = problemR.pareto_front(ref_dirs) + # creates uniformly distributed reference vectors + reference_vectors = ReferenceVectors(lattice_resolution=lattice_resolution, number_of_objectives=n_obj) + + # Arrays to store values for the current run + rm_rvea_learning = np.array([]) + rm_rvea_decision = np.array([]) + ns_rvea_learning = np.array([]) + ns_rvea_decision = np.array([]) + + + rm_nsga_learning = np.array([]) + rm_nsga_decision = np.array([]) + ns_nsga_learning = np.array([]) + ns_nsga_decision = np.array([]) + + + # learning phase + for i in range(L): + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + problem_name, + n_obj, + i + 1, + gen, + ] + # After this class call, solutions inside the composite front are assigned to reference vectors + base = baseADM(cf, reference_vectors) + # generates the next reference point for the next iteration in the learning phase + response = gp.generateRP4learning(base) + for boundrnd in range(0, n_obj): + if response[boundrnd] < int_rvea.population.problem.ideal_fitness[boundrnd]: + response[boundrnd] = int_rvea.population.problem.ideal_fitness[boundrnd] + + data_row["reference_point"] = [response] + # run algorithms with the new reference point + # Interactive RVEA + pref_int_rvea.response = pd.DataFrame( + [response], + columns=pref_int_rvea.content["dimensions_data"].columns, + ) + pref_int_nsga.response = pd.DataFrame( + [response], + columns=pref_int_nsga.content["dimensions_data"].columns, + + ) + previous_RVEA_FEs = int_rvea._function_evaluation_count + previous_NSGA_FEs = int_nsga._function_evaluation_count + + pref_int_rvea,_ = int_rvea.iterate(pref_int_rvea) + pref_int_nsga,_ = 
int_nsga.iterate(pref_int_nsga) + + peritr_RVEA_FEs = ( + int_rvea._function_evaluation_count - previous_RVEA_FEs + ) + peritr_NSGA_FEs = ( + int_nsga._function_evaluation_count - previous_NSGA_FEs + ) + # extend composite front with newly obtained solutions + ( + rvea_n_solutions, + nsga_n_solutions, + cf, + ) = generate_composite_front_with_identity( + int_rvea.population.objectives, + int_nsga.population.objectives, + cf, + ) + #data_row["iRVEA_N_Ss"] = [rvea_n_solutions] + #data_row["RNSGAIII_N_Ss"] = [nsga_n_solutions] + #data_row["iRVEA_FEs"] = [peritr_RVEA_FEs * n_obj] + #data_row["RNSGAIII_FEs"] = [peritr_NSGA_FEs * n_obj] + + ns_rvea_learning = np.append(ns_rvea_learning, rvea_n_solutions) + ns_nsga_learning = np.append(ns_nsga_learning, nsga_n_solutions) + + # R-metric calculation + ref_point = response.reshape(1, n_obj) + # normalize reference point + rp_transformer = Normalizer().fit(ref_point) + norm_rp = rp_transformer.transform(ref_point) + rmetric = rm.RMetric(problemR, norm_rp, pf=pareto_front) + # normalize solutions before sending r-metric + rvea_transformer = Normalizer().fit(int_rvea.population.objectives) + norm_rvea = rvea_transformer.transform( + int_rvea.population.objectives + ) + nsga_transformer = Normalizer().fit(int_nsga.population.objectives) + norm_rnsga = nsga_transformer.transform( + int_nsga.population.objectives + ) + # R-metric calls for R_IGD and R_HV + rigd_irvea, rhv_irvea = rmetric.calc(norm_rvea, others=norm_rnsga) + rigd_rnsga, rhv_rnsga = rmetric.calc(norm_rnsga, others=norm_rvea) + + + #data_row[ + # ["iRVEA" + excess_col for excess_col in excess_columns] + #] = [rigd_irvea, rhv_irvea] + #data_row[ + # ["RNSGAIII" + excess_col for excess_col in excess_columns] + #] = [rigd_rnsga, rhv_rnsga] + #data = data.append(data_row, ignore_index=1) + rm_rvea_learning = np.append(rm_rvea_learning, rigd_irvea) + rm_nsga_learning = np.append(rm_nsga_learning, rigd_rnsga) + + # Compute cumulative sum of the learning phase + median_ns_rvea_learning = np.append(median_ns_rvea_learning, np.sum(ns_rvea_learning)) + median_rm_rvea_learning = np.append(median_rm_rvea_learning, np.sum(rm_rvea_learning)) + median_ns_nsga_learning = np.append(median_ns_nsga_learning, np.sum(ns_nsga_learning)) + median_rm_nsga_learning = np.append(median_rm_nsga_learning, np.sum(rm_nsga_learning)) + + # Decision phase + # After the learning phase the reference vector which has the maximum number of assigned solutions forms ROI + max_assigned_vector = gp.get_max_assigned_vector(base.assigned_vectors) + for i in range(D): + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + problem_name, + n_obj, + L + i + 1, + gen, + ] + # since composite front grows after each iteration this call should be done for each iteration + base = baseADM(cf, reference_vectors) + # generates the next reference point for the decision phase + response = gp.generatePerturbatedRP4decision( + base, max_assigned_vector[0] + ) + data_row["reference_point"] = [response] + # run algorithms with the new reference point + pref_int_rvea.response = pd.DataFrame( + [response], + columns=pref_int_rvea.content["dimensions_data"].columns, + ) + pref_int_nsga.response = pd.DataFrame( + [response], + columns=pref_int_nsga.content["dimensions_data"].columns, + ) + + previous_RVEA_FEs = int_rvea._function_evaluation_count + previous_NSGA_FEs = int_nsga._function_evaluation_count + + pref_int_rvea,_ = int_rvea.iterate(pref_int_rvea) + pref_int_nsga,_ = int_nsga.iterate(pref_int_nsga) + + peritr_RVEA_FEs = ( + 
int_rvea._function_evaluation_count - previous_RVEA_FEs + ) + peritr_NSGA_FEs = ( + int_nsga._function_evaluation_count - previous_NSGA_FEs + ) + # extend composite front with newly obtained solutions + ( + rvea_n_solutions, + nsga_n_solutions, + cf, + ) = generate_composite_front_with_identity( + int_rvea.population.objectives, + int_nsga.population.objectives, + cf, + ) + #data_row["iRVEA_N_Ss"] = [rvea_n_solutions] + #data_row["RNSGAIII_N_Ss"] = [nsga_n_solutions] + #data_row["iRVEA_FEs"] = [peritr_RVEA_FEs * n_obj] + #data_row["RNSGAIII_FEs"] = [peritr_NSGA_FEs * n_obj] + + ns_rvea_decision = np.append(ns_rvea_decision, rvea_n_solutions) + ns_nsga_decision = np.append(ns_nsga_decision, nsga_n_solutions) + + # R-metric calculation + ref_point = response.reshape(1, n_obj) + rp_transformer = Normalizer().fit(ref_point) + norm_rp = rp_transformer.transform(ref_point) + # for decision phase, delta is specified as 0.2 + rmetric = rm.RMetric(problemR, norm_rp, delta=0.2, pf=pareto_front) + # normalize solutions before sending r-metric + rvea_transformer = Normalizer().fit(int_rvea.population.objectives) + norm_rvea = rvea_transformer.transform( + int_rvea.population.objectives + ) + nsga_transformer = Normalizer().fit(int_nsga.population.objectives) + norm_nsga = nsga_transformer.transform( + int_nsga.population.objectives + ) + rigd_irvea, rhv_irvea = rmetric.calc(norm_rvea, others=norm_nsga) + rigd_insga, rhv_insga = rmetric.calc(norm_nsga, others=norm_rvea) + #data_row[ + # ["iRVEA" + excess_col for excess_col in excess_columns] + #] = [rigd_irvea, rhv_irvea] + #data_row[ + # ["RNSGAIII" + excess_col for excess_col in excess_columns] + #] = [rigd_insga, rhv_insga] + #data = data.append(data_row, ignore_index=1) + + rm_rvea_decision = np.append(rm_rvea_decision, rigd_irvea) + rm_nsga_decision = np.append(rm_nsga_decision, rigd_rnsga) + #Compute median of the decision phase + median_ns_rvea_decision = np.append(median_ns_rvea_decision, np.sum(ns_rvea_decision)) + median_rm_rvea_decision = np.append(median_rm_rvea_decision, np.sum(rm_rvea_decision)) + median_ns_nsga_decision = np.append(median_ns_nsga_decision, np.sum(ns_nsga_decision)) + median_rm_nsga_decision = np.append(median_rm_nsga_decision, np.sum(rm_nsga_decision)) + print(f"Results for Problem {problem_name} with {n_obj} objectives") + print ("Learning phase") + print("Ns") + print (f"{np.mean(median_ns_rvea_learning):.1f} & {np.std(median_ns_rvea_learning):.4f} & {np.mean(median_ns_nsga_learning):.1f} & {np.std(median_ns_nsga_learning):.4f}") + print("RM") + print (f"{np.mean(median_rm_rvea_learning):.4f} & {np.std(median_rm_rvea_learning):.4f} & {np.mean(median_rm_nsga_learning):.4f} & {np.std(median_rm_nsga_learning):.4f}") + #print (f"NSGA Ns = {np.mean(median_ns_nsga_learning)} & {np.std(median_ns_nsga_learning)}") + #print (f"NSGA RM = {np.mean(median_rm_nsga_learning)} & {np.std(median_rm_nsga_learning)}") + + print("Decision phase") + print("Ns") + print (f"{np.mean(median_ns_rvea_decision):.1f} & {np.std(median_ns_rvea_decision):.4f} & {np.mean(median_ns_nsga_decision):.1f} & {np.std(median_ns_nsga_decision):.4f}") + print("RM") + print (f"{np.mean(median_rm_rvea_decision):.4f} & {np.std(median_rm_rvea_decision):.4f} & {np.mean(median_rm_nsga_decision):.4f} & {np.std(median_rm_nsga_decision):.4f}") + #print (f"NSGA Ns = {np.mean(median_ns_nsga_decision)} & {np.std(median_ns_nsga_decision)}") + #print (f"NSGA RM = {np.mean(median_rm_nsga_decision)} & {np.std(median_rm_nsga_decision)}") + + 
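Editor's note on the normalization used before the R-metric calls throughout these scripts: sklearn's Normalizer rescales each sample (row) to unit norm and its fit() is stateless, so each population and the reference point are normalized independently to unit length rather than to a common [0, 1] range. The sketch below only illustrates that behaviour and a min-max alternative; the ideal/nadir values in it are placeholders, not taken from the patch.

import numpy as np
from sklearn.preprocessing import Normalizer

objectives = np.array([[0.2, 0.8],
                       [0.5, 0.5]])
ref_point = np.array([[0.4, 0.6]])

# Normalizer is stateless: fit() learns nothing and transform() rescales each ROW to unit L2 norm.
norm_objs = Normalizer().fit(objectives).transform(objectives)
norm_ref = Normalizer().fit(ref_point).transform(ref_point)
print(np.linalg.norm(norm_objs, axis=1))   # -> [1. 1.]
print(np.linalg.norm(norm_ref, axis=1))    # -> [1.]

# A min-max style alternative against a known ideal/nadir (illustrative values only):
ideal, nadir = np.zeros(2), np.ones(2)
minmax_objs = (objectives - ideal) / (nadir - ideal)

If scaling to [0, 1] against the true ideal and nadir is what is intended for the R-metric inputs, a MinMaxScaler-style transform like the last two lines would behave differently from the row-wise Normalizer used here.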
#data.to_csv(f"refpoints{run+1}.csv", index=False) diff --git a/docs/notebooks/extendedADM_NSGAvsRVEARanges (another copy).py b/docs/notebooks/extendedADM_NSGAvsRVEARanges (another copy).py new file mode 100644 index 00000000..9613e5bc --- /dev/null +++ b/docs/notebooks/extendedADM_NSGAvsRVEARanges (another copy).py @@ -0,0 +1,310 @@ +import numpy as np +import pandas as pd + +import baseADM +from baseADM import * +import generatePreference as gp + +from desdeo_problem.testproblems.TestProblems import test_problem_builder +from desdeo_emo.utilities.ReferenceVectors import ReferenceVectors + +from desdeo_emo.EAs.NSGAIII import NSGAIII as RVEA +from desdeo_emo.EAs.RNSGAIII import RNSGAIII +from desdeo_emo.utilities.preference_converters import UPEMO + +from pymoo.factory import get_problem, get_reference_directions +import rmetric as rm +from sklearn.preprocessing import Normalizer +#from pymoo.config import Configuration + +#Configuration.show_compile_hint = False + +problem_names = ["DTLZ1", "DTLZ3"] +n_objs = np.asarray([7,9]) # number of objectives + +K = 10 +n_vars = K + n_objs - 1 # number of variables + +num_gen_per_iter = [200] # number of generations per iteration + +algorithms = ["iRVEA", "RNSGAIII"] # algorithms to be compared + +# the followings are for formatting results +column_names = ( + ["problem", "num_obj", "iteration", "num_gens", "preferred_ranges"] + + [algorithm + "_R_IGD" for algorithm in algorithms] + + [algorithm + "_R_HV" for algorithm in algorithms] + + [algorithm + "_N_Ss" for algorithm in algorithms] + + [algorithm + "_FEs" for algorithm in algorithms] +) +excess_columns = ["_R_IGD", "_R_HV"] +data = pd.DataFrame(columns=column_names) +data_row = pd.DataFrame(columns=column_names, index=[1]) + +# ADM parameters +L = 4 # number of iterations for the learning phase +D = 3 # number of iterations for the decision phase +lattice_resolution = 5 # density variable for creating reference vectors + +total_run = 5 + +for gen in num_gen_per_iter: + for n_obj, n_var in zip(n_objs, n_vars): + for problem_name in problem_names: + #Global arrays for median and stddev + median_ns_rvea_learning = [] + median_rm_rvea_learning = [] + median_ns_nsga_learning = [] + median_rm_nsga_learning = [] + + median_ns_rvea_decision = [] + median_rm_rvea_decision = [] + median_ns_nsga_decision = [] + median_rm_nsga_decision = [] + for run in range(total_run): + print(f"Run {run+1} of {total_run}") + counter = 1 + total_count = len(num_gen_per_iter) * len(n_objs) * len(problem_names) + print(f"Loop {counter} of {total_count}") + counter += 1 + problem = test_problem_builder( + name=problem_name, n_of_objectives=n_obj, n_of_variables=n_var + ) + problem.ideal_fitness = np.asarray([0] * n_obj) + problem.nadir_fitness = abs(np.random.normal(size=n_obj, scale=0.15)) + 1 + true_ideal = np.asarray([0] * n_obj) + true_nadir = np.asarray([1] * n_obj) + # initial reference point is specified randomly + response = np.random.rand(n_obj) + # run algorithms once with the randomly generated reference point + # interactive RVEA + int_rvea = RVEA(problem=problem, interact=True, n_gen_per_iter=gen) + int_rvea.set_interaction_type('Reference point') + pref_int_rvea, _ = int_rvea.start() + + for boundrnd in range(0, n_obj): + if response[boundrnd] < int_rvea.population.problem.ideal_fitness[boundrnd]: + response[boundrnd] = int_rvea.population.problem.nadir_fitness[boundrnd] + # run algorithms once with the randomly generated reference point + #pref_int_rvea,_ = int_rvea.requests() + #pref_int_nsga,_ = 
int_nsga.requests() + pref_int_rvea.response = pd.DataFrame( + [response], + columns=pref_int_rvea.content["dimensions_data"].columns, + ) + pref_int_rvea,_ = int_rvea.iterate(pref_int_rvea) + #RNSGAIII + int_rnsga = RNSGAIII(problem, 50, np.atleast_2d(UPEMO(response)), n_iterations=10, n_gen_per_iter=gen) + pref_int_nsga,_ = int_rnsga.iterate() + # build initial composite front + ( + rvea_n_solutions, + nsga_n_solutions, + cf, + ) = generate_composite_front_with_identity( + int_rvea.population.objectives, int_rnsga.population.objectives + ) + # the following two lines for getting pareto front by using pymoo framework + problemR = get_problem(problem_name.lower(), n_var, n_obj) + ref_dirs = get_reference_directions( + "das-dennis", n_obj, n_partitions=12 + ) + pareto_front = problemR.pareto_front(ref_dirs) + # creates uniformly distributed reference vectors + reference_vectors = ReferenceVectors(lattice_resolution=lattice_resolution, number_of_objectives=n_obj) + + # Arrays to store values for the current run + rm_rvea_learning = np.array([]) + rm_rvea_decision = np.array([]) + ns_rvea_learning = np.array([]) + ns_rvea_decision = np.array([]) + + + rm_nsga_learning = np.array([]) + rm_nsga_decision = np.array([]) + ns_nsga_learning = np.array([]) + ns_nsga_decision = np.array([]) + + # learning phase + for i in range(L): + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + problem_name, + n_obj, + i + 1, + gen, + ] + #problem.ideal = np.asarray([0.1e-500] * n_obj) + #problem.nadir = abs(np.random.normal(size=n_obj, scale=0.15)) + 1 + # After this class call, solutions inside the composite front are assigned to reference vectors + base = baseADM(cf, reference_vectors) + # generates the next reference point for the next iteration in the learning phase + response, reference_point = gp.generateRanges4learning(base, problem.ideal, true_nadir) + #response = response.T + int_rvea.set_interaction_type('Preferred ranges') + pref_int_rvea, _ = int_rvea.start() + data_row["preferred_ranges"] = [response] + # run algorithms with the new reference point + + #Interactive RVEA + pref_int_rvea.response = response + previous_RVEA_FEs = int_rvea._function_evaluation_count + + pref_int_rvea,_ = int_rvea.iterate(pref_int_rvea) + int_rnsga = RNSGAIII(problem, 50, np.atleast_2d(UPEMO(response.T)), n_iterations=10, n_gen_per_iter=gen) + pref_int_rnsga,_ = int_rnsga.iterate() + previous_NSGA_FEs = int_rnsga._function_evaluation_count + peritr_RVEA_FEs = ( + int_rvea._function_evaluation_count - previous_RVEA_FEs + ) + peritr_NSGA_FEs = ( + int_rnsga._function_evaluation_count - previous_NSGA_FEs + ) + # extend composite front with newly obtained solutions + ( + rvea_n_solutions, + nsga_n_solutions, + cf, + ) = generate_composite_front_with_identity( + int_rvea.population.objectives, + int_rnsga.population.objectives, + cf, + ) + #data_row["iRVEA_N_Ss"] = [rvea_n_solutions] + #data_row["RNSGAIII_N_Ss"] = [nsga_n_solutions] + #data_row["iRVEA_FEs"] = [peritr_RVEA_FEs * n_obj] + #data_row["RNSGAIII_FEs"] = [peritr_NSGA_FEs * n_obj] + + ns_rvea_learning = np.append(ns_rvea_learning, rvea_n_solutions) + ns_nsga_learning = np.append(ns_nsga_learning, nsga_n_solutions) + + # R-metric calculation + ref_point = reference_point.reshape(1, n_obj) + # normalize reference point + rp_transformer = Normalizer().fit(ref_point) + norm_rp = rp_transformer.transform(ref_point) + rmetric = rm.RMetric(problemR, norm_rp, pf=pareto_front) + # normalize solutions before sending r-metric + rvea_transformer = 
Normalizer().fit(int_rvea.population.objectives) + norm_rvea = rvea_transformer.transform( + int_rvea.population.objectives + ) + nsga_transformer = Normalizer().fit(int_rnsga.population.objectives) + norm_nsga = nsga_transformer.transform( + int_rnsga.population.objectives + ) + # R-metric calls for R_IGD and R_HV + rigd_irvea, rhv_irvea = rmetric.calc(norm_rvea, others=norm_nsga) + rigd_insga, rhv_insga = rmetric.calc(norm_nsga, others=norm_rvea) + #data_row[ + # ["iRVEA" + excess_col for excess_col in excess_columns] + #] = [rigd_irvea, rhv_irvea] + #data_row[ + # ["RNSGAIII" + excess_col for excess_col in excess_columns] + #] = [rigd_insga, rhv_insga] + #data = data.append(data_row, ignore_index=1) + rm_rvea_learning = np.append(rm_rvea_learning, rigd_irvea) + rm_nsga_learning = np.append(rm_nsga_learning, rigd_insga) + + # Compute cumulative sum of the learning phase + median_ns_rvea_learning = np.append(median_ns_rvea_learning, np.sum(ns_rvea_learning)) + median_rm_rvea_learning = np.append(median_rm_rvea_learning, np.sum(rm_rvea_learning)) + median_ns_nsga_learning = np.append(median_ns_nsga_learning, np.sum(ns_nsga_learning)) + median_rm_nsga_learning = np.append(median_rm_nsga_learning, np.sum(rm_nsga_learning)) + # Decision phase + # After the learning phase the reference vector which has the maximum number of assigned solutions forms ROI + max_assigned_vector = gp.get_max_assigned_vector(base.assigned_vectors) + for i in range(D): + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + problem_name, + n_obj, + L + i + 1, + gen, + ] + #problem.ideal = np.asarray([0.1e-500] * n_obj) + #problem.nadir = abs(np.random.normal(size=n_obj, scale=0.15)) + 1 + # since composite front grows after each iteration this call should be done for each iteration + base = baseADM(cf, reference_vectors) + # generates the next reference point for the decision phase + response, reference_point = gp.generateRanges4decision( + base, max_assigned_vector[0], problem.ideal, true_nadir + ) + #response = response.T + data_row["preferred_ranges"] = [response] + # run algorithms with the new reference point + pref_int_rvea.response = response + + int_rnsga = RNSGAIII(problem, 50, np.atleast_2d(UPEMO(response.T)), n_iterations=10, n_gen_per_iter=gen) + previous_RVEA_FEs = int_rvea._function_evaluation_count + previous_NSGA_FEs = int_rnsga._function_evaluation_count + pref_int_rvea,_ = int_rvea.iterate(pref_int_rvea) + pref_int_nsga,_ = int_rnsga.iterate() + peritr_RVEA_FEs = ( + int_rvea._function_evaluation_count - previous_RVEA_FEs + ) + peritr_NSGA_FEs = ( + int_rnsga._function_evaluation_count - previous_NSGA_FEs + ) + # extend composite front with newly obtained solutions + ( + rvea_n_solutions, + nsga_n_solutions, + cf, + ) = generate_composite_front_with_identity( + int_rvea.population.objectives, + int_rnsga.population.objectives, + cf, + ) + #data_row["iRVEA_N_Ss"] = [rvea_n_solutions] + #data_row["RNSGAIII_N_Ss"] = [nsga_n_solutions] + #data_row["iRVEA_FEs"] = [peritr_RVEA_FEs * n_obj] + #data_row["RNSGAIII_FEs"] = [peritr_NSGA_FEs * n_obj] + + ns_rvea_decision = np.append(ns_rvea_decision, rvea_n_solutions) + ns_nsga_decision = np.append(ns_nsga_decision, nsga_n_solutions) + + # R-metric calculation + ref_point = reference_point.reshape(1, n_obj) + rp_transformer = Normalizer().fit(ref_point) + norm_rp = rp_transformer.transform(ref_point) + # for decision phase, delta is specified as 0.2 + rmetric = rm.RMetric(problemR, norm_rp, delta=0.2, pf=pareto_front) + # normalize solutions before 
sending r-metric + rvea_transformer = Normalizer().fit(int_rvea.population.objectives) + norm_rvea = rvea_transformer.transform( + int_rvea.population.objectives + ) + nsga_transformer = Normalizer().fit(int_rnsga.population.objectives) + norm_nsga = nsga_transformer.transform( + int_rnsga.population.objectives + ) + rigd_irvea, rhv_irvea = rmetric.calc(norm_rvea, others=norm_nsga) + rigd_insga, rhv_insga = rmetric.calc(norm_nsga, others=norm_rvea) + #data_row[ + # ["iRVEA" + excess_col for excess_col in excess_columns] + #] = [rigd_irvea, rhv_irvea] + #data_row[ + # ["RNSGAIII" + excess_col for excess_col in excess_columns] + #] = [rigd_insga, rhv_insga] + data = data.append(data_row, ignore_index=1) + rm_rvea_decision = np.append(rm_rvea_decision, rigd_irvea) + rm_nsga_decision = np.append(rm_nsga_decision, rigd_insga) + #Compute median of the decision phase + median_ns_rvea_decision = np.append(median_ns_rvea_decision, np.sum(ns_rvea_decision)) + median_rm_rvea_decision = np.append(median_rm_rvea_decision, np.sum(rm_rvea_decision)) + median_ns_nsga_decision = np.append(median_ns_nsga_decision, np.sum(ns_nsga_decision)) + median_rm_nsga_decision = np.append(median_rm_nsga_decision, np.sum(rm_nsga_decision)) + print(f"Results for Problem {problem_name} with {n_obj} objectives") + print ("Learning phase") + print("Ns") + print (f"{np.mean(median_ns_rvea_learning):.1f} & {np.std(median_ns_rvea_learning):.4f} & {np.mean(median_ns_nsga_learning):.1f} & {np.std(median_ns_nsga_learning):.4f}") + print("RM") + print (f"{np.mean(median_rm_rvea_learning):.4f} & {np.std(median_rm_rvea_learning):.4f} & {np.mean(median_rm_nsga_learning):.4f} & {np.std(median_rm_nsga_learning):.4f}") + #print (f"NSGA Ns = {np.mean(median_ns_nsga_learning)} & {np.std(median_ns_nsga_learning)}") + #print (f"NSGA RM = {np.mean(median_rm_nsga_learning)} & {np.std(median_rm_nsga_learning)}") + + print("Decision phase") + print("Ns") + print (f"{np.mean(median_ns_rvea_decision):.1f} & {np.std(median_ns_rvea_decision):.4f} & {np.mean(median_ns_nsga_decision):.1f} & {np.std(median_ns_nsga_decision):.4f}") + print("RM") + print (f"{np.mean(median_rm_rvea_decision):.4f} & {np.std(median_rm_rvea_decision):.4f} & {np.mean(median_rm_nsga_decision):.4f} & {np.std(median_rm_nsga_decision):.4f}") + #data.to_csv(f"ranges{run+1}.csv", index=False) diff --git a/docs/notebooks/extendedADM_NSGAvsRVEARanges (copy).py b/docs/notebooks/extendedADM_NSGAvsRVEARanges (copy).py new file mode 100644 index 00000000..311aec8b --- /dev/null +++ b/docs/notebooks/extendedADM_NSGAvsRVEARanges (copy).py @@ -0,0 +1,247 @@ +import numpy as np +import pandas as pd + +import baseADM +from baseADM import * +import generatePreference as gp + +from desdeo_problem.testproblems.TestProblems import test_problem_builder +from desdeo_emo.utilities.ReferenceVectors import ReferenceVectors + +from desdeo_emo.EAs.NSGAIII import NSGAIII as RVEA +from desdeo_emo.EAs.RNSGAIII import RNSGAIII +from desdeo_emo.utilities.preference_converters import UPEMO + +from pymoo.factory import get_problem, get_reference_directions +import rmetric as rm +from sklearn.preprocessing import Normalizer +#from pymoo.config import Configuration + +#Configuration.show_compile_hint = False + +problem_names = ["DTLZ1", "DTLZ3"] +n_objs = np.asarray([5,7,9]) # number of objectives + +K = 10 +n_vars = K + n_objs - 1 # number of variables + +num_gen_per_iter = [200] # number of generations per iteration + +algorithms = ["iRVEA", "RNSGAIII"] # algorithms to be compared + +# the followings are for 
formatting results +column_names = ( + ["problem", "num_obj", "iteration", "num_gens", "preferred_ranges"] + + [algorithm + "_R_IGD" for algorithm in algorithms] + + [algorithm + "_R_HV" for algorithm in algorithms] + + [algorithm + "_N_Ss" for algorithm in algorithms] + + [algorithm + "_FEs" for algorithm in algorithms] +) +excess_columns = ["_R_IGD", "_R_HV"] +data = pd.DataFrame(columns=column_names) +data_row = pd.DataFrame(columns=column_names, index=[1]) + +# ADM parameters +L = 4 # number of iterations for the learning phase +D = 3 # number of iterations for the decision phase +lattice_resolution = 5 # density variable for creating reference vectors + +total_run = 5 + +for gen in num_gen_per_iter: + for n_obj, n_var in zip(n_objs, n_vars): + for problem_name in problem_names: + #Global arrays for median and stddev + median_ns_rvea_learning = [] + median_rm_rvea_learning = [] + median_ns_nsga_learning = [] + median_rm_nsga_learning = [] + + median_ns_rvea_decision = [] + median_rm_rvea_decision = [] + median_ns_nsga_decision = [] + median_rm_nsga_decision = [] + for run in range(total_run): + print(f"Run {run+1} of {total_run}") + counter = 1 + total_count = len(num_gen_per_iter) * len(n_objs) * len(problem_names) + print(f"Loop {counter} of {total_count}") + counter += 1 + problem = test_problem_builder( + name=problem_name, n_of_objectives=n_obj, n_of_variables=n_var + ) + problem.ideal_fitness = np.asarray([0] * n_obj) + problem.nadir_fitness = abs(np.random.normal(size=n_obj, scale=0.15)) + 1 + true_ideal = np.asarray([0] * n_obj) + true_nadir = np.asarray([1] * n_obj) + # initial reference point is specified randomly + response = np.random.rand(n_obj) + # run algorithms once with the randomly generated reference point + # interactive RVEA + int_rvea = RVEA(problem=problem, interact=True, n_gen_per_iter=gen) + int_rvea.set_interaction_type('Reference point') + pref_int_rvea, _ = int_rvea.start() + + for boundrnd in range(0, n_obj): + if response[boundrnd] < int_rvea.population.problem.ideal_fitness[boundrnd]: + response[boundrnd] = int_rvea.population.problem.nadir_fitness[boundrnd] + # run algorithms once with the randomly generated reference point + #pref_int_rvea,_ = int_rvea.requests() + #pref_int_nsga,_ = int_nsga.requests() + pref_int_rvea.response = pd.DataFrame( + [response], + columns=pref_int_rvea.content["dimensions_data"].columns, + ) + pref_int_rvea,_ = int_rvea.iterate(pref_int_rvea) + #RNSGAIII + int_rnsga = RNSGAIII(problem, 50, np.atleast_2d(UPEMO(response)), n_iterations=10, n_gen_per_iter=gen) + pref_int_nsga,_ = int_rnsga.iterate() + # build initial composite front + ( + rvea_n_solutions, + nsga_n_solutions, + cf, + ) = generate_composite_front_with_identity( + int_rvea.population.objectives, int_rnsga.population.objectives + ) + # the following two lines for getting pareto front by using pymoo framework + problemR = get_problem(problem_name.lower(), n_var, n_obj) + ref_dirs = get_reference_directions( + "das-dennis", n_obj, n_partitions=12 + ) + pareto_front = problemR.pareto_front(ref_dirs) + # creates uniformly distributed reference vectors + reference_vectors = ReferenceVectors(lattice_resolution=lattice_resolution, number_of_objectives=n_obj) + + # Arrays to store values for the current run + rm_rvea_learning = np.array([]) + rm_rvea_decision = np.array([]) + ns_rvea_learning = np.array([]) + ns_rvea_decision = np.array([]) + + + rm_nsga_learning = np.array([]) + rm_nsga_decision = np.array([]) + ns_nsga_learning = np.array([]) + ns_nsga_decision = 
np.array([]) + + # learning phase + for i in range(L): + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + problem_name, + n_obj, + i + 1, + gen, + ] + #problem.ideal = np.asarray([0.1e-500] * n_obj) + #problem.nadir = abs(np.random.normal(size=n_obj, scale=0.15)) + 1 + # After this class call, solutions inside the composite front are assigned to reference vectors + base = baseADM(cf, reference_vectors) + # generates the next reference point for the next iteration in the learning phase + response = gp.preferredSolutions4learning(base) + #response, reference_point = gp.generateRanges4learning(base, problem.ideal, true_nadir) + #response = response.T + int_rvea.set_interaction_type('Preferred solutions') + pref_int_rvea, _ = int_rvea.start() + # run algorithms with the new reference point + print(response) + #Interactive RVEA + pref_int_rvea.response = response + previous_RVEA_FEs = int_rvea._function_evaluation_count + + pref_int_rvea,_ = int_rvea.iterate(pref_int_rvea) + int_rnsga = RNSGAIII(problem, 50, np.atleast_2d(UPEMO(response)), n_iterations=10, n_gen_per_iter=gen) + pref_int_rnsga,_ = int_rnsga.iterate() + previous_NSGA_FEs = int_rnsga._function_evaluation_count + peritr_RVEA_FEs = ( + int_rvea._function_evaluation_count - previous_RVEA_FEs + ) + peritr_NSGA_FEs = ( + int_rnsga._function_evaluation_count - previous_NSGA_FEs + ) + # extend composite front with newly obtained solutions + ( + rvea_n_solutions, + nsga_n_solutions, + cf, + ) = generate_composite_front_with_identity( + int_rvea.population.objectives, + int_rnsga.population.objectives, + cf, + ) + #data_row["iRVEA_N_Ss"] = [rvea_n_solutions] + #data_row["RNSGAIII_N_Ss"] = [nsga_n_solutions] + #data_row["iRVEA_FEs"] = [peritr_RVEA_FEs * n_obj] + #data_row["RNSGAIII_FEs"] = [peritr_NSGA_FEs * n_obj] + + ns_rvea_learning = np.append(ns_rvea_learning, rvea_n_solutions) + ns_nsga_learning = np.append(ns_nsga_learning, nsga_n_solutions) + + + # Compute cumulative sum of the learning phase + median_ns_rvea_learning = np.append(median_ns_rvea_learning, np.sum(ns_rvea_learning)) + median_ns_nsga_learning = np.append(median_ns_nsga_learning, np.sum(ns_nsga_learning)) + + # Decision phase + # After the learning phase the reference vector which has the maximum number of assigned solutions forms ROI + max_assigned_vector = gp.get_max_assigned_vector(base.assigned_vectors) + for i in range(D): + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + problem_name, + n_obj, + L + i + 1, + gen, + ] + #problem.ideal = np.asarray([0.1e-500] * n_obj) + #problem.nadir = abs(np.random.normal(size=n_obj, scale=0.15)) + 1 + # since composite front grows after each iteration this call should be done for each iteration + base = baseADM(cf, reference_vectors) + # generates the next reference point for the decision phase + response = gp.preferredSolutions4decision(base) + #response = response.T + data_row["preferred_solutions"] = [response] + # run algorithms with the new reference point + pref_int_rvea.response = response + + int_rnsga = RNSGAIII(problem, 50, np.atleast_2d(UPEMO(response)), n_iterations=10, n_gen_per_iter=gen) + previous_RVEA_FEs = int_rvea._function_evaluation_count + previous_NSGA_FEs = int_rnsga._function_evaluation_count + pref_int_rvea,_ = int_rvea.iterate(pref_int_rvea) + pref_int_nsga,_ = int_rnsga.iterate() + peritr_RVEA_FEs = ( + int_rvea._function_evaluation_count - previous_RVEA_FEs + ) + peritr_NSGA_FEs = ( + int_rnsga._function_evaluation_count - previous_NSGA_FEs + ) + # extend composite front 
with newly obtained solutions + ( + rvea_n_solutions, + nsga_n_solutions, + cf, + ) = generate_composite_front_with_identity( + int_rvea.population.objectives, + int_rnsga.population.objectives, + cf, + ) + #data_row["iRVEA_N_Ss"] = [rvea_n_solutions] + #data_row["RNSGAIII_N_Ss"] = [nsga_n_solutions] + #data_row["iRVEA_FEs"] = [peritr_RVEA_FEs * n_obj] + #data_row["RNSGAIII_FEs"] = [peritr_NSGA_FEs * n_obj] + + ns_rvea_decision = np.append(ns_rvea_decision, rvea_n_solutions) + ns_nsga_decision = np.append(ns_nsga_decision, nsga_n_solutions) + + #Compute median of the decision phase + median_ns_rvea_decision = np.append(median_ns_rvea_decision, np.sum(ns_rvea_decision)) + median_ns_nsga_decision = np.append(median_ns_nsga_decision, np.sum(ns_nsga_decision)) + + print(f"Results for Problem {problem_name} with {n_obj} objectives") + print ("Learning phase") + print("Ns") + print (f"{np.mean(median_ns_rvea_learning):.1f} & {np.std(median_ns_rvea_learning):.4f} & {np.mean(median_ns_nsga_learning):.1f} & {np.std(median_ns_nsga_learning):.4f}") + + print("Decision phase") + print("Ns") + print (f"{np.mean(median_ns_rvea_decision):.1f} & {np.std(median_ns_rvea_decision):.4f} & {np.mean(median_ns_nsga_decision):.1f} & {np.std(median_ns_nsga_decision):.4f}") + #data.to_csv(f"ranges{run+1}.csv", index=False) diff --git a/docs/notebooks/extendedADM_NSGAvsRVEARanges.py b/docs/notebooks/extendedADM_NSGAvsRVEARanges.py new file mode 100644 index 00000000..9613e5bc --- /dev/null +++ b/docs/notebooks/extendedADM_NSGAvsRVEARanges.py @@ -0,0 +1,310 @@ +import numpy as np +import pandas as pd + +import baseADM +from baseADM import * +import generatePreference as gp + +from desdeo_problem.testproblems.TestProblems import test_problem_builder +from desdeo_emo.utilities.ReferenceVectors import ReferenceVectors + +from desdeo_emo.EAs.NSGAIII import NSGAIII as RVEA +from desdeo_emo.EAs.RNSGAIII import RNSGAIII +from desdeo_emo.utilities.preference_converters import UPEMO + +from pymoo.factory import get_problem, get_reference_directions +import rmetric as rm +from sklearn.preprocessing import Normalizer +#from pymoo.config import Configuration + +#Configuration.show_compile_hint = False + +problem_names = ["DTLZ1", "DTLZ3"] +n_objs = np.asarray([7,9]) # number of objectives + +K = 10 +n_vars = K + n_objs - 1 # number of variables + +num_gen_per_iter = [200] # number of generations per iteration + +algorithms = ["iRVEA", "RNSGAIII"] # algorithms to be compared + +# the followings are for formatting results +column_names = ( + ["problem", "num_obj", "iteration", "num_gens", "preferred_ranges"] + + [algorithm + "_R_IGD" for algorithm in algorithms] + + [algorithm + "_R_HV" for algorithm in algorithms] + + [algorithm + "_N_Ss" for algorithm in algorithms] + + [algorithm + "_FEs" for algorithm in algorithms] +) +excess_columns = ["_R_IGD", "_R_HV"] +data = pd.DataFrame(columns=column_names) +data_row = pd.DataFrame(columns=column_names, index=[1]) + +# ADM parameters +L = 4 # number of iterations for the learning phase +D = 3 # number of iterations for the decision phase +lattice_resolution = 5 # density variable for creating reference vectors + +total_run = 5 + +for gen in num_gen_per_iter: + for n_obj, n_var in zip(n_objs, n_vars): + for problem_name in problem_names: + #Global arrays for median and stddev + median_ns_rvea_learning = [] + median_rm_rvea_learning = [] + median_ns_nsga_learning = [] + median_rm_nsga_learning = [] + + median_ns_rvea_decision = [] + median_rm_rvea_decision = [] + median_ns_nsga_decision 
= [] + median_rm_nsga_decision = [] + for run in range(total_run): + print(f"Run {run+1} of {total_run}") + counter = 1 + total_count = len(num_gen_per_iter) * len(n_objs) * len(problem_names) + print(f"Loop {counter} of {total_count}") + counter += 1 + problem = test_problem_builder( + name=problem_name, n_of_objectives=n_obj, n_of_variables=n_var + ) + problem.ideal_fitness = np.asarray([0] * n_obj) + problem.nadir_fitness = abs(np.random.normal(size=n_obj, scale=0.15)) + 1 + true_ideal = np.asarray([0] * n_obj) + true_nadir = np.asarray([1] * n_obj) + # initial reference point is specified randomly + response = np.random.rand(n_obj) + # run algorithms once with the randomly generated reference point + # interactive RVEA + int_rvea = RVEA(problem=problem, interact=True, n_gen_per_iter=gen) + int_rvea.set_interaction_type('Reference point') + pref_int_rvea, _ = int_rvea.start() + + for boundrnd in range(0, n_obj): + if response[boundrnd] < int_rvea.population.problem.ideal_fitness[boundrnd]: + response[boundrnd] = int_rvea.population.problem.nadir_fitness[boundrnd] + # run algorithms once with the randomly generated reference point + #pref_int_rvea,_ = int_rvea.requests() + #pref_int_nsga,_ = int_nsga.requests() + pref_int_rvea.response = pd.DataFrame( + [response], + columns=pref_int_rvea.content["dimensions_data"].columns, + ) + pref_int_rvea,_ = int_rvea.iterate(pref_int_rvea) + #RNSGAIII + int_rnsga = RNSGAIII(problem, 50, np.atleast_2d(UPEMO(response)), n_iterations=10, n_gen_per_iter=gen) + pref_int_nsga,_ = int_rnsga.iterate() + # build initial composite front + ( + rvea_n_solutions, + nsga_n_solutions, + cf, + ) = generate_composite_front_with_identity( + int_rvea.population.objectives, int_rnsga.population.objectives + ) + # the following two lines for getting pareto front by using pymoo framework + problemR = get_problem(problem_name.lower(), n_var, n_obj) + ref_dirs = get_reference_directions( + "das-dennis", n_obj, n_partitions=12 + ) + pareto_front = problemR.pareto_front(ref_dirs) + # creates uniformly distributed reference vectors + reference_vectors = ReferenceVectors(lattice_resolution=lattice_resolution, number_of_objectives=n_obj) + + # Arrays to store values for the current run + rm_rvea_learning = np.array([]) + rm_rvea_decision = np.array([]) + ns_rvea_learning = np.array([]) + ns_rvea_decision = np.array([]) + + + rm_nsga_learning = np.array([]) + rm_nsga_decision = np.array([]) + ns_nsga_learning = np.array([]) + ns_nsga_decision = np.array([]) + + # learning phase + for i in range(L): + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + problem_name, + n_obj, + i + 1, + gen, + ] + #problem.ideal = np.asarray([0.1e-500] * n_obj) + #problem.nadir = abs(np.random.normal(size=n_obj, scale=0.15)) + 1 + # After this class call, solutions inside the composite front are assigned to reference vectors + base = baseADM(cf, reference_vectors) + # generates the next reference point for the next iteration in the learning phase + response, reference_point = gp.generateRanges4learning(base, problem.ideal, true_nadir) + #response = response.T + int_rvea.set_interaction_type('Preferred ranges') + pref_int_rvea, _ = int_rvea.start() + data_row["preferred_ranges"] = [response] + # run algorithms with the new reference point + + #Interactive RVEA + pref_int_rvea.response = response + previous_RVEA_FEs = int_rvea._function_evaluation_count + + pref_int_rvea,_ = int_rvea.iterate(pref_int_rvea) + int_rnsga = RNSGAIII(problem, 50, np.atleast_2d(UPEMO(response.T)), 
n_iterations=10, n_gen_per_iter=gen) + pref_int_rnsga,_ = int_rnsga.iterate() + previous_NSGA_FEs = int_rnsga._function_evaluation_count + peritr_RVEA_FEs = ( + int_rvea._function_evaluation_count - previous_RVEA_FEs + ) + peritr_NSGA_FEs = ( + int_rnsga._function_evaluation_count - previous_NSGA_FEs + ) + # extend composite front with newly obtained solutions + ( + rvea_n_solutions, + nsga_n_solutions, + cf, + ) = generate_composite_front_with_identity( + int_rvea.population.objectives, + int_rnsga.population.objectives, + cf, + ) + #data_row["iRVEA_N_Ss"] = [rvea_n_solutions] + #data_row["RNSGAIII_N_Ss"] = [nsga_n_solutions] + #data_row["iRVEA_FEs"] = [peritr_RVEA_FEs * n_obj] + #data_row["RNSGAIII_FEs"] = [peritr_NSGA_FEs * n_obj] + + ns_rvea_learning = np.append(ns_rvea_learning, rvea_n_solutions) + ns_nsga_learning = np.append(ns_nsga_learning, nsga_n_solutions) + + # R-metric calculation + ref_point = reference_point.reshape(1, n_obj) + # normalize reference point + rp_transformer = Normalizer().fit(ref_point) + norm_rp = rp_transformer.transform(ref_point) + rmetric = rm.RMetric(problemR, norm_rp, pf=pareto_front) + # normalize solutions before sending r-metric + rvea_transformer = Normalizer().fit(int_rvea.population.objectives) + norm_rvea = rvea_transformer.transform( + int_rvea.population.objectives + ) + nsga_transformer = Normalizer().fit(int_rnsga.population.objectives) + norm_nsga = nsga_transformer.transform( + int_rnsga.population.objectives + ) + # R-metric calls for R_IGD and R_HV + rigd_irvea, rhv_irvea = rmetric.calc(norm_rvea, others=norm_nsga) + rigd_insga, rhv_insga = rmetric.calc(norm_nsga, others=norm_rvea) + #data_row[ + # ["iRVEA" + excess_col for excess_col in excess_columns] + #] = [rigd_irvea, rhv_irvea] + #data_row[ + # ["RNSGAIII" + excess_col for excess_col in excess_columns] + #] = [rigd_insga, rhv_insga] + #data = data.append(data_row, ignore_index=1) + rm_rvea_learning = np.append(rm_rvea_learning, rigd_irvea) + rm_nsga_learning = np.append(rm_nsga_learning, rigd_insga) + + # Compute cumulative sum of the learning phase + median_ns_rvea_learning = np.append(median_ns_rvea_learning, np.sum(ns_rvea_learning)) + median_rm_rvea_learning = np.append(median_rm_rvea_learning, np.sum(rm_rvea_learning)) + median_ns_nsga_learning = np.append(median_ns_nsga_learning, np.sum(ns_nsga_learning)) + median_rm_nsga_learning = np.append(median_rm_nsga_learning, np.sum(rm_nsga_learning)) + # Decision phase + # After the learning phase the reference vector which has the maximum number of assigned solutions forms ROI + max_assigned_vector = gp.get_max_assigned_vector(base.assigned_vectors) + for i in range(D): + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + problem_name, + n_obj, + L + i + 1, + gen, + ] + #problem.ideal = np.asarray([0.1e-500] * n_obj) + #problem.nadir = abs(np.random.normal(size=n_obj, scale=0.15)) + 1 + # since composite front grows after each iteration this call should be done for each iteration + base = baseADM(cf, reference_vectors) + # generates the next reference point for the decision phase + response, reference_point = gp.generateRanges4decision( + base, max_assigned_vector[0], problem.ideal, true_nadir + ) + #response = response.T + data_row["preferred_ranges"] = [response] + # run algorithms with the new reference point + pref_int_rvea.response = response + + int_rnsga = RNSGAIII(problem, 50, np.atleast_2d(UPEMO(response.T)), n_iterations=10, n_gen_per_iter=gen) + previous_RVEA_FEs = int_rvea._function_evaluation_count + 
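Editor's note (annotation, not part of the patch): the per-iteration function-evaluation bookkeeping relies on snapshotting _function_evaluation_count before iterate() and subtracting afterwards, as done here for RVEA in the decision phase. In the learning phase of this script the RNSGA-III snapshot (previous_NSGA_FEs) appears to be taken after int_rnsga.iterate(), so peritr_NSGA_FEs would come out as zero; the impact is limited because those values are only written to the commented-out data_row lines. The self-contained sketch below, using a hypothetical stand-in EA, shows the intended before/after pattern.

class _DummyEA:
    """Stand-in exposing the same counter attribute as the desdeo_emo EAs."""
    def __init__(self):
        self._function_evaluation_count = 0

    def iterate(self):
        # pretend one iteration costs population_size * n_gen_per_iter evaluations
        self._function_evaluation_count += 50 * 200


def per_iteration_fes(ea):
    """Snapshot the counter, run one iteration, and return the per-iteration FE count."""
    before = ea._function_evaluation_count
    ea.iterate()
    return ea._function_evaluation_count - before


print(per_iteration_fes(_DummyEA()))   # 10000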
previous_NSGA_FEs = int_rnsga._function_evaluation_count + pref_int_rvea,_ = int_rvea.iterate(pref_int_rvea) + pref_int_nsga,_ = int_rnsga.iterate() + peritr_RVEA_FEs = ( + int_rvea._function_evaluation_count - previous_RVEA_FEs + ) + peritr_NSGA_FEs = ( + int_rnsga._function_evaluation_count - previous_NSGA_FEs + ) + # extend composite front with newly obtained solutions + ( + rvea_n_solutions, + nsga_n_solutions, + cf, + ) = generate_composite_front_with_identity( + int_rvea.population.objectives, + int_rnsga.population.objectives, + cf, + ) + #data_row["iRVEA_N_Ss"] = [rvea_n_solutions] + #data_row["RNSGAIII_N_Ss"] = [nsga_n_solutions] + #data_row["iRVEA_FEs"] = [peritr_RVEA_FEs * n_obj] + #data_row["RNSGAIII_FEs"] = [peritr_NSGA_FEs * n_obj] + + ns_rvea_decision = np.append(ns_rvea_decision, rvea_n_solutions) + ns_nsga_decision = np.append(ns_nsga_decision, nsga_n_solutions) + + # R-metric calculation + ref_point = reference_point.reshape(1, n_obj) + rp_transformer = Normalizer().fit(ref_point) + norm_rp = rp_transformer.transform(ref_point) + # for decision phase, delta is specified as 0.2 + rmetric = rm.RMetric(problemR, norm_rp, delta=0.2, pf=pareto_front) + # normalize solutions before sending r-metric + rvea_transformer = Normalizer().fit(int_rvea.population.objectives) + norm_rvea = rvea_transformer.transform( + int_rvea.population.objectives + ) + nsga_transformer = Normalizer().fit(int_rnsga.population.objectives) + norm_nsga = nsga_transformer.transform( + int_rnsga.population.objectives + ) + rigd_irvea, rhv_irvea = rmetric.calc(norm_rvea, others=norm_nsga) + rigd_insga, rhv_insga = rmetric.calc(norm_nsga, others=norm_rvea) + #data_row[ + # ["iRVEA" + excess_col for excess_col in excess_columns] + #] = [rigd_irvea, rhv_irvea] + #data_row[ + # ["RNSGAIII" + excess_col for excess_col in excess_columns] + #] = [rigd_insga, rhv_insga] + data = data.append(data_row, ignore_index=1) + rm_rvea_decision = np.append(rm_rvea_decision, rigd_irvea) + rm_nsga_decision = np.append(rm_nsga_decision, rigd_insga) + #Compute median of the decision phase + median_ns_rvea_decision = np.append(median_ns_rvea_decision, np.sum(ns_rvea_decision)) + median_rm_rvea_decision = np.append(median_rm_rvea_decision, np.sum(rm_rvea_decision)) + median_ns_nsga_decision = np.append(median_ns_nsga_decision, np.sum(ns_nsga_decision)) + median_rm_nsga_decision = np.append(median_rm_nsga_decision, np.sum(rm_nsga_decision)) + print(f"Results for Problem {problem_name} with {n_obj} objectives") + print ("Learning phase") + print("Ns") + print (f"{np.mean(median_ns_rvea_learning):.1f} & {np.std(median_ns_rvea_learning):.4f} & {np.mean(median_ns_nsga_learning):.1f} & {np.std(median_ns_nsga_learning):.4f}") + print("RM") + print (f"{np.mean(median_rm_rvea_learning):.4f} & {np.std(median_rm_rvea_learning):.4f} & {np.mean(median_rm_nsga_learning):.4f} & {np.std(median_rm_nsga_learning):.4f}") + #print (f"NSGA Ns = {np.mean(median_ns_nsga_learning)} & {np.std(median_ns_nsga_learning)}") + #print (f"NSGA RM = {np.mean(median_rm_nsga_learning)} & {np.std(median_rm_nsga_learning)}") + + print("Decision phase") + print("Ns") + print (f"{np.mean(median_ns_rvea_decision):.1f} & {np.std(median_ns_rvea_decision):.4f} & {np.mean(median_ns_nsga_decision):.1f} & {np.std(median_ns_nsga_decision):.4f}") + print("RM") + print (f"{np.mean(median_rm_rvea_decision):.4f} & {np.std(median_rm_rvea_decision):.4f} & {np.mean(median_rm_nsga_decision):.4f} & {np.std(median_rm_nsga_decision):.4f}") + #data.to_csv(f"ranges{run+1}.csv", 
index=False) diff --git a/docs/notebooks/extendedADM_NSGAvsRVEA_PS.py b/docs/notebooks/extendedADM_NSGAvsRVEA_PS.py new file mode 100644 index 00000000..ae42df51 --- /dev/null +++ b/docs/notebooks/extendedADM_NSGAvsRVEA_PS.py @@ -0,0 +1,283 @@ +import numpy as np +import pandas as pd + +import baseADM +from baseADM import * +import generatePreference as gp + +from desdeo_problem.testproblems.TestProblems import test_problem_builder +from desdeo_emo.utilities.ReferenceVectors import ReferenceVectors + +from desdeo_emo.EAs.RVEA import RVEA +from desdeo_emo.EAs.IMOEAD import IMOEA_D as MOEA_D + +from pymoo.factory import get_problem, get_reference_directions +import rmetric as rm +from sklearn.preprocessing import Normalizer +#from pymoo.config import Configuration + +#Configuration.show_compile_hint = False + +problem_names = ["DTLZ2", "DTLZ4"] +n_objs = np.asarray([4, 7, 9]) # number of objectives + +K = 10 +n_vars = K + n_objs - 1 # number of variables + +num_gen_per_iter = [200] # number of generations per iteration + +algorithms = ["iRVEA", "iMOEA_D"] # algorithms to be compared + +# the followings are for formatting results +column_names = ( + ["problem", "num_obj", "iteration", "num_gens", "reference_point"] + + [algorithm + "_R_IGD" for algorithm in algorithms] + + [algorithm + "_R_HV" for algorithm in algorithms] + + [algorithm + "_N_Ss" for algorithm in algorithms] + + [algorithm + "_FEs" for algorithm in algorithms] +) +excess_columns = ["_R_IGD", "_R_HV"] +data = pd.DataFrame(columns=column_names) +data_row = pd.DataFrame(columns=column_names, index=[1]) + +# ADM parameters +L = 4 # number of iterations for the learning phase +D = 3 # number of iterations for the decision phase +lattice_resolution = 5 # density variable for creating reference vectors + +total_run = 1 +for run in range(total_run): + print(f"Run {run+1} of {total_run}") + counter = 1 + total_count = len(num_gen_per_iter) * len(n_objs) * len(problem_names) + for gen in num_gen_per_iter: + for n_obj, n_var in zip(n_objs, n_vars): + for problem_name in problem_names: + print(f"Loop {counter} of {total_count}") + counter += 1 + problem = test_problem_builder( + name=problem_name, n_of_objectives=n_obj, n_of_variables=n_var + ) + + problem.ideal = np.asarray([0] * n_obj) + problem.nadir = abs(np.random.normal(size=n_obj, scale=0.15)) + 1 + + true_nadir = np.asarray([1] * n_obj) + + # interactive + int_rvea = RVEA(problem=problem, interact=True, n_gen_per_iter=gen) + int_nsga = MOEA_D(problem=problem, interact=True, n_gen_per_iter=gen) + + # initial reference point is specified randomly + response = np.random.rand(n_obj) + + # run algorithms once with the randomly generated reference point + pref_int_rvea,_ = int_rvea.requests() + pref_int_nsga,_ = int_nsga.requests() + pref_int_rvea[2].response = pd.DataFrame( + [response], + columns=pref_int_rvea[2].content["dimensions_data"].columns, + ) + pref_int_nsga[2].response = pd.DataFrame( + [response], + columns=pref_int_nsga[2].content["dimensions_data"].columns, + ) + + pref_int_rvea,_ = int_rvea.iterate(pref_int_rvea[2]) + pref_int_nsga,_ = int_nsga.iterate(pref_int_nsga[2]) + + # build initial composite front + ( + rvea_n_solutions, + nsga_n_solutions, + cf, + ) = generate_composite_front_with_identity( + int_rvea.population.objectives, int_nsga.population.objectives + ) + + # the following two lines for getting pareto front by using pymoo framework + problemR = get_problem(problem_name.lower(), n_var, n_obj) + ref_dirs = get_reference_directions( + "das-dennis", n_obj, 
n_partitions=12 + ) + pareto_front = problemR.pareto_front(ref_dirs) + + # creates uniformly distributed reference vectors + reference_vectors = ReferenceVectors(lattice_resolution, n_obj) + + # learning phase + for i in range(L): + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + problem_name, + n_obj, + i + 1, + gen, + ] + # After this class call, solutions inside the composite front are assigned to reference vectors + base = baseADM(cf, reference_vectors) + # generates the next reference point for the next iteration in the learning phase + response = gp.generateRP4learning(base) + + data_row["reference_point"] = [response] + + # run algorithms with the new reference point + pref_int_rvea[2].response = pd.DataFrame( + [response], + columns=pref_int_rvea[2].content["dimensions_data"].columns, + ) + pref_int_nsga[2].response = pd.DataFrame( + [response], + columns=pref_int_nsga[2].content["dimensions_data"].columns, + ) + previous_RVEA_FEs = int_rvea._function_evaluation_count + previous_NSGA_FEs = int_nsga._function_evaluation_count + pref_int_rvea,_ = int_rvea.iterate(pref_int_rvea[2]) + pref_int_nsga,_ = int_nsga.iterate(pref_int_nsga[2]) + + peritr_RVEA_FEs = ( + int_rvea._function_evaluation_count - previous_RVEA_FEs + ) + peritr_NSGA_FEs = ( + int_nsga._function_evaluation_count - previous_NSGA_FEs + ) + + # extend composite front with newly obtained solutions + ( + rvea_n_solutions, + nsga_n_solutions, + cf, + ) = generate_composite_front_with_identity( + int_rvea.population.objectives, + int_nsga.population.objectives, + cf, + ) + + data_row["iRVEA_N_Ss"] = [rvea_n_solutions] + data_row["iMOEA_D_N_Ss"] = [nsga_n_solutions] + data_row["iRVEA_FEs"] = [peritr_RVEA_FEs * n_obj] + data_row["iMOEA_D_FEs"] = [peritr_NSGA_FEs * n_obj] + + # R-metric calculation + ref_point = response.reshape(1, n_obj) + + # normalize reference point + rp_transformer = Normalizer().fit(ref_point) + norm_rp = rp_transformer.transform(ref_point) + + rmetric = rm.RMetric(problemR, norm_rp, pf=pareto_front) + + # normalize solutions before sending r-metric + rvea_transformer = Normalizer().fit(int_rvea.population.objectives) + norm_rvea = rvea_transformer.transform( + int_rvea.population.objectives + ) + + nsga_transformer = Normalizer().fit(int_nsga.population.objectives) + norm_nsga = nsga_transformer.transform( + int_nsga.population.objectives + ) + + # R-metric calls for R_IGD and R_HV + rigd_irvea, rhv_irvea = rmetric.calc(norm_rvea, others=norm_nsga) + rigd_insga, rhv_insga = rmetric.calc(norm_nsga, others=norm_rvea) + + data_row[ + ["iRVEA" + excess_col for excess_col in excess_columns] + ] = [rigd_irvea, rhv_irvea] + data_row[ + ["iMOEA_D" + excess_col for excess_col in excess_columns] + ] = [rigd_insga, rhv_insga] + + data = data.append(data_row, ignore_index=1) + + # Decision phase + # After the learning phase the reference vector which has the maximum number of assigned solutions forms ROI + max_assigned_vector = gp.get_max_assigned_vector(base.assigned_vectors) + + for i in range(D): + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + problem_name, + n_obj, + L + i + 1, + gen, + ] + + # since composite front grows after each iteration this call should be done for each iteration + base = baseADM(cf, reference_vectors) + + # generates the next reference point for the decision phase + response = gp.generatePerturbatedRP4decision( + base, max_assigned_vector[0] + ) + + data_row["reference_point"] = [response] + + # run algorithms with the new reference point + 
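# The new reference point lies on the most crowded reference vector and is then shifted component-wise towards better values by the distance to the nearest assigned solution (see generatePerturbatedRP4decision in generatePreference.py). +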
pref_int_rvea[2].response = pd.DataFrame( + [response], + columns=pref_int_rvea[2].content["dimensions_data"].columns, + ) + pref_int_nsga[2].response = pd.DataFrame( + [response], + columns=pref_int_nsga[2].content["dimensions_data"].columns, + ) + previous_RVEA_FEs = int_rvea._function_evaluation_count + previous_NSGA_FEs = int_nsga._function_evaluation_count + pref_int_rvea,_ = int_rvea.iterate(pref_int_rvea[2]) + pref_int_nsga,_ = int_nsga.iterate(pref_int_nsga[2]) + + peritr_RVEA_FEs = ( + int_rvea._function_evaluation_count - previous_RVEA_FEs + ) + peritr_NSGA_FEs = ( + int_nsga._function_evaluation_count - previous_NSGA_FEs + ) + # extend composite front with newly obtained solutions + ( + rvea_n_solutions, + nsga_n_solutions, + cf, + ) = generate_composite_front_with_identity( + int_rvea.population.objectives, + int_nsga.population.objectives, + cf, + ) + + data_row["iRVEA_N_Ss"] = [rvea_n_solutions] + data_row["iMOEA_D_N_Ss"] = [nsga_n_solutions] + data_row["iRVEA_FEs"] = [peritr_RVEA_FEs * n_obj] + data_row["iMOEA_D_FEs"] = [peritr_NSGA_FEs * n_obj] + + # R-metric calculation + ref_point = response.reshape(1, n_obj) + + rp_transformer = Normalizer().fit(ref_point) + norm_rp = rp_transformer.transform(ref_point) + + # for decision phase, delta is specified as 0.2 + rmetric = rm.RMetric(problemR, norm_rp, delta=0.2, pf=pareto_front) + + # normalize solutions before sending r-metric + rvea_transformer = Normalizer().fit(int_rvea.population.objectives) + norm_rvea = rvea_transformer.transform( + int_rvea.population.objectives + ) + + nsga_transformer = Normalizer().fit(int_nsga.population.objectives) + norm_nsga = nsga_transformer.transform( + int_nsga.population.objectives + ) + + rigd_irvea, rhv_irvea = rmetric.calc(norm_rvea, others=norm_nsga) + rigd_insga, rhv_insga = rmetric.calc(norm_nsga, others=norm_rvea) + + data_row[ + ["iRVEA" + excess_col for excess_col in excess_columns] + ] = [rigd_irvea, rhv_irvea] + data_row[ + ["iMOEA_D" + excess_col for excess_col in excess_columns] + ] = [rigd_insga, rhv_insga] + + data = data.append(data_row, ignore_index=1) + + data.to_csv(f"./results/output{run+1}.csv", index=False) diff --git a/docs/notebooks/extendedADM_NSGAvsRVEA_Ranges.py b/docs/notebooks/extendedADM_NSGAvsRVEA_Ranges.py new file mode 100644 index 00000000..6d4f65fb --- /dev/null +++ b/docs/notebooks/extendedADM_NSGAvsRVEA_Ranges.py @@ -0,0 +1,320 @@ +import numpy as np +import pandas as pd + +import baseADM +from baseADM import * +import generatePreference as gp + +from desdeo_problem.testproblems.TestProblems import test_problem_builder +from desdeo_emo.utilities.ReferenceVectors import ReferenceVectors + +from desdeo_emo.EAs.RVEA import RVEA +from desdeo_emo.EAs.NSGAIII import NSGAIII +from desdeo_emo.utilities.preference_converters import UPEMO + +from pymoo.factory import get_problem, get_reference_directions +import rmetric as rm +from sklearn.preprocessing import Normalizer +#from pymoo.config import Configuration + +#Configuration.show_compile_hint = False + +problem_names = ["DTLZ1", "DTLZ3"] +n_objs = np.asarray([9]) # number of objectives + +K = 10 +n_vars = K + n_objs - 1 # number of variables + +num_gen_per_iter = [200] # number of generations per iteration + +algorithms = ["iRVEA", "iNSGAIII"] # algorithms to be compared + +# the followings are for formatting results +column_names = ( + ["problem", "num_obj", "iteration", "num_gens", "reference_point"] + + [algorithm + "_R_IGD" for algorithm in algorithms] + + [algorithm + "_R_HV" for algorithm in algorithms] 
+ + [algorithm + "_N_Ss" for algorithm in algorithms] + + [algorithm + "_FEs" for algorithm in algorithms] +) +excess_columns = ["_R_IGD", "_R_HV"] +data = pd.DataFrame(columns=column_names) +data_row = pd.DataFrame(columns=column_names, index=[1]) + +# ADM parameters +L = 4 # number of iterations for the learning phase +D = 3 # number of iterations for the decision phase +lattice_resolution = 5 # density variable for creating reference vectors + +total_run = 10 + +for gen in num_gen_per_iter: + for n_obj, n_var in zip(n_objs, n_vars): + for problem_name in problem_names: + #Global arrays for median and stddev + median_ns_rvea_learning = [] + median_rm_rvea_learning = [] + median_ns_nsga_learning = [] + median_rm_nsga_learning = [] + + median_ns_rvea_decision = [] + median_rm_rvea_decision = [] + median_ns_nsga_decision = [] + median_rm_nsga_decision = [] + + for run in range(total_run): + print(f"Problem {problem_name} Objectives {n_obj} Run {run+1} of {total_run}") + + problem = test_problem_builder( + name=problem_name, n_of_objectives=n_obj, n_of_variables=n_var + ) + problem.ideal_fitness = np.asarray([0] * n_obj) + problem.nadir_fitness = abs(np.random.normal(size=n_obj, scale=0.15)) + 1 + true_nadir = np.asarray([1] * n_obj) + # initial reference point is specified randomly + response = np.random.rand(n_obj) + # run algorithms once with the randomly generated reference point + # interactive RVEA + int_rvea = RVEA(problem=problem, interact=True, n_gen_per_iter=gen) + int_nsga = NSGAIII(problem=problem, interact=True, n_gen_per_iter=gen) + int_rvea.set_interaction_type('Reference point') + int_nsga.set_interaction_type('Reference point') + pref_int_rvea, _ = int_rvea.start() + pref_int_nsga, _ = int_nsga.start() + for boundrnd in range(0, n_obj): + if response[boundrnd] < int_rvea.population.problem.ideal_fitness[boundrnd]: + response[boundrnd] = int_rvea.population.problem.nadir_fitness[boundrnd] + #print(pref_int_rvea.content["dimensions_data"].columns["ideal"]) + pref_int_rvea.response = pd.DataFrame( + [response], + columns=pref_int_rvea.content["dimensions_data"].columns, + ) + pref_int_nsga.response = pd.DataFrame( + [response], + columns=pref_int_nsga.content["dimensions_data"].columns, + ) + pref_int_rvea,_ = int_rvea.iterate(pref_int_rvea) + pref_int_nsga,_ = int_nsga.iterate(pref_int_nsga) + + # build initial composite front + ( + rvea_n_solutions, + nsga_n_solutions, + cf, + ) = generate_composite_front_with_identity( + int_rvea.population.objectives, int_nsga.population.objectives + ) + # the following two lines for getting pareto front by using pymoo framework + problemR = get_problem(problem_name.lower(), n_var, n_obj) + ref_dirs = get_reference_directions( + "das-dennis", n_obj, n_partitions=12 + ) + pareto_front = problemR.pareto_front(ref_dirs) + # creates uniformly distributed reference vectors + reference_vectors = ReferenceVectors(lattice_resolution=lattice_resolution, number_of_objectives=n_obj) + + # Arrays to store values for the current run + rm_rvea_learning = np.array([]) + rm_rvea_decision = np.array([]) + ns_rvea_learning = np.array([]) + ns_rvea_decision = np.array([]) + + + rm_nsga_learning = np.array([]) + rm_nsga_decision = np.array([]) + ns_nsga_learning = np.array([]) + ns_nsga_decision = np.array([]) + + + # learning phase + for i in range(L): + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + problem_name, + n_obj, + i + 1, + gen, + ] + # After this class call, solutions inside the composite front are assigned to reference vectors + 
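# baseADM translates the composite front by its ideal point and assigns every solution to its closest reference vector;
+ # generatePreference reads base.ideal_point, base.translated_front and base.assigned_vectors from this object below. +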
base = baseADM(cf, reference_vectors) + # generates the next reference point for the next iteration in the learning phase + response, reference_point = gp.generateRanges4learning(base, problem.ideal, true_nadir) + int_rvea.set_interaction_type('Preferred ranges') + int_nsga.set_interaction_type('Preferred ranges') + + pref_int_rvea, _ = int_rvea.start() + pref_int_nsga, _ = int_nsga.start() + + #data_row["reference_point"] = [response] + # run algorithms with the new reference point + # Interactive RVEA + pref_int_rvea.response = response + pref_int_nsga.response = response + + previous_RVEA_FEs = int_rvea._function_evaluation_count + previous_NSGA_FEs = int_nsga._function_evaluation_count + + pref_int_rvea,_ = int_rvea.iterate(pref_int_rvea) + pref_int_nsga,_ = int_nsga.iterate(pref_int_nsga) + + peritr_RVEA_FEs = ( + int_rvea._function_evaluation_count - previous_RVEA_FEs + ) + peritr_NSGA_FEs = ( + int_nsga._function_evaluation_count - previous_NSGA_FEs + ) + # extend composite front with newly obtained solutions + ( + rvea_n_solutions, + nsga_n_solutions, + cf, + ) = generate_composite_front_with_identity( + int_rvea.population.objectives, + int_nsga.population.objectives, + cf, + ) + #data_row["iRVEA_N_Ss"] = [rvea_n_solutions] + #data_row["RNSGAIII_N_Ss"] = [nsga_n_solutions] + #data_row["iRVEA_FEs"] = [peritr_RVEA_FEs * n_obj] + #data_row["RNSGAIII_FEs"] = [peritr_NSGA_FEs * n_obj] + + ns_rvea_learning = np.append(ns_rvea_learning, rvea_n_solutions) + ns_nsga_learning = np.append(ns_nsga_learning, nsga_n_solutions) + + # R-metric calculation + ref_point = reference_point.reshape(1, n_obj) + # normalize reference point + rp_transformer = Normalizer().fit(ref_point) + norm_rp = rp_transformer.transform(ref_point) + rmetric = rm.RMetric(problemR, norm_rp, pf=pareto_front) + # normalize solutions before sending r-metric + rvea_transformer = Normalizer().fit(int_rvea.population.objectives) + norm_rvea = rvea_transformer.transform( + int_rvea.population.objectives + ) + nsga_transformer = Normalizer().fit(int_nsga.population.objectives) + norm_rnsga = nsga_transformer.transform( + int_nsga.population.objectives + ) + # R-metric calls for R_IGD and R_HV + rigd_irvea, rhv_irvea = rmetric.calc(norm_rvea, others=norm_rnsga) + rigd_rnsga, rhv_rnsga = rmetric.calc(norm_rnsga, others=norm_rvea) + + + #data_row[ + # ["iRVEA" + excess_col for excess_col in excess_columns] + #] = [rigd_irvea, rhv_irvea] + #data_row[ + # ["RNSGAIII" + excess_col for excess_col in excess_columns] + #] = [rigd_rnsga, rhv_rnsga] + #data = data.append(data_row, ignore_index=1) + rm_rvea_learning = np.append(rm_rvea_learning, rigd_irvea) + rm_nsga_learning = np.append(rm_nsga_learning, rigd_rnsga) + + # Compute cumulative sum of the learning phase + median_ns_rvea_learning = np.append(median_ns_rvea_learning, np.sum(ns_rvea_learning)) + median_rm_rvea_learning = np.append(median_rm_rvea_learning, np.sum(rm_rvea_learning)) + median_ns_nsga_learning = np.append(median_ns_nsga_learning, np.sum(ns_nsga_learning)) + median_rm_nsga_learning = np.append(median_rm_nsga_learning, np.sum(rm_nsga_learning)) + + # Decision phase + # After the learning phase the reference vector which has the maximum number of assigned solutions forms ROI + max_assigned_vector = gp.get_max_assigned_vector(base.assigned_vectors) + for i in range(D): + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + problem_name, + n_obj, + L + i + 1, + gen, + ] + # since composite front grows after each iteration this call should be done for each 
iteration + base = baseADM(cf, reference_vectors) + # generates the next reference point for the decision phase + response, reference_point = gp.generateRanges4decision( + base, max_assigned_vector[0], problem.ideal, true_nadir + ) + + #data_row["reference_point"] = [response] + # run algorithms with the new reference point + pref_int_rvea.response = response + pref_int_nsga.response = response + + previous_RVEA_FEs = int_rvea._function_evaluation_count + previous_NSGA_FEs = int_nsga._function_evaluation_count + + pref_int_rvea,_ = int_rvea.iterate(pref_int_rvea) + pref_int_nsga,_ = int_nsga.iterate(pref_int_nsga) + + peritr_RVEA_FEs = ( + int_rvea._function_evaluation_count - previous_RVEA_FEs + ) + peritr_NSGA_FEs = ( + int_nsga._function_evaluation_count - previous_NSGA_FEs + ) + # extend composite front with newly obtained solutions + ( + rvea_n_solutions, + nsga_n_solutions, + cf, + ) = generate_composite_front_with_identity( + int_rvea.population.objectives, + int_nsga.population.objectives, + cf, + ) + #data_row["iRVEA_N_Ss"] = [rvea_n_solutions] + #data_row["RNSGAIII_N_Ss"] = [nsga_n_solutions] + #data_row["iRVEA_FEs"] = [peritr_RVEA_FEs * n_obj] + #data_row["RNSGAIII_FEs"] = [peritr_NSGA_FEs * n_obj] + + ns_rvea_decision = np.append(ns_rvea_decision, rvea_n_solutions) + ns_nsga_decision = np.append(ns_nsga_decision, nsga_n_solutions) + + # R-metric calculation + ref_point = reference_point.reshape(1, n_obj) + rp_transformer = Normalizer().fit(ref_point) + norm_rp = rp_transformer.transform(ref_point) + # for decision phase, delta is specified as 0.2 + rmetric = rm.RMetric(problemR, norm_rp, delta=0.2, pf=pareto_front) + # normalize solutions before sending r-metric + rvea_transformer = Normalizer().fit(int_rvea.population.objectives) + norm_rvea = rvea_transformer.transform( + int_rvea.population.objectives + ) + nsga_transformer = Normalizer().fit(int_nsga.population.objectives) + norm_nsga = nsga_transformer.transform( + int_nsga.population.objectives + ) + rigd_irvea, rhv_irvea = rmetric.calc(norm_rvea, others=norm_nsga) + rigd_insga, rhv_insga = rmetric.calc(norm_nsga, others=norm_rvea) + #data_row[ + # ["iRVEA" + excess_col for excess_col in excess_columns] + #] = [rigd_irvea, rhv_irvea] + #data_row[ + # ["RNSGAIII" + excess_col for excess_col in excess_columns] + #] = [rigd_insga, rhv_insga] + #data = data.append(data_row, ignore_index=1) + + rm_rvea_decision = np.append(rm_rvea_decision, rigd_irvea) + rm_nsga_decision = np.append(rm_nsga_decision, rigd_rnsga) + #Compute median of the decision phase + median_ns_rvea_decision = np.append(median_ns_rvea_decision, np.sum(ns_rvea_decision)) + median_rm_rvea_decision = np.append(median_rm_rvea_decision, np.sum(rm_rvea_decision)) + median_ns_nsga_decision = np.append(median_ns_nsga_decision, np.sum(ns_nsga_decision)) + median_rm_nsga_decision = np.append(median_rm_nsga_decision, np.sum(rm_nsga_decision)) + print(f"Results for Problem {problem_name} with {n_obj} objectives") + print ("Learning phase") + print("Ns") + print (f"{np.mean(median_ns_rvea_learning):.1f} & {np.std(median_ns_rvea_learning):.4f} & {np.mean(median_ns_nsga_learning):.1f} & {np.std(median_ns_nsga_learning):.4f}") + print("RM") + print (f"{np.mean(median_rm_rvea_learning):.4f} & {np.std(median_rm_rvea_learning):.4f} & {np.mean(median_rm_nsga_learning):.4f} & {np.std(median_rm_nsga_learning):.4f}") + #print (f"NSGA Ns = {np.mean(median_ns_nsga_learning)} & {np.std(median_ns_nsga_learning)}") + #print (f"NSGA RM = {np.mean(median_rm_nsga_learning)} & 
{np.std(median_rm_nsga_learning)}") + + print("Decision phase") + print("Ns") + print (f"{np.mean(median_ns_rvea_decision):.1f} & {np.std(median_ns_rvea_decision):.4f} & {np.mean(median_ns_nsga_decision):.1f} & {np.std(median_ns_nsga_decision):.4f}") + print("RM") + print (f"{np.mean(median_rm_rvea_decision):.4f} & {np.std(median_rm_rvea_decision):.4f} & {np.mean(median_rm_nsga_decision):.4f} & {np.std(median_rm_nsga_decision):.4f}") + #print (f"NSGA Ns = {np.mean(median_ns_nsga_decision)} & {np.std(median_ns_nsga_decision)}") + #print (f"NSGA RM = {np.mean(median_rm_nsga_decision)} & {np.std(median_rm_nsga_decision)}") + + #data.to_csv(f"refpoints{run+1}.csv", index=False) diff --git a/docs/notebooks/extendedADM_RVEA_RPvsRanges.py b/docs/notebooks/extendedADM_RVEA_RPvsRanges.py new file mode 100644 index 00000000..78784bfb --- /dev/null +++ b/docs/notebooks/extendedADM_RVEA_RPvsRanges.py @@ -0,0 +1,328 @@ +import numpy as np +import pandas as pd + +import baseADM +from baseADM import * +import generatePreference as gp + +from desdeo_problem.testproblems.TestProblems import test_problem_builder +from desdeo_emo.othertools.ReferenceVectors import ReferenceVectors + +from desdeo_emo.EAs.RVEA import RVEA + +# from desdeo_emo.EAs.NSGAIII import NSGAIII + +from pymoo.factory import get_problem, get_reference_directions +import rmetric as rm +from sklearn.preprocessing import Normalizer +from pymoo.configuration import Configuration + +Configuration.show_compile_hint = False + +problem_names = ["DTLZ1", "DTLZ2", "DTLZ3", "DTLZ4"] +n_objs = np.asarray([3, 4, 5, 6, 7, 8, 9]) # number of objectives + +K = 10 +n_vars = K + n_objs - 1 # number of variables + +num_gen_per_iter = [100] # number of generations per iteration + +algorithms = ["iRVEA_RP", "iRVEA_Ranges"] # algorithms to be compared + +# the followings are for formatting results +column_names = ( + [ + "problem", + "num_obj", + "iteration", + "num_gens", + "reference_point", + "preferred_ranges", + ] + + [algorithm + "_R_IGD" for algorithm in algorithms] + + [algorithm + "_R_HV" for algorithm in algorithms] + + [algorithm + "_N_Ss" for algorithm in algorithms] + + [algorithm + "_FEs" for algorithm in algorithms] +) +excess_columns = ["_R_IGD", "_R_HV"] +data = pd.DataFrame(columns=column_names) +data_row = pd.DataFrame(columns=column_names, index=[1]) + +# ADM parameters +L = 4 # number of iterations for the learning phase +D = 3 # number of iterations for the decision phase +lattice_resolution = 5 # density variable for creating reference vectors + +total_run = 2 +for run in range(total_run): + print(f"Run {run+1} of {total_run}") + counter = 1 + total_count = len(num_gen_per_iter) * len(n_objs) * len(problem_names) + for gen in num_gen_per_iter: + for n_obj, n_var in zip(n_objs, n_vars): + for problem_name in problem_names: + print(f"Loop {counter} of {total_count}") + counter += 1 + problem = test_problem_builder( + name=problem_name, n_of_objectives=n_obj, n_of_variables=n_var + ) + + ideal = np.asarray([0] * n_obj) + problem.update_ideal(objective_vectors=ideal, fitness=ideal) + problem.nadir = abs(np.random.normal(size=n_obj, scale=0.15)) + 1 + + true_nadir = np.asarray([1] * n_obj) + + # interactive + + int_rvea = RVEA(problem=problem, interact=True, n_gen_per_iter=gen) + + int_rvea_ranges = RVEA( + problem=problem, interact=True, n_gen_per_iter=gen + ) + + # initial reference point is specified randomly + ref_point = np.random.rand(n_obj) + + # run algorithms once with the randomly generated reference point + _, pref_int_rvea = 
int_rvea.requests() + _, pref_int_rvea_ranges = int_rvea_ranges.requests() + + pref_int_rvea[2].response = pd.DataFrame( + [ref_point], + columns=pref_int_rvea[2].content["dimensions_data"].columns, + ) + + pref_int_rvea_ranges[2].response = pd.DataFrame( + [ref_point], + columns=pref_int_rvea_ranges[2].content["dimensions_data"].columns, + ) + + _, pref_int_rvea = int_rvea.iterate(pref_int_rvea[2]) + # First run of the preferred ranges based RVEA is also made with the randomly generated reference point + _, pref_int_rvea_ranges = int_rvea_ranges.iterate( + pref_int_rvea_ranges[2] + ) + + # build initial composite front + ( + rvea_n_solutions, + rvea_ranges_n_solutions, + cf, + ) = generate_composite_front_with_identity( + int_rvea.population.objectives, + int_rvea_ranges.population.objectives, + ) + + # the following two lines for getting pareto front by using pymoo framework + problemR = get_problem(problem_name.lower(), n_var, n_obj) + ref_dirs = get_reference_directions( + "das-dennis", n_obj, n_partitions=12 + ) + pareto_front = problemR.pareto_front(ref_dirs) + + # creates uniformly distributed reference vectors + reference_vectors = ReferenceVectors(lattice_resolution, n_obj) + + # learning phase + for i in range(L): + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + problem_name, + n_obj, + i + 1, + gen, + ] + # After this class call, solutions inside the composite front are assigned to reference vectors + base = baseADM(cf, reference_vectors) + # generates the next reference point for the next iteration in the learning phase + ranges, reference_point = gp.generateRanges4learning( + base, problem.ideal, problem.nadir + ) + + data_row["reference_point"] = [reference_point] + data_row["preferred_ranges"] = [ranges] + + # run algorithms with the new reference point + pref_int_rvea[2].response = pd.DataFrame( + [reference_point], + columns=pref_int_rvea[2].content["dimensions_data"].columns, + ) + pref_int_rvea_ranges[3].response = ranges + + previous_RVEA_FEs = int_rvea._function_evaluation_count + previous_RVEA_Ranges_FEs = ( + int_rvea_ranges._function_evaluation_count + ) + _, pref_int_rvea = int_rvea.iterate(pref_int_rvea[2]) + _, pref_int_rvea_ranges = int_rvea_ranges.iterate( + pref_int_rvea_ranges[3] + ) + + peritr_RVEA_FEs = ( + int_rvea._function_evaluation_count - previous_RVEA_FEs + ) + peritr_RVEA_Ranges_FEs = ( + int_rvea_ranges._function_evaluation_count + - previous_RVEA_Ranges_FEs + ) + + # extend composite front with newly obtained solutions + ( + rvea_n_solutions, + rvea_ranges_n_solutions, + cf, + ) = generate_composite_front_with_identity( + int_rvea.population.objectives, + int_rvea_ranges.population.objectives, + cf, + ) + + data_row["iRVEA_RP_N_Ss"] = [rvea_n_solutions] + data_row["iRVEA_Ranges_N_Ss"] = [rvea_ranges_n_solutions] + data_row["iRVEA_RP_FEs"] = [peritr_RVEA_FEs * n_obj] + data_row["iRVEA_Ranges_FEs"] = [peritr_RVEA_Ranges_FEs * n_obj] + + # R-metric calculation + ref_point = reference_point.reshape(1, n_obj) + + # normalize reference point + rp_transformer = Normalizer().fit(ref_point) + norm_rp = rp_transformer.transform(ref_point) + + rmetric = rm.RMetric(problemR, norm_rp, pf=pareto_front) + + # normalize solutions before sending r-metric + rvea_transformer = Normalizer().fit(int_rvea.population.objectives) + norm_rvea = rvea_transformer.transform( + int_rvea.population.objectives + ) + + rvea_ranges_transformer = Normalizer().fit( + int_rvea_ranges.population.objectives + ) + norm_rvea_ranges = rvea_ranges_transformer.transform( + 
int_rvea_ranges.population.objectives + ) + + # R-metric calls for R_IGD and R_HV + rigd_irvea, rhv_irvea = rmetric.calc( + norm_rvea, others=norm_rvea_ranges + ) + rigd_irvea_ranges, rhv_irvea_ranges = rmetric.calc( + norm_rvea_ranges, others=norm_rvea + ) + + data_row[ + ["iRVEA_RP" + excess_col for excess_col in excess_columns] + ] = [rigd_irvea, rhv_irvea] + data_row[ + ["iRVEA_Ranges" + excess_col for excess_col in excess_columns] + ] = [rigd_irvea_ranges, rhv_irvea_ranges] + + data = data.append(data_row, ignore_index=1) + + # Decision phase + # After the learning phase the reference vector which has the maximum number of assigned solutions forms ROI + max_assigned_vector = gp.get_max_assigned_vector(base.assigned_vectors) + + for i in range(D): + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + problem_name, + n_obj, + L + i + 1, + gen, + ] + + # since composite front grows after each iteration this call should be done for each iteration + base = baseADM(cf, reference_vectors) + + # generates the next reference point for the decision phase + ranges, reference_point = gp.generateRanges4decision( + base, max_assigned_vector[0], problem.ideal, problem.nadir + ) + + data_row["reference_point"] = [reference_point] + data_row["preferred_ranges"] = [ranges] + + # run algorithms with the new reference point + pref_int_rvea[2].response = pd.DataFrame( + [reference_point], + columns=pref_int_rvea[2].content["dimensions_data"].columns, + ) + pref_int_rvea_ranges[3].response = ranges + + previous_RVEA_FEs = int_rvea._function_evaluation_count + previous_RVEA_Ranges_FEs = ( + int_rvea_ranges._function_evaluation_count + ) + _, pref_int_rvea = int_rvea.iterate(pref_int_rvea[2]) + _, pref_int_rvea_ranges = int_rvea_ranges.iterate( + pref_int_rvea_ranges[3] + ) + + peritr_RVEA_FEs = ( + int_rvea._function_evaluation_count - previous_RVEA_FEs + ) + peritr_RVEA_Ranges_FEs = ( + int_rvea_ranges._function_evaluation_count + - previous_RVEA_Ranges_FEs + ) + # extend composite front with newly obtained solutions + ( + rvea_n_solutions, + rvea_ranges_n_solutions, + cf, + ) = generate_composite_front_with_identity( + int_rvea.population.objectives, + int_rvea_ranges.population.objectives, + cf, + ) + + data_row["iRVEA_RP_N_Ss"] = [rvea_n_solutions] + data_row["iRVEA_Ranges_N_Ss"] = [rvea_ranges_n_solutions] + data_row["iRVEA_RP_FEs"] = [peritr_RVEA_FEs * n_obj] + data_row["iRVEA_Ranges_FEs"] = [peritr_RVEA_Ranges_FEs * n_obj] + + # R-metric calculation + ref_point = reference_point.reshape(1, n_obj) + + rp_transformer = Normalizer().fit(ref_point) + norm_rp = rp_transformer.transform(ref_point) + + # for decision phase, delta is specified as 0.2 + rmetric = rm.RMetric(problemR, norm_rp, delta=0.2, pf=pareto_front) + + # normalize solutions before sending r-metric + rvea_transformer = Normalizer().fit(int_rvea.population.objectives) + norm_rvea = rvea_transformer.transform( + int_rvea.population.objectives + ) + + rvea_ranges_transformer = Normalizer().fit( + int_rvea_ranges.population.objectives + ) + norm_rvea_ranges = rvea_ranges_transformer.transform( + int_rvea_ranges.population.objectives + ) + + rigd_irvea, rhv_irvea = rmetric.calc( + norm_rvea, others=norm_rvea_ranges + ) + rigd_irvea_ranges, rhv_irvea_ranges = rmetric.calc( + norm_rvea_ranges, others=norm_rvea + ) + + data_row[ + ["iRVEA_RP" + excess_col for excess_col in excess_columns] + ] = [rigd_irvea, rhv_irvea] + data_row[ + ["iRVEA_Ranges" + excess_col for excess_col in excess_columns] + ] = [rigd_irvea_ranges, 
rhv_irvea_ranges] + + data = data.append(data_row, ignore_index=1) + + data.to_csv( + f"./results/extendedADM/RVEA_RPvsRanges/100_generations/output{run+1}.csv", + index=False, + ) + diff --git a/docs/notebooks/generatePreference.py b/docs/notebooks/generatePreference.py new file mode 100644 index 00000000..734e463f --- /dev/null +++ b/docs/notebooks/generatePreference.py @@ -0,0 +1,415 @@ +# from desdeo_emo.othertools.ReferenceVectors import ReferenceVectors +import numpy as np +import baseADM + + +def generateRP4learning(base: baseADM): + + ideal_cf = base.ideal_point + + translated_cf = base.translated_front + + # Assigment of the solutions to the vectors + assigned_vectors = base.assigned_vectors + + # Find the vector which has a minimum number of assigned solutions + number_assigned = np.bincount(assigned_vectors) + min_assigned_vector = np.atleast_1d( + np.squeeze( + np.where( + number_assigned == np.min(number_assigned[np.nonzero(number_assigned)]) + ) + ) + ) + sub_population_index = np.atleast_1d( + np.squeeze(np.where(assigned_vectors == min_assigned_vector[0])) + # If there are multiple vectors which have the minimum number of solutions, first one's index is used + ) + # Assigned solutions to the vector which has a minimum number of solutions + sub_population_fitness = translated_cf[sub_population_index] + + # Distances of these solutions to the origin + sub_pop_fitness_magnitude = np.sqrt( + np.sum(np.power(sub_population_fitness, 2), axis=1) + ) + # Index of the solution which has a minimum distance to the origin + minidx = np.where(sub_pop_fitness_magnitude == np.nanmin(sub_pop_fitness_magnitude)) + + distance_selected = sub_pop_fitness_magnitude[minidx] + + # Create the reference point + reference_point = distance_selected[0] * base.vectors.values[min_assigned_vector[0]] + reference_point = np.squeeze(reference_point + ideal_cf) + # reference_point = reference_point + ideal_cf + return reference_point + + +def get_max_assigned_vector(assigned_vectors): + + number_assigned = np.bincount(assigned_vectors) + max_assigned_vector = np.atleast_1d( + np.squeeze( + np.where( + number_assigned == np.max(number_assigned[np.nonzero(number_assigned)]) + ) + ) + ) + return max_assigned_vector + + +def generateRP4decision(base: baseADM, max_assigned_vector): + + assigned_vectors = base.assigned_vectors + + ideal_cf = base.ideal_point + + translated_cf = base.translated_front + + sub_population_index = np.atleast_1d( + np.squeeze(np.where(assigned_vectors == max_assigned_vector)) + ) + sub_population_fitness = translated_cf[sub_population_index] + # Distances of these solutions to the origin + sub_pop_fitness_magnitude = np.sqrt( + np.sum(np.power(sub_population_fitness, 2), axis=1) + ) + # Index of the solution which has a minimum distance to the origin + minidx = np.where(sub_pop_fitness_magnitude == np.nanmin(sub_pop_fitness_magnitude)) + distance_selected = sub_pop_fitness_magnitude[minidx] + + # Create the reference point + reference_point = distance_selected[0] * base.vectors.values[max_assigned_vector] + reference_point = np.squeeze(reference_point + ideal_cf) + + # reference_point = reference_point + ideal_cf + return reference_point + + +def generatePerturbatedRP4decision(base: baseADM, max_assigned_vector): + + assigned_vectors = base.assigned_vectors + theta = base.theta + + ideal_cf = base.ideal_point + + translated_cf = base.translated_front + + sub_population_index = np.atleast_1d( + np.squeeze(np.where(assigned_vectors == max_assigned_vector)) + ) + sub_population_fitness = 
translated_cf[sub_population_index] + + # angles = theta[sub_population_index, max_assigned_vector] + # angles = np.divide(angles) + # print(angles) + # Distances of these solutions to the origin + sub_pop_fitness_magnitude = np.sqrt( + np.sum(np.power(sub_population_fitness, 2), axis=1) + ) + # Index of the solution which has a minimum distance to the origin + minidx = np.where(sub_pop_fitness_magnitude == np.nanmin(sub_pop_fitness_magnitude)) + distance_selected = sub_pop_fitness_magnitude[minidx] + + # aminidx = np.where(angles == np.nanmin(angles)) + + # Create the reference point + reference_point = distance_selected[0] * base.vectors.values[max_assigned_vector] + + # Find the distance from the nearest solution to the reference point + distance = min(np.linalg.norm(reference_point - i) for i in sub_population_fitness) + + # nearest = np.squeeze(sub_population_fitness[aminidx] + ideal_cf) + + # distance = np.linalg.norm(nearest - reference_point) + # print("distance", distance) + + reference_point = np.squeeze(reference_point + ideal_cf) + + reference_point = np.squeeze(reference_point - distance) + + # The following line is to make sure that the components of the reference point cannot be smaller than the components of the ideal point + # update the following line if the ideal point is not zero + reference_point[reference_point < 0] = np.finfo(float).eps + # print(reference_point) + + return reference_point + + +def generateRanges4learning(base: baseADM, true_ideal, true_nadir): + + ideal_cf = base.ideal_point + + translated_cf = base.translated_front + + # Assigment of the solutions to the vectors + assigned_vectors = base.assigned_vectors + + # Find the vector which has a minimum number of assigned solutions + number_assigned = np.bincount(assigned_vectors) + min_assigned_vector = np.atleast_1d( + np.squeeze( + np.where( + number_assigned == np.min(number_assigned[np.nonzero(number_assigned)]) + ) + ) + ) + sub_population_index = np.atleast_1d( + np.squeeze(np.where(assigned_vectors == min_assigned_vector[0])) + # If there are multiple vectors which have the minimum number of solutions, first one's index is used + ) + # Assigned solutions to the vector which has a minimum number of solutions + sub_population_fitness = translated_cf[sub_population_index] + + # Distances of these solutions to the origin + sub_pop_fitness_magnitude = np.sqrt( + np.sum(np.power(sub_population_fitness, 2), axis=1) + ) + # Index of the solution which has a minimum distance to the origin + minidx = np.where(sub_pop_fitness_magnitude == np.nanmin(sub_pop_fitness_magnitude)) + + distance_selected = sub_pop_fitness_magnitude[minidx] + + # Create the reference point + reference_point = distance_selected[0] * base.vectors.values[min_assigned_vector[0]] + + # Distance between the reference point and the nearest solution + distance = min(np.linalg.norm(reference_point - i) for i in sub_population_fitness) + + reference_point = np.squeeze(reference_point + ideal_cf) + + temp = reference_point - distance + # change the following line if the ideal point is different than zero + temp2 = reference_point + distance + + for i in range(reference_point.shape[0]): + if reference_point[i] < true_ideal[i]: + reference_point[i] = true_ideal[i] + if reference_point[i] > true_nadir[i]: + reference_point[i] = true_nadir[i] + if temp[i] < true_ideal[i]: + temp[i] = true_ideal[i] + if temp[i] > true_nadir[i]: + temp[i] = true_nadir[i] + if temp2[i] < true_ideal[i]: + temp2[i] = true_ideal[i] + if temp2[i] > true_nadir[i]: + 
temp2[i] = true_nadir[i] + + preferred_range = np.vstack((temp, temp2)).T + # preferred_range = np.squeeze(preferred_range + ideal_cf) + + # desdeo_emo is expecting ranges as list, therefore, array is converted to list. + # preferred_range = preferred_range.tolist() + + return preferred_range, reference_point + + +def generateRanges4decision(base: baseADM, max_assigned_vector, true_ideal, true_nadir): + + assigned_vectors = base.assigned_vectors + + ideal_cf = base.ideal_point + + translated_cf = base.translated_front + + sub_population_index = np.atleast_1d( + np.squeeze(np.where(assigned_vectors == max_assigned_vector)) + ) + sub_population_fitness = translated_cf[sub_population_index] + # Distances of these solutions to the origin + sub_pop_fitness_magnitude = np.sqrt( + np.sum(np.power(sub_population_fitness, 2), axis=1) + ) + # Index of the solution which has a minimum distance to the origin + minidx = np.where(sub_pop_fitness_magnitude == np.nanmin(sub_pop_fitness_magnitude)) + distance_selected = sub_pop_fitness_magnitude[minidx] + + # Create the reference point + reference_point = distance_selected[0] * base.vectors.values[max_assigned_vector] + + # Distance between the reference point and the nearest solution + distance = min(np.linalg.norm(reference_point - i) for i in sub_population_fitness) + + reference_point = np.squeeze(reference_point + ideal_cf) + + # This is for perturbating the reference point by using the distance between the nearest solution to the reference point + reference_point = np.squeeze(reference_point - distance) + + # The following line is to make sure that the components of the reference point cannot be smaller than the components of the ideal point + # update the following line if the ideal point is not zero + + temp = reference_point - distance + # change the following line if the ideal point is different than zero + temp2 = reference_point + distance + + for i in range(reference_point.shape[0]): + if reference_point[i] < true_ideal[i]: + reference_point[i] = true_ideal[i] + if reference_point[i] > true_nadir[i]: + reference_point[i] = true_nadir[i] + if temp[i] < true_ideal[i]: + temp[i] = true_ideal[i] + if temp[i] > true_nadir[i]: + temp[i] = true_nadir[i] + if temp2[i] < true_ideal[i]: + temp2[i] = true_ideal[i] + if temp2[i] > true_nadir[i]: + temp2[i] = true_nadir[i] + + preferred_range = np.vstack((temp, temp2)).T + + # desdeo_emo is expecting ranges as list, therefore, array is converted to list. 
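+ # preferred_range has shape (n_obj, 2): row i holds the lower and upper bound for objective i, both clipped to [true_ideal, true_nadir].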
+ # preferred_range = preferred_range.tolist() + return preferred_range, reference_point + +def sparse_argsort(arr): + indices = np.nonzero(arr)[0] + return indices[np.argsort(arr[indices])] + +def preferredSolutions4learning(base: baseADM, p=1): + num_selected = 0 + + ideal_cf = base.ideal_point + + translated_cf = base.translated_front + + preferred_solutions = np.empty((0,len(ideal_cf)), float) + #print(preferred_solutions) + + # Assigment of the solutions to the vectors + assigned_vectors = base.assigned_vectors + index_min = 0 + # Find the vector which has a minimum number of assigned solutions + while num_selected < p: + number_assigned = np.bincount(assigned_vectors) + sorted_assigned = sparse_argsort(number_assigned) + #print(number_assigned) + #print(sorted_assigned) + #min_assigned_vector = np.atleast_1d( + # np.squeeze( + # np.where( + # number_assigned == np.min(number_assigned[np.nonzero(number_assigned)]) + # ) + # ) + #) + min_assigned_vector = sorted_assigned[index_min] + #print("min assigned vector") + #print(min_assigned_vector) + + sub_population_index = np.atleast_1d( + np.squeeze(np.where(assigned_vectors == min_assigned_vector)) + # If there are multiple vectors which have the minimum number of solutions, first one's index is used + ) + # Assigned solutions to the vector which has a minimum number of solutions + sub_population_fitness = translated_cf[sub_population_index] + #print(sub_population_fitness) + # Distances of these solutions to the origin + sub_pop_fitness_magnitude = np.sqrt( + np.sum(np.power(sub_population_fitness, 2), axis=1) + ) + sorted_sub_population = np.argsort(sub_pop_fitness_magnitude) + for j in range(0, len(sub_population_fitness)): + # Index of the solution which has a minimum distance to the origin + solution_selected = sub_population_fitness[sorted_sub_population[j]] + preferred_solution = np.squeeze(solution_selected + ideal_cf) + + preferred_solutions = np.append(preferred_solutions, [preferred_solution], axis=0) + num_selected = num_selected +1 + if num_selected == p: + #print("las tenemos pjjpjj cambio") + #print(preferred_solutions) + return preferred_solutions + + index_min = index_min + 1 + + + return preferred_solution + + +def preferredSolutions4decision(base: baseADM, max_assigned_vector): + + assigned_vectors = base.assigned_vectors + + ideal_cf = base.ideal_point + + translated_cf = base.translated_front + + sub_population_index = np.atleast_1d( + np.squeeze(np.where(assigned_vectors == max_assigned_vector)) + ) + print(sub_population_index) + sub_population_fitness = translated_cf[sub_population_index] + # Distances of these solutions to the origin + sub_pop_fitness_magnitude = np.sqrt( + np.sum(np.power(sub_population_fitness, 2), axis=1) + ) + + # Index of the solution which has a minimum distance to the origin + minidx = np.argpartition(sub_pop_fitness_magnitude, 4) + + # print(minidx[:3]) + solution_selected = sub_population_fitness[minidx[:4]] + + preferred_solution = np.squeeze(solution_selected + ideal_cf) + + return preferred_solution + + +def preferredSolutions4Decision2(base: baseADM, p=5): + num_selected = 0 + + ideal_cf = base.ideal_point + + translated_cf = base.translated_front + + preferred_solutions = np.empty((0,len(ideal_cf)), float) + #print(preferred_solutions) + + # Assigment of the solutions to the vectors + assigned_vectors = base.assigned_vectors + index_max = 0 + # Find the vector which has a minimum number of assigned solutions + while num_selected < p: + number_assigned = np.bincount(assigned_vectors) 
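+ # Unlike the learning-phase variant, the non-empty vectors are sorted below in descending order of assigned solutions, so the most crowded reference vectors are used first.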
+ sorted_assigned = sparse_argsort(number_assigned)[::-1] + #print(number_assigned) + #print(sorted_assigned) + #min_assigned_vector = np.atleast_1d( + # np.squeeze( + # np.where( + # number_assigned == np.min(number_assigned[np.nonzero(number_assigned)]) + # ) + # ) + #) + max_assigned_vector = sorted_assigned[index_max] + #print("min assigned vector") + #print(min_assigned_vector) + + sub_population_index = np.atleast_1d( + np.squeeze(np.where(assigned_vectors == max_assigned_vector)) + # If there are multiple vectors which have the minimum number of solutions, first one's index is used + ) + # Assigned solutions to the vector which has a minimum number of solutions + sub_population_fitness = translated_cf[sub_population_index] + #print(sub_population_fitness) + # Distances of these solutions to the origin + sub_pop_fitness_magnitude = np.sqrt( + np.sum(np.power(sub_population_fitness, 2), axis=1) + ) + sorted_sub_population = np.argsort(sub_pop_fitness_magnitude) + for j in range(0, len(sub_population_fitness)): + # Index of the solution which has a minimum distance to the origin + solution_selected = sub_population_fitness[sorted_sub_population[j]] + preferred_solution = np.squeeze(solution_selected + ideal_cf) + + preferred_solutions = np.append(preferred_solutions, [preferred_solution], axis=0) + num_selected = num_selected +1 + if num_selected == p: + #print("las tenemos pjjpjj cambio") + #print(preferred_solutions) + return preferred_solutions + + index_max = index_max + 1 + + + return preferred_solution diff --git a/docs/notebooks/rmetric.py b/docs/notebooks/rmetric.py new file mode 100644 index 00000000..3fcd9f56 --- /dev/null +++ b/docs/notebooks/rmetric.py @@ -0,0 +1,222 @@ +#################################################################### +# Title: R-metric implementation +# Publication: Li, K., Deb, K., Yao, X.: R-metric: Evaluating the +# performance of preference-basedevolutionary multiobjective optimization +# using reference points. IEEE Transactions on Evolutionary Computation22(6), +# 821–835 (2018). +# +# This implementation is adapted from the one in pymoo framework(https://pymoo.org/) +# Source: https://github.com/msu-coinlab/pymoo/blob/master/pymoo/performance_indicator/rmetric.py +# retrieved in September 2020. 
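+# A minimal usage sketch, mirroring how the ADM scripts in this patch call it (variable names are illustrative):
+#
+#   rmetric = RMetric(problem, norm_ref_point, pf=pareto_front)            # delta defaults to 0.3
+#   rigd, rhv = rmetric.calc(norm_solutions, others=norm_other_solutions)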
+##################################################################### + + +import numpy as np +from scipy.spatial.distance import cdist + +from pymoo.core.indicator import Indicator +from pymoo.indicators.hv import Hypervolume +from pymoo.indicators.igd import IGD + + +class RMetric(Indicator): + def __init__(self, problem, ref_points, w=None, delta=0.3, pf=None): + """ + + Parameters + ---------- + + problem : class + problem instance + + ref_points : numpy.array + list of reference points + + w : numpy.array + weights for each objective + + delta : float + The delta value representing the region of interest + + """ + + Indicator.__init__(self) + self.ref_points = ref_points + self.problem = problem + w_ = np.ones(self.ref_points.shape[1]) if not w else w + self.w_points = self.ref_points + 2 * w_ + self.delta = delta + self.pf = pf + + self.F = None + self.others = None + + def _filter(self): + def check_dominance(a, b, n_obj): + flag1 = False + flag2 = False + for i in range(n_obj): + if a[i] < b[i]: + flag1 = True + else: + if a[i] > b[i]: + flag2 = True + if flag1 and not flag2: + return 1 + elif not flag1 and flag2: + return -1 + else: + return 0 + + num_objs = np.size(self.F, axis=1) + index_array = np.zeros(np.size(self.F, axis=0)) + + # filter out all solutions that are dominated by solutions found by other algorithms + if self.others is not None: + for i in range(np.size(self.F, 0)): + for j in range(np.size(self.others, 0)): + flag = check_dominance(self.F[i, :], self.others[j, :], num_objs) + if flag == -1: + index_array[i] = 1 + break + + final_index = np.logical_not(index_array) + filtered_pop = self.F[final_index, :] + + return filtered_pop + + def _preprocess(self, data, ref_point, w_point): + + datasize = np.size(data, 0) + + # Identify representative point + ref_matrix = np.tile(ref_point, (datasize, 1)) + w_matrix = np.tile(w_point, (datasize, 1)) + # ratio of distance to the ref point over the distance between the w_point and the ref_point + diff_matrix = (data - ref_matrix) / (w_matrix - ref_matrix) + agg_value = np.amax(diff_matrix, axis=1) + idx = np.argmin(agg_value) + zp = [data[idx, :]] + + return (zp,) + + def _translate(self, zp, trimmed_data, ref_point, w_point): + # Solution translation - Matlab reproduction + # find k + temp = (zp[0] - ref_point) / (w_point - ref_point) + kIdx = np.argmax(temp) + + # find zl + temp = (zp[0][kIdx] - ref_point[kIdx]) / (w_point[kIdx] - ref_point[kIdx]) + zl = ref_point + temp * (w_point - ref_point) + + temp = zl - zp + shift_direction = np.tile(temp, (trimmed_data.shape[0], 1)) + # new_size = self.curr_pop.shape[0] + return trimmed_data + shift_direction + + def _trim(self, pop, centeroid, range=0.2): + popsize, objDim = pop.shape + diff_matrix = pop - np.tile(centeroid, (popsize, 1))[0] + flags = np.sum(abs(diff_matrix) < range / 2, axis=1) + filtered_matrix = pop[np.where(flags == objDim)] + return filtered_matrix + + def _trim_fast(self, pop, centeroid, range=0.2): + centeroid_matrix = cdist(pop, centeroid, metric="euclidean") + filtered_matrix = pop[np.where(centeroid_matrix < range / 2), :][0] + return filtered_matrix + + def calc(self, F, others=None, calc_hv=True): + """ + + This method calculates the R-IGD and R-HV based off of the values provided. 
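+ The computation follows the five steps marked in the code below: (1) filter out members of F
+ dominated by `others`, (2) pick a representative point for each reference point, (3) trim
+ solutions outside the delta-sized region around it, (4) translate the trimmed set towards the
+ reference point, and (5) evaluate IGD (and HV when there are at most three objectives) on the
+ translated set against the similarly processed Pareto front.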
+ + + Parameters + ---------- + + F : numpy.ndarray + The objective space values + + others : numpy.ndarray + Results from other algorithms which should be used for filtering nds solutions + + calc_hv : bool + Whether the hv is calculate - (None if more than 3 dimensions) + + + Returns + ------- + rigd : float + R-IGD + + rhv : float + R-HV if calc_hv is true and less or equal to 3 dimensions + + """ + self.F, self.others = F, others + + translated = [] + final_PF = [] + + # 1. Prescreen Procedure - NDS Filtering + pop = self._filter() + + pf = self.pf + if pf is None: + pf = self.problem.pareto_front() + + if pf is None: + raise Exception( + "Please provide the Pareto front to calculate the R-Metric!" + ) + + labels = np.argmin(cdist(pop, self.ref_points), axis=1) + + for i in range(len(self.ref_points)): + cluster = pop[np.where(labels == i)] + if len(cluster) != 0: + # 2. Representative Point Identification + zp = self._preprocess( + cluster, self.ref_points[i], w_point=self.w_points[i] + )[0] + # 3. Filtering Procedure - Filter points + trimmed_data = self._trim(cluster, zp, range=self.delta) + # 4. Solution Translation + pop_t = self._translate( + zp, trimmed_data, self.ref_points[i], w_point=self.w_points[i] + ) + translated.extend(pop_t) + + # 5. R-Metric Computation + target = self._preprocess( + data=pf, ref_point=self.ref_points[i], w_point=self.w_points[i] + ) + PF = self._trim(pf, target) + final_PF.extend(PF) + + translated = np.array(translated) + final_PF = np.array(final_PF) + + rigd, rhv = None, None + + if len(translated) > 0: + + # IGD Computation + rigd = IGD(final_PF).do(translated) + + nadir_point = np.amax(self.w_points, axis=0) + front = translated + dim = self.ref_points[0].shape[0] + if calc_hv: + if dim <= 3: + try: + rhv = Hypervolume(ref_point=nadir_point).calc(front) + except: + pass + + if calc_hv: + return rigd, rhv + else: + return rigd diff --git a/docs/notebooks/visualize_adm_test_dtlz.py b/docs/notebooks/visualize_adm_test_dtlz.py new file mode 100644 index 00000000..301c68a3 --- /dev/null +++ b/docs/notebooks/visualize_adm_test_dtlz.py @@ -0,0 +1,280 @@ +import numpy as np +import pandas as pd + +import baseADM +from baseADM import * +import generatePreference as gp +import visual +from visual import * + +from desdeo_problem.testproblems.TestProblems import test_problem_builder +from desdeo_emo.othertools.ReferenceVectors import ReferenceVectors + +from desdeo_emo.EAs.RVEA import RVEA +from desdeo_emo.EAs.NSGAIII import NSGAIII + +from pymoo.factory import get_problem, get_reference_directions +import rmetric as rm +from sklearn.preprocessing import Normalizer +from pymoo.configuration import Configuration + +Configuration.show_compile_hint = False + + +# problem_names = ["DTLZ2", "DTLZ3", "DTLZ4"] +problem_names = ["DTLZ1", "DTLZ2", "DTLZ3", "DTLZ4"] +# n_objs = np.asarray([3, 4, 5, 6, 7, 8, 9]) +n_objs = np.asarray([3]) +K = 10 +n_vars = K + n_objs - 1 + +num_gen_per_iter = [50] + +algorithms = ["iRVEA", "iNSGAIII"] +column_names = ( + ["problem", "num_obj", "iteration", "num_gens", "reference_point"] + + [algorithm + "_R_IGD" for algorithm in algorithms] + + [algorithm + "_R_HV" for algorithm in algorithms] +) + +excess_columns = [ + "_R_IGD", + "_R_HV", +] + +data = pd.DataFrame(columns=column_names) +data_row = pd.DataFrame(columns=column_names, index=[1]) + +# ADM parameters +L = 0 # number of iterations for the learning phase +D = 20 # number of iterations for the decision phase +lattice_resolution = 5 # density variable for creating reference 
vectors + +counter = 1 +total_count = len(num_gen_per_iter) * len(n_objs) * len(problem_names) +for gen in num_gen_per_iter: + for n_obj, n_var in zip(n_objs, n_vars): + for problem_name in problem_names: + print(f"Loop {counter} of {total_count}") + counter += 1 + problem = test_problem_builder( + name=problem_name, n_of_objectives=n_obj, n_of_variables=n_var + ) + problem.ideal = np.asarray([0] * n_obj) + problem.nadir = abs(np.random.normal(size=n_obj, scale=0.15)) + 1 + + true_nadir = np.asarray([1] * n_obj) + + # interactive + int_rvea = RVEA(problem=problem, interact=True, n_gen_per_iter=gen) + int_nsga = NSGAIII(problem=problem, interact=True, n_gen_per_iter=gen) + + # initial reference point + response = np.random.rand(n_obj) + fig_rp = go.Figure() + + # run algorithms once with the randomly generated reference point + _, pref_int_rvea = int_rvea.requests() + _, pref_int_nsga = int_nsga.requests() + pref_int_rvea.response = pd.DataFrame( + [response], columns=pref_int_rvea.content["dimensions_data"].columns + ) + pref_int_nsga.response = pd.DataFrame( + [response], columns=pref_int_nsga.content["dimensions_data"].columns + ) + + _, pref_int_rvea = int_rvea.iterate(pref_int_rvea) + _, pref_int_nsga = int_nsga.iterate(pref_int_nsga) + + cf = generate_composite_front( + int_rvea.population.objectives, int_nsga.population.objectives + ) + + # the following two lines for getting pareto front by using pymoo framework + problemR = get_problem(problem_name.lower(), n_var, n_obj) + ref_dirs = get_reference_directions("das-dennis", n_obj, n_partitions=12) + pareto_front = problemR.pareto_front(ref_dirs) + + # creates uniformly distributed reference vectors + reference_vectors = ReferenceVectors(lattice_resolution, n_obj) + + all_rps = np.empty(shape=(L + D, n_obj), dtype="object") + + for i in range(L): + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + problem_name, + n_obj, + i + 1, + gen, + ] + + # problem_nameR = problem_name.lower() + base = baseADM(cf, reference_vectors) + + response = gp.generateRP4learning(base) + # print(response) + + data_row["reference_point"] = [ + response, + ] + + # Reference point generation for the next iteration + pref_int_rvea.response = pd.DataFrame( + [response], columns=pref_int_rvea.content["dimensions_data"].columns + ) + pref_int_nsga.response = pd.DataFrame( + [response], columns=pref_int_nsga.content["dimensions_data"].columns + ) + + _, pref_int_rvea = int_rvea.iterate(pref_int_rvea) + _, pref_int_nsga = int_nsga.iterate(pref_int_nsga) + + cf = generate_composite_front( + cf, int_rvea.population.objectives, int_nsga.population.objectives + ) + + # R-metric calculation + ref_point = response.reshape(1, n_obj) + + rp_transformer = Normalizer().fit(ref_point) + norm_rp = rp_transformer.transform(ref_point) + all_rps[i] = ref_point + + rmetric = rm.RMetric(problemR, norm_rp, pf=pareto_front) + + # normalize solutions before sending r-metric + + rvea_transformer = Normalizer().fit(int_rvea.population.objectives) + norm_rvea = rvea_transformer.transform(int_rvea.population.objectives) + + nsga_transformer = Normalizer().fit(int_nsga.population.objectives) + norm_nsga = nsga_transformer.transform(int_nsga.population.objectives) + + cf_transformer = Normalizer().fit(cf) + norm_cf = cf_transformer.transform(cf) + + rigd_irvea, rhv_irvea = rmetric.calc(norm_rvea, others=norm_nsga) + rigd_insga, rhv_insga = rmetric.calc(norm_nsga, others=norm_rvea) + + data_row[["iRVEA" + excess_col for excess_col in excess_columns]] = [ + rigd_irvea, + 
rhv_irvea, + ] + data_row[["iNSGAIII" + excess_col for excess_col in excess_columns]] = [ + rigd_insga, + rhv_insga, + ] + + data = data.append(data_row, ignore_index=1) + """fig = visualize_3D_front_rp(int_rvea.population.objectives, response) + fig.write_html( + f"./results/decision_behaviour/iRVEA/" + f"iRVEA_{problem_name}_iteration_{i+1}.html" + ) + fig = visualize_3D_front_rp(int_nsga.population.objectives, response) + fig.write_html( + f"./results/decision_behaviour/iNSGA/" + f"iNSGA_{problem_name}_iteration_{i+1}.html" + )""" + + # Decision phase + base = baseADM(cf, reference_vectors) + + max_assigned_vector = gp.get_max_assigned_vector(base.assigned_vectors) + # print(max_assigned_vector[0]) + + for i in range(D): + data_row[["problem", "num_obj", "iteration", "num_gens"]] = [ + problem_name, + n_obj, + L + i + 1, + gen, + ] + + # since composite front grows after each iteration this call should be done for each iteration + base = baseADM(cf, reference_vectors) + + # generates the next reference point for the decision phase + response = gp.generatePerturbatedRP4decision( + base, max_assigned_vector[0] + ) + + data_row["reference_point"] = [ + response, + ] + + # Reference point generation for the next iteration + pref_int_rvea.response = pd.DataFrame( + [response], columns=pref_int_rvea.content["dimensions_data"].columns + ) + pref_int_nsga.response = pd.DataFrame( + [response], columns=pref_int_nsga.content["dimensions_data"].columns + ) + + _, pref_int_rvea = int_rvea.iterate(pref_int_rvea) + _, pref_int_nsga = int_nsga.iterate(pref_int_nsga) + + cf = generate_composite_front( + cf, int_rvea.population.objectives, int_nsga.population.objectives + ) + + # R-metric calculation + ref_point = response.reshape(1, n_obj) + + rp_transformer = Normalizer().fit(ref_point) + norm_rp = rp_transformer.transform(ref_point) + all_rps[L + i] = ref_point + + # for decision phase, delta is specified as 0.2 + rmetric = rm.RMetric(problemR, norm_rp, delta=0.2, pf=pareto_front) + + # normalize solutions before sending r-metric + + rvea_transformer = Normalizer().fit(int_rvea.population.objectives) + norm_rvea = rvea_transformer.transform(int_rvea.population.objectives) + + nsga_transformer = Normalizer().fit(int_nsga.population.objectives) + norm_nsga = nsga_transformer.transform(int_nsga.population.objectives) + + cf_transformer = Normalizer().fit(cf) + norm_cf = cf_transformer.transform(cf) + + rigd_irvea, rhv_irvea = rmetric.calc(norm_rvea, others=norm_nsga) + rigd_insga, rhv_insga = rmetric.calc(norm_nsga, others=norm_rvea) + + data_row[["iRVEA" + excess_col for excess_col in excess_columns]] = [ + rigd_irvea, + rhv_irvea, + ] + data_row[["iNSGAIII" + excess_col for excess_col in excess_columns]] = [ + rigd_insga, + rhv_insga, + ] + + data = data.append(data_row, ignore_index=1) + """ fig = visualize_3D_front_rp(int_rvea.population.objectives, response) + fig.write_html( + f"./results/decision_behaviour/iRVEA/" + f"iRVEA_{problem_name}_iteration_{L+i+1}.html" + ) + fig = visualize_3D_front_rp(int_nsga.population.objectives, response) + fig.write_html( + f"./results/decision_behaviour/iNSGA/" + f"iNSGA_{problem_name}_iteration_{L+i+1}.html" + )""" + fig_rp.add_trace( + go.Scatter3d( + x=all_rps[:, 0], + y=all_rps[:, 1], + z=all_rps[:, 2], + name="Reference points", + mode="lines+markers", + marker_size=5, + ) + ) + fig_rp.write_html(f"./results/decision/" f"RPs_{problem_name}_{gen}.html") + fig = visualize_3D_front_rvs(cf, reference_vectors) + fig.write_html(f"./results/decision/" 
f"cf_{problem_name}_{gen}.html") + # print(all_rps) +data.to_csv("./results/decision/results_3objs_L0delta03_D20delta02.csv", index=False) +
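Usage note (illustrative sketch, not part of the diff): the RMetric class added in
docs/notebooks/rmetric.py can also be driven outside the ADM loop, roughly as below.
The reference point values and the random stand-in fronts are hypothetical; the pymoo
calls mirror the ones already used in visualize_adm_test_dtlz.py.

    import numpy as np
    from pymoo.factory import get_problem, get_reference_directions
    from rmetric import RMetric

    # sample the DTLZ2 Pareto front (3 objectives, 12 variables), as done in the script
    ref_dirs = get_reference_directions("das-dennis", 3, n_partitions=12)
    problem = get_problem("dtlz2", 12, 3)
    pareto_front = problem.pareto_front(ref_dirs)

    ref_points = np.array([[0.4, 0.4, 0.6]])  # one reference point per row (hypothetical)
    metric = RMetric(problem, ref_points, delta=0.3, pf=pareto_front)

    # stand-ins for the normalized objective vectors of two competing runs
    front = np.random.rand(50, 3)
    other_front = np.random.rand(50, 3)

    # R-IGD and R-HV of `front`; `others` is used to pre-filter dominated solutions
    rigd, rhv = metric.calc(front, others=other_front)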