From 5831965000b22e6481a14d707edfba47673420c3 Mon Sep 17 00:00:00 2001 From: liulitong-Jessie <46491025+liulitong-Jessie@users.noreply.github.com> Date: Mon, 14 Aug 2023 18:49:16 -0400 Subject: [PATCH 01/21] Add files via upload --- demo_radom_model.py | 68 ++++++++++++++ demo_random_problem.py | 103 +++++++++++++++++++++ demo_random_problem_solver.py | 100 ++++++++++++++++++++ demo_user.py | 167 ++++++++++++++++++++++++++++++++++ 4 files changed, 438 insertions(+) create mode 100644 demo_radom_model.py create mode 100644 demo_random_problem.py create mode 100644 demo_random_problem_solver.py create mode 100644 demo_user.py diff --git a/demo_radom_model.py b/demo_radom_model.py new file mode 100644 index 000000000..2c955a31f --- /dev/null +++ b/demo_radom_model.py @@ -0,0 +1,68 @@ +""" +This script is intended to help with debugging a random model. +It imports a model, initializes a model object with given factors, +sets up pseudorandom number generators, and runs one or more replications. +""" + +""" +Instead of modifying the problem and model class, we modify the demo_model and demo_problems. +""" + +import sys +import os.path as o +sys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), ".."))) + +import numpy as np +# Import random number generator. +# from mrg32k3a.mrg32k3a import MRG32k3a +from rng.mrg32k3a import MRG32k3a + +# Import model. +from simopt.models.san_2 import SAN + +fixed_factors = {} +mymodel = SAN(fixed_factors = fixed_factors, random=True) + +# from models. import +# Replace with name of .py file containing model class. +# Replace with name of model class. + +# Fix factors of model. Specify a dictionary of factors. + +# fixed_factors = {} # Resort to all default values. +# Look at Model class definition to get names of factors. + +# Initialize an instance of the specified model class. + +# mymodel = (fixed_factors) +# Replace with name of model class. + +# Working example for MM1 model. 
+# ----------------------------------------------- +# from simopt.models.mm1queue import MM1Queue +# fixed_factors = {"lambda": 3.0, "mu": 8.0} +# mymodel = MM1Queue(fixed_factors) +# ----------------------------------------------- + +# The rest of this script requires no changes. + +# Check that all factors describe a simulatable model. +# Check fixed factors individually. + +for key, value in mymodel.factors.items(): + print(f"The factor {key} is set as {value}. Is this simulatable? {bool(mymodel.check_simulatable_factor(key))}.") +# Check all factors collectively. +print(f"Is the specified model simulatable? {bool(mymodel.check_simulatable_factors())}.") + +# Create a list of RNG objects for the simulation model to use when +# running replications. +rng_list = [MRG32k3a(s_ss_sss_index=[0, ss, 0]) for ss in range(mymodel.n_rngs)] +rng_list2 = [MRG32k3a(s_ss_sss_index=[2, 4 + ss, 0]) for ss in range(mymodel.n_random)] + +mymodel.attach_rng(rng_list2) +responses, gradients = mymodel.replicate(rng_list) +print("\nFor a single replication:") +print("\nResponses:") +for key, value in responses.items(): + print(f"\t {key} is {value}.") + diff --git a/demo_random_problem.py b/demo_random_problem.py new file mode 100644 index 000000000..8dfdab754 --- /dev/null +++ b/demo_random_problem.py @@ -0,0 +1,103 @@ +""" +This script is intended to help with debugging a random problem. +It imports a random problem, initializes a problem object with given factors, +sets up pseudorandom number generators, and runs multiple replications +at a given solution. +""" + +import sys +import os.path as o +sys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), ".."))) + +# Import random number generator. +# from mrg32k3a.mrg32k3a import MRG32k3a +from rng.mrg32k3a import MRG32k3a + +# Import the Solution class. +from simopt.base import Solution + +# Import problem. +# from models. import +# Replace with name of .py file containing problem class. 
+# Replace with name of problem class. + +# Fix factors of problem. Specify a dictionary of factors. + +# fixed_factors = {} # Resort to all default values. +# Look at Problem class definition to get names of factors. + +# Initialize an instance of the specified problem class. + +# myproblem = (fixed_factors=fixed_factors) +# Replace with name of problem class. + +# Initialize a solution x corresponding to the problem. + +# Look at the Problem class definition to identify the decision variables. +# x will be a tuple consisting of the decision variables. + +# The following line does not need to be changed. +# mysolution = Solution(x, myproblem) + +# ----------------------------------------------- + +from simopt.models.san_2 import SANLongestPath # Change this import command correspondingly + +def rebase(random_rng, n): + new_rngs = [] + for rng in random_rng: + stream_index = rng.s_ss_sss_index[0] + substream_index = rng.s_ss_sss_index[1] + subsubstream_index = rng.s_ss_sss_index[2] + new_rngs.append(MRG32k3a(s_ss_sss_index=[stream_index, substream_index + n, subsubstream_index])) + random_rng = new_rngs + return random_rng + +n_inst = 5 # The number of random instances you want to generate + +model_fixed_factors = {"num_nodes": 9, "num_arcs": 14} # Change to empty {} if want to use the default value +myproblem = SANLongestPath(model_fixed_factors=model_fixed_factors, random=True) # Change to the imported problem + +rng_list = [MRG32k3a(s_ss_sss_index=[0, ss, 0]) for ss in range(myproblem.model.n_rngs)] +random_rng = [MRG32k3a(s_ss_sss_index=[2, 4, ss]) for ss in range(myproblem.model.n_random, myproblem.model.n_random + myproblem.n_rngs)] +rng_list2 = [MRG32k3a(s_ss_sss_index=[2, 4, ss]) for ss in range(myproblem.model.n_random)] + +# Generate n_inst random problem instances +for i in range(n_inst): + random_rng = rebase(random_rng, 1) + rng_list2 = rebase(rng_list2, 1) + myproblem = SANLongestPath(model_fixed_factors=model_fixed_factors, random=True, 
random_rng=rng_list2) # Change to the imported problem + myproblem.attach_rngs(random_rng) + x = (8,) * myproblem.dim # Change the initial value according to the dimension + mysolution = Solution(x, myproblem) + mysolution.attach_rngs(rng_list, copy=False) + + # Simulate a fixed number of replications (n_reps) at the solution x. + n_reps = 10 + + myproblem.simulate(mysolution, m=n_reps) + + # Print results to console. + print(mysolution.objectives_mean[0]) + print(type(mysolution)) + print(f"Ran {n_reps} replications of the {myproblem.name} problem at solution x = {x}.\n") + # print(f"The mean objective estimate was {round(mysolution.objectives_mean[0], 4)} with standard error {round(mysolution.objectives_stderr[0], 4)}.") + print("The individual observations of the objective were:") + for idx in range(n_reps): + print(f"\t {round(mysolution.objectives[idx][0], 4)}") + if myproblem.gradient_available: + print("\nThe individual observations of the gradients of the objective were:") + for idx in range(n_reps): + print(f"\t {[round(g, 4) for g in mysolution.objectives_gradients[idx][0]]}") + else: + print("\nThis problem has no known gradients.") + if myproblem.n_stochastic_constraints > 0: + print(f"\nThis problem has {myproblem.n_stochastic_constraints} stochastic constraints of the form E[LHS] <= 0.") + for stc_idx in range(myproblem.n_stochastic_constraints): + print(f"\tFor stochastic constraint #{stc_idx + 1}, the mean of the LHS was {round(mysolution.stoch_constraints_mean[stc_idx], 4)} with standard error {round(mysolution.stoch_constraints_stderr[stc_idx], 4)}.") + print("\tThe observations of the LHSs were:") + for idx in range(n_reps): + print(f"\t\t {round(mysolution.stoch_constraints[idx][stc_idx], 4)}") + else: + print("\nThis problem has no stochastic constraints.") + diff --git a/demo_random_problem_solver.py b/demo_random_problem_solver.py new file mode 100644 index 000000000..dc37c07f2 --- /dev/null +++ b/demo_random_problem_solver.py @@ -0,0 +1,100 
@@ +""" +This script is intended to help with debugging random problems and solvers. +It creates a problem-solver pairing by importing problems and runs multiple +macroreplications of the solver on the problem. +""" + +import sys +import os.path as o +import numpy as np +import os +sys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), ".."))) + +# Import the ProblemSolver class and other useful functions +from simopt.experiment_base import ProblemSolver, read_experiment_results, post_normalize, plot_progress_curves, plot_solvability_cdfs +from rng.mrg32k3a import MRG32k3a +from simopt.models.san_2 import SANLongestPath, SANLongestPathConstr + +# !! When testing a new solver/problem, first go to directory.py. +# See directory.py for more details. +# Specify the names of the solver to test. + +# ----------------------------------------------- +solver_name = "RNDSRCH" # Random search solver +# ----------------------------------------------- + + +def rebase(random_rng, n): + new_rngs = [] + for rng in random_rng: + stream_index = rng.s_ss_sss_index[0] + substream_index = rng.s_ss_sss_index[1] + subsubstream_index = rng.s_ss_sss_index[2] + new_rngs.append(MRG32k3a(s_ss_sss_index=[stream_index, substream_index + n, subsubstream_index])) + random_rng = new_rngs + return random_rng + +def strtobool(t): + t = t.lower() + if t == "t": + return True + else: + return False + +n_inst = int(input('Please enter the number of instance you want to generate: ')) +rand = input('Please decide whether you want to generate random instances or determinent instances (T/F): ') +rand = strtobool(rand) + +model_fixed_factors = {} # Override model factors + +myproblem = SANLongestPathConstr(random=True, model_fixed_factors=model_fixed_factors) + +random_rng = [MRG32k3a(s_ss_sss_index=[2, 4, ss]) for ss in range(myproblem.model.n_random, myproblem.model.n_random + myproblem.n_rngs)] +rng_list2 = [MRG32k3a(s_ss_sss_index=[2, 4, ss]) for ss in 
range(myproblem.model.n_random)] + +# Generate n_inst random problem instances +for i in range(n_inst): + random_rng = rebase(random_rng, 1) + rng_list2 = rebase(rng_list2, 1) + myproblem = SANLongestPathConstr(random=rand, random_rng=rng_list2, model_fixed_factors=model_fixed_factors) + myproblem.attach_rngs(random_rng) + problem_name = myproblem.model.name + str(i) + print('-------------------------------------------------------') + print(f"Testing solver {solver_name} on problem {problem_name}.") + + # Specify file path name for storing experiment outputs in .pickle file. + file_name_path = "experiments/outputs/" + solver_name + "_on_" + problem_name + ".pickle" + print(f"Results will be stored as {file_name_path}.") + + # Initialize an instance of the experiment class. + myexperiment = ProblemSolver(solver_name=solver_name, problem=myproblem) + + # Run a fixed number of macroreplications of the solver on the problem. + myexperiment.run(n_macroreps=100) + + # If the solver runs have already been performed, uncomment the + # following pair of lines (and comment out the myexperiment.run(...) + # line above) to read in results from a .pickle file. + # myexperiment = read_experiment_results(file_name_path) + + print("Post-processing results.") + # Run a fixed number of postreplications at all recommended solutions. + myexperiment.post_replicate(n_postreps=1) #200, 10 + # Find an optimal solution x* for normalization. + post_normalize([myexperiment], n_postreps_init_opt=1) #200, 5 + + # Log results. + myexperiment.log_experiment_results() + + print("Optimal solution: ",np.array(myexperiment.xstar)) + print("Optimal Value: ", myexperiment.all_est_objectives[0]) + + print("Plotting results.") + # Produce basic plots of the solver on the problem. 
+ plot_progress_curves(experiments=[myexperiment], plot_type="all", normalize=False) + plot_progress_curves(experiments=[myexperiment], plot_type="mean", normalize=False) + plot_progress_curves(experiments=[myexperiment], plot_type="quantile", beta=0.90, normalize=False) + plot_solvability_cdfs(experiments=[myexperiment], solve_tol=0.1) + + # Plots will be saved in the folder experiments/plots. + print("Finished. Plots can be found in experiments/plots folder.") \ No newline at end of file diff --git a/demo_user.py b/demo_user.py new file mode 100644 index 000000000..1872aa0a0 --- /dev/null +++ b/demo_user.py @@ -0,0 +1,167 @@ +""" +This script is the user interface for generating multiple random problem instances and +solving them with specified solvers. +It creates problem-solver groups and runs multiple +macroreplications of each problem-solver pair. To run the file, users need +to import the solver and problem they want to build random instances at the beginning, +and also provide an input file, which includes the information needed to +build random instances (the name of the problem, number of random instances to +generate, and some overriding factors). +""" + +import sys +import os.path as o +import os +import re +sys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), ".."))) + +# Import the ProblemsSolvers class and other useful functions +from simopt.experiment_base import ProblemsSolvers, plot_solvability_profiles +from rng.mrg32k3a import MRG32k3a +from simopt.base import Solution +from simopt.models.smf import SMF_Max +from simopt.models.rmitd import RMITDMaxRevenue +from simopt.models.san_2 import SANLongestPath, SANLongestPathConstr +from simopt.models.mm1queue import MM1MinMeanSojournTime + + +# !! When testing a new solver/problem, first import problems from the random code file, +# Then create a test_input.txt file in your computer. 
+# There you should add the import statement and an entry in the file +# You need to specify the names of solvers and problems you want to test in the file by 'solver_name' +# And specify the problem-related information by problem = [...] +# All lines starting with '#' will be treated as comments and will not be executed +# See the following example for more details. + +# Ex: +# To create two random instances of SAN and three random instances of SMF: +# In the demo_user.py, modify: +# from simopt.models.smf import SMF_Max +# from simopt.models.san_2 import SANLongestPath +# In the input information file (test_input.txt), include the following lines: +# solver_names = ["RNDSRCH", "ASTRODF", "NELDMD"] +# problem1 = [SANLongestPath, 2, {'num_nodes':8, 'num_arcs':12}] +# problem2 = [SMF_Max, 3, {'num_nodes':7, 'num_arcs':16}] + +# Grab information from the input file +def get_info(path): + L = [] + with open(path) as file: + lines = [line.rstrip() for line in file] + for line in lines: + if not line.startswith("#") and line: + L.append(line) + lines = L + command_lines = [] + problem_sets = [] + for line in lines: + if 'import' in line: + command_lines.append(line) + elif 'solver_names' in line: + solver_names = line + else: + problem_sets.append(line) + + for i in command_lines: + exec(i) + + problems = [] + solver_names = eval(re.findall(r'\[.*?\]', solver_names)[0]) + for l in problem_sets: + o = re.findall(r'\[.*?\]', l)[0] + problems.append(eval(o)) + + problem_sets = [p[0] for p in problems] + L_num = [p[1] for p in problems] + L_para = [p[2] for p in problems] + + return solver_names, problem_sets, L_num, L_para + +# Read input file and process information +path = input('Please input the path of the input file: ') +if "'" in path: # If the input path already has quotation marks + path = path.replace("'", "") + +solver_names, problem_set, L_num, L_para = get_info(path) +rands = [True for i in range(len(problem_set))] + +# Check whether the input file is valid +if 
len(L_num) != len(problem_set) or len(L_para) != len(problem_set): + print('Invalid input. The input number of random instances does not match with the number of problems you want.') + print('Please check your input file') + +def rebase(random_rng, n): + new_rngs = [] + for rng in random_rng: + stream_index = rng.s_ss_sss_index[0] + substream_index = rng.s_ss_sss_index[1] + subsubstream_index = rng.s_ss_sss_index[2] + new_rngs.append(MRG32k3a(s_ss_sss_index=[stream_index, substream_index + n, subsubstream_index])) + random_rng = new_rngs + return random_rng + +myproblems = problem_set + +# Check whether the problem is random +for i in range(len(problem_set)): + if L_num[i] == 0: + L_num[i] = 1 + rands[i] = False + else: + rands[i] = True + +problems = [] +problem_names = [] + +def generate_problem(i, myproblems, rands, problems, L_num, L_para): + print('For problem ', myproblems[i]().name, ':') + model_fixed_factors = L_para[i] + + name = myproblems[i] + myproblem = name(model_fixed_factors=model_fixed_factors, random=rands[i]) + random_rng = [MRG32k3a(s_ss_sss_index=[2, 4 + L_num[i], ss]) for ss in range(myproblem.n_rngs)] + rng_list2 = [MRG32k3a(s_ss_sss_index=[2, 4, ss]) for ss in range(myproblem.model.n_random)] + + if rands[i] == False: # Determinant case + problems.append(myproblem) + myproblem.name = str(myproblem.model.name) + str(0) + problem_names.append(myproblem.name) + print('') + + else: + for j in range(L_num[i]): + random_rng = rebase(random_rng, 1) # Advance the substream for different instances + rng_list2 = rebase(rng_list2, 1) + name = myproblems[i] + myproblem = name(model_fixed_factors=model_fixed_factors, random=rands[i], random_rng=rng_list2) + myproblem.attach_rngs(random_rng) + # myproblem.name = str(myproblem.model.name) + str(j) + myproblem.name = str(myproblem.name) + '-' + str(j) + problems.append(myproblem) + problem_names.append(myproblem.name) + print('') + + return problems, problem_names + +# Generate problems +for i in 
range(len(L_num)): + problems, problem_names = generate_problem(i, myproblems, rands, problems, L_num, L_para) + +# Initialize an instance of the experiment class. +mymetaexperiment = ProblemsSolvers(solver_names=solver_names, problems = problems) + +# Run a fixed number of macroreplications of each solver on each problem. +mymetaexperiment.run(n_macroreps=3) + +print("Post-processing results.") +# Run a fixed number of postreplications at all recommended solutions. +mymetaexperiment.post_replicate(n_postreps=20) +# Find an optimal solution x* for normalization. +mymetaexperiment.post_normalize(n_postreps_init_opt=20) + +print("Plotting results.") +# Produce basic plots of the solvers on the problems. +plot_solvability_profiles(experiments=mymetaexperiment.experiments, plot_type="cdf_solvability") + +# Plots will be saved in the folder experiments/plots. +print("Finished. Plots can be found in experiments/plots folder.") \ No newline at end of file From 5b3faf726c82a32ddeddb36ab7fe95d74e493a35 Mon Sep 17 00:00:00 2001 From: liulitong-Jessie <46491025+liulitong-Jessie@users.noreply.github.com> Date: Tue, 15 Aug 2023 15:45:23 -0400 Subject: [PATCH 02/21] Add files via upload These experiment_base.py and base.py should be used when one wants to use demo_random_model/problem/user to generate and solve random problem instances. --- base.py | 989 ++++++++++++++++ experiment_base.py | 2725 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 3714 insertions(+) create mode 100644 base.py create mode 100644 experiment_base.py diff --git a/base.py b/base.py new file mode 100644 index 000000000..e2e8b47d0 --- /dev/null +++ b/base.py @@ -0,0 +1,989 @@ +#!/usr/bin/env python +""" +Summary +------- +Provide base classes for solvers, problems, and models. +This is the modified version to generate and run random model/random problem instance. 
+""" + +import numpy as np +from copy import deepcopy +import sys +import os.path as o +# from mrg32k3a.mrg32k3a import MRG32k3a +from rng.mrg32k3a import MRG32k3a #when do the multinomial, change to the local + +from simopt.auto_diff_util import bi_dict, replicate_wrapper + +sys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), ".."))) + + +class Solver(object): + """Base class to implement simulation-optimization solvers. + + Attributes + ---------- + name : str + Name of solver. + objective_type : str + Description of objective types: "single" or "multi". + constraint_type : str + Description of constraints types: "unconstrained", "box", "deterministic", "stochastic". + variable_type : str + Description of variable types: "discrete", "continuous", "mixed". + gradient_needed : bool + True if gradient of objective function is needed, otherwise False. + factors : dict + Changeable factors (i.e., parameters) of the solver. + specifications : dict + Details of each factor (for GUI, data validation, and defaults). + rng_list : list [``mrg32k3a.mrg32k3a.MRG32k3a``] + List of RNGs used for the solver's internal purposes. + solution_progenitor_rngs : list [``mrg32k3a.mrg32k3a.MRG32k3a``] + List of RNGs used as a baseline for simulating solutions. + + Parameters + ---------- + fixed_factors : dict + Dictionary of user-specified solver factors. + """ + def __init__(self, fixed_factors): + # Set factors of the solver. + # Fill in missing factors with default values. + self.factors = fixed_factors + for key in self.specifications: + if key not in fixed_factors: + self.factors[key] = self.specifications[key]["default"] + + def __eq__(self, other): + """Check if two solvers are equivalent. + + Parameters + ---------- + other : ``base.Solver`` + Other Solver object to compare to self. + + Returns + ------- + bool + True if the two solvers are equivalent, otherwise False. 
+ """ + if type(self) == type(other): + if self.factors == other.factors: + return True + else: + # print("Solver factors do not match.") + return False + else: + # print("Solver types do not match.") + return False + + def attach_rngs(self, rng_list): + """Attach a list of random-number generators to the solver. + + Parameters + ---------- + rng_list : list [``mrg32k3a.mrg32k3a.MRG32k3a``] + List of random-number generators used for the solver's internal purposes. + """ + self.rng_list = rng_list + + def solve(self, problem): + """Run a single macroreplication of a solver on a problem. + + Notes + ----- + Each subclass of ``base.Solver`` has its own custom ``solve`` method. + + Parameters + ---------- + problem : ``base.Problem`` + Simulation-optimization problem to solve. + + Returns + ------- + recommended_solns : list [``Solution``] + List of solutions recommended throughout the budget. + intermediate_budgets : list [int] + List of intermediate budgets when recommended solutions changes. + """ + raise NotImplementedError + + def check_crn_across_solns(self): + """Check solver factor crn_across_solns. + + Notes + ----- + Currently implemented to always return True. This factor must be a bool. + """ + return True + + def check_solver_factor(self, factor_name): + """Determine if the setting of a solver factor is permissible. + + Parameters + ---------- + factor_name : str + Name of factor for dictionary lookup (i.e., key). + + Returns + ------- + is_permissible : bool + True if the solver factor is permissible, otherwise False. + """ + is_permissible = True + is_permissible *= self.check_factor_datatype(factor_name) + is_permissible *= self.check_factor_list[factor_name]() + return is_permissible + # raise NotImplementedError + + def check_solver_factors(self): + """Determine if the joint settings of solver factors are permissible. + + Notes + ----- + Each subclass of ``base.Solver`` has its own custom ``check_solver_factors`` method. 
+ + Returns + ------- + is_simulatable : bool + True if the solver factors are permissible, otherwise False. + """ + return True + # raise NotImplementedError + + def check_factor_datatype(self, factor_name): + """Determine if a factor's data type matches its specification. + + Parameters + ---------- + factor_name : str + String corresponding to name of factor to check. + + Returns + ------- + is_right_type : bool + True if factor is of specified data type, otherwise False. + """ + is_right_type = isinstance(self.factors[factor_name], self.specifications[factor_name]["datatype"]) + return is_right_type + + def create_new_solution(self, x, problem): + """Create a new solution object with attached RNGs primed + to simulate replications. + + Parameters + ---------- + x : tuple + Vector of decision variables. + problem : ``base.Problem`` + Problem being solved by the solvers. + + Returns + ------- + new_solution : ``base.Solution`` + New solution. + """ + # Create new solution with attached rngs. + new_solution = Solution(x, problem) + new_solution.attach_rngs(rng_list=self.solution_progenitor_rngs, copy=True) + # Manipulate progenitor rngs to prepare for next new solution. + if not self.factors["crn_across_solns"]: # If CRN are not used ... + # ...advance each rng to start of the substream = current substream + # of model RNGs. + for rng in self.solution_progenitor_rngs: + for _ in range(problem.model.n_rngs): + rng.advance_substream() + return new_solution + + def rebase(self, n_reps): + """Rebase the progenitor rngs to start at a later subsubstream index. + + Parameters + ---------- + n_reps : int + Substream index to skip to. 
+ """ + new_rngs = [] + for rng in self.solution_progenitor_rngs: + stream_index = rng.s_ss_sss_index[0] + substream_index = rng.s_ss_sss_index[1] + new_rngs.append(MRG32k3a(s_ss_sss_index=[stream_index, substream_index, n_reps])) + self.solution_progenitor_rngs = new_rngs + + +class Problem(object): + """Base class to implement simulation-optimization problems. + + Attributes + ---------- + name : str + Name of problem. + dim : int + Number of decision variables. + n_objectives : int + Number of objectives. + n_stochastic_constraints : int + Number of stochastic constraints. + minmax : tuple [int] + Indicators of maximization (+1) or minimization (-1) for each objective. + constraint_type : str + Description of constraints types: "unconstrained", "box", "deterministic", "stochastic". + variable_type : str + Description of variable types: "discrete", "continuous", "mixed". + lower_bounds : tuple + Lower bound for each decision variable. + upper_bounds : tuple + Upper bound for each decision variable. + gradient_available : bool + True if direct gradient of objective function is available, otherwise False. + optimal_value : float + Optimal objective function value. + optimal_solution : tuple + Optimal solution. + model : ``base.Model`` + Associated simulation model that generates replications. + model_default_factors : dict + Default values for overriding model-level default factors. + model_fixed_factors : dict + Combination of overriden model-level factors and defaults. + model_decision_factors : set [str] + Set of keys for factors that are decision variables. + rng_list : list [``mrg32k3a.mrg32k3a.MRG32k3a``] + List of RNGs used to generate a random initial solution + or a random problem instance. + factors : dict + Changeable factors of the problem: + initial_solution : tuple + Default initial solution from which solvers start. + budget : int + Max number of replications (fn evals) for a solver to take. 
+ specifications : dict + Details of each factor (for GUI, data validation, and defaults). + + Parameters + ---------- + fixed_factors : dict + Dictionary of user-specified problem factors. + model_fixed_factors : dict + Subset of user-specified non-decision factors to pass through to the model. + """ + def __init__(self, fixed_factors, model_fixed_factors): + # Set factors of the problem. + # Fill in missing factors with default values. + self.factors = fixed_factors + for key in self.specifications: + if key not in fixed_factors: + self.factors[key] = self.specifications[key]["default"] + # Set subset of factors of the simulation model. + # Fill in missing model factors with problem-level default values. + for key in self.model_default_factors: + if key not in model_fixed_factors: + model_fixed_factors[key] = self.model_default_factors[key] + self.model_fixed_factors = model_fixed_factors + # super().__init__() + + def __eq__(self, other): + """Check if two problems are equivalent. + + Parameters + ---------- + other : ``base.Problem`` + Other ``base.Problem`` objects to compare to self. + + Returns + ------- + bool + True if the two problems are equivalent, otherwise False. + """ + if type(self) == type(other): + if self.factors == other.factors: + # Check if non-decision-variable factors of models are the same. + non_decision_factors = set(self.model.factors.keys()) - self.model_decision_factors + for factor in non_decision_factors: + if self.model.factors[factor] != other.model.factors[factor]: + # print("Model factors do not match") + return False + return True + else: + # print("Problem factors do not match.") + return False + else: + # print("Problem types do not match.") + return False + + def check_initial_solution(self): + """Check if initial solution is feasible and of correct dimension. + + Returns + ------- + bool + True if initial solution is feasible and of correct dimension, otherwise False. 
+ """ + if len(self.factors["initial_solution"]) != self.dim: + return False + elif not self.check_deterministic_constraints(x=self.factors["initial_solution"]): + return False + else: + return True + + def check_budget(self): + """Check if budget is strictly positive. + + Returns + ------- + bool + True if budget is strictly positive, otherwise False. + """ + return self.factors["budget"] > 0 + + def check_problem_factor(self, factor_name): + """Determine if the setting of a problem factor is permissible. + + Parameters + ---------- + factor_name : str + Name of factor for dictionary lookup (i.e., key). + + Returns + ------- + is_permissible : bool + True if problem factor is permissible, otherwise False. + """ + is_permissible = True + is_permissible *= self.check_factor_datatype(factor_name) + is_permissible *= self.check_factor_list[factor_name]() + return is_permissible + # raise NotImplementedError + + def check_problem_factors(self): + """Determine if the joint settings of problem factors are permissible. + + Notes + ----- + Each subclass of ``base.Problem`` has its own custom ``check_problem_factors`` method. + + Returns + ------- + is_simulatable : bool + True if problem factors are permissible, otherwise False. + """ + return True + # raise NotImplementedError + + def check_factor_datatype(self, factor_name): + """Determine if a factor's data type matches its specification. + + Parameters + ---------- + factor_name : str + String corresponding to name of factor to check. + + Returns + ------- + is_right_type : bool + True if factor is of specified data type, otherwise False. + """ + is_right_type = isinstance(self.factors[factor_name], self.specifications[factor_name]["datatype"]) + return is_right_type + + def attach_rngs(self, random_rng, copy=True): + """Attach a list of random-number generators to the problem. 
+ + Parameters + ---------- + rng_list : list [``mrg32k3a.mrg32k3a.MRG32k3a``] + List of random-number generators used to generate a random initial solution + or a random problem instance. + """ + if copy: + self.random_rng = [deepcopy(rng) for rng in random_rng] + else: + self.random_rng = random_rng + + def rebase(self, n_reps): + """Rebase the progenitor rngs to start at a later subsubstream index. + + Parameters + ---------- + n_reps : int + Substream index to skip to. + """ + new_rngs = [] + for rng in self.random_rng: + stream_index = rng.s_ss_sss_index[0] + substream_index = rng.s_ss_sss_index[1] + new_rngs.append(MRG32k3a(s_ss_sss_index=[stream_index, substream_index, n_reps])) + self.random_rng = new_rngs + + def vector_to_factor_dict(self, vector): + """ + Convert a vector of variables to a dictionary with factor keys. + + Notes + ----- + Each subclass of ``base.Problem`` has its own custom ``vector_to_factor_dict`` method. + + Parameters + ---------- + vector : tuple + Vector of values associated with decision variables. + + Returns + ------- + factor_dict : dict + Dictionary with factor keys and associated values. + """ + raise NotImplementedError + + def factor_dict_to_vector(self, factor_dict): + """Convert a dictionary with factor keys to a vector + of variables. + + Notes + ----- + Each subclass of ``base.Problem`` has its own custom ``factor_dict_to_vector`` method. + + Parameters + ---------- + factor_dict : dict + Dictionary with factor keys and associated values. + + Returns + ------- + vector : tuple + Vector of values associated with decision variables. + """ + raise NotImplementedError + + def factor_dict_to_vector_gradients(self, factor_dict): + """Convert a dictionary with factor keys to a gradient vector. + + Notes + ----- + A subclass of ``base.Problem`` can have its own custom + ``factor_dict_to_vector_gradients`` method if the + objective is deterministic. 
+ + Parameters + ---------- + factor_dict : dict + Dictionary with factor keys and associated values. + + Returns + ------- + vector : tuple + Vector of partial derivatives associated with decision variables. + """ + return self.factor_dict_to_vector(factor_dict) + + def response_dict_to_objectives(self, response_dict): + """Convert a dictionary with response keys to a vector + of objectives. + + Notes + ----- + Each subclass of ``base.Problem`` has its own custom ``response_dict_to_objectives`` method. + + Parameters + ---------- + response_dict : dict + Dictionary with response keys and associated values. + + Returns + ------- + objectives : tuple + Vector of objectives. + """ + raise NotImplementedError + + def response_dict_to_objectives_gradients(self, response_dict): + """Convert a dictionary with response keys to a vector + of gradients. + + Notes + ----- + A subclass of ``base.Problem`` can have its own custom + ``response_dict_to_objectives_gradients`` method if the + objective is deterministic. + + Parameters + ---------- + response_dict : dict + Dictionary with response keys and associated values. + + Returns + ------- + vector : tuple + Vector of gradients. + """ + return self.response_dict_to_objectives(response_dict) + + def response_dict_to_stoch_constraints(self, response_dict): + """Convert a dictionary with response keys to a vector + of left-hand sides of stochastic constraints: E[Y] <= 0. + + Notes + ----- + Each subclass of ``base.Problem`` has its own custom ``response_dict_to_stoch_constraints`` method. + + Parameters + ---------- + response_dict : dict + Dictionary with response keys and associated values. + + Returns + ------- + stoch_constraints : tuple + Vector of LHSs of stochastic constraints. + """ + stoch_constraints = () + return stoch_constraints + + def deterministic_objectives_and_gradients(self, x): + """Compute deterministic components of objectives for a solution `x`. 
+ + Parameters + ---------- + x : tuple + Vector of decision variables. + + Returns + ------- + det_objectives : tuple + Vector of deterministic components of objectives. + det_objectives_gradients : tuple + Vector of gradients of deterministic components of objectives. + """ + det_objectives = (0,) * self.n_objectives + det_objectives_gradients = tuple([(0,) * self.dim for _ in range(self.n_objectives)]) + return det_objectives, det_objectives_gradients + + def deterministic_stochastic_constraints_and_gradients(self, x): + """Compute deterministic components of stochastic constraints + for a solution `x`. + + Parameters + ---------- + x : tuple + Vector of decision variables. + + Returns + ------- + det_stoch_constraints : tuple + Vector of deterministic components of stochastic + constraints. + det_stoch_constraints_gradients : tuple + Vector of gradients of deterministic components of + stochastic constraints. + """ + det_stoch_constraints = (0,) * self.n_stochastic_constraints + det_stoch_constraints_gradients = tuple([(0,) * self.dim for _ in range(self.n_stochastic_constraints)]) + return det_stoch_constraints, det_stoch_constraints_gradients + + def check_deterministic_constraints(self, x): + """Check if a solution `x` satisfies the problem's deterministic + constraints. + + Parameters + ---------- + x : tuple + Vector of decision variables. + + Returns + ------- + satisfies : bool + True if solution `x` satisfies the deterministic constraints, + otherwise False. + """ + # Check box constraints. + return bool(np.prod([self.lower_bounds[idx] <= x[idx] <= self.upper_bounds[idx] for idx in range(len(x))])) + + def get_random_solution(self, rand_sol_rng): + """Generate a random solution for starting or restarting solvers. + + Parameters + ---------- + rand_sol_rng : ``mrg32k3a.mrg32k3a.MRG32k3a`` + Random-number generator used to sample a new random solution. 
    def simulate(self, solution, m=1):
        """Simulate `m` i.i.d. replications at solution `x`.

        Notes
        -----
        Gradients of objective function and stochastic constraint LHSs
        are temporarily commented out. Under development.

        Parameters
        ----------
        solution : ``base.Solution``
            Solution to evaluate.
        m : int
            Number of replications to simulate at `x`.
        """
        if m < 1:
            # Invalid request: print and return without raising, leaving the
            # solution unchanged.
            print('--* Error: Number of replications must be at least 1. ')
            print('--* Aborting. ')
        else:
            # Pad numpy arrays if necessary.
            if solution.n_reps + m > solution.storage_size:
                solution.pad_storage(m)
            # Set the decision factors of the model.
            self.model.factors.update(solution.decision_factors)
            for _ in range(m):
                # Generate one replication at x.
                responses, gradients = self.model.replicate(solution.rng_list)
                # Convert gradient subdictionaries to vectors mapping to
                # decision variables (only needed when gradients exist).
                if self.gradient_available:
                    vector_gradients = {keys: self.factor_dict_to_vector_gradients(gradient_dict) for (keys, gradient_dict) in gradients.items()}
                # Convert responses (and gradients) to objectives (and
                # gradients) and add the deterministic components.
                solution.objectives[solution.n_reps] = [sum(pairs) for pairs in zip(self.response_dict_to_objectives(responses), solution.det_objectives)]
                if self.gradient_available:
                    solution.objectives_gradients[solution.n_reps] = [[sum(pairs) for pairs in zip(stoch_obj, det_obj)] for stoch_obj, det_obj in zip(self.response_dict_to_objectives_gradients(vector_gradients), solution.det_objectives_gradients)]
                if self.n_stochastic_constraints > 0:
                    # Convert responses to stochastic constraint LHSs and add
                    # the deterministic components.
                    solution.stoch_constraints[solution.n_reps] = [sum(pairs) for pairs in zip(self.response_dict_to_stoch_constraints(responses), solution.det_stoch_constraints)]
                # Increment replication counter.
                solution.n_reps += 1
                # Advance rngs to start of next subsubstream so successive
                # replications use independent random-number streams.
                for rng in solution.rng_list:
                    rng.advance_subsubstream()
            # Update summary statistics once after all m replications.
            solution.recompute_summary_statistics()

    def simulate_up_to(self, solutions, n_reps):
        """Simulate a set of solutions up to a given number of replications.

        Parameters
        ----------
        solutions : set [``base.Solution``]
            A set of ``base.Solution`` objects.
        n_reps : int
            Common number of replications to simulate each solution up to.
        """
        for solution in solutions:
            # If more replications needed, take only the shortfall (solutions
            # already at or beyond n_reps are left untouched).
            if solution.n_reps < n_reps:
                n_reps_to_take = n_reps - solution.n_reps
                self.simulate(solution=solution, m=n_reps_to_take)
class Model(object):
    """Base class to implement simulation models (models) featured in
    simulation-optimization problems.

    Attributes
    ----------
    name : str
        Name of model.
    n_rngs : int
        Number of random-number generators used to run a simulation replication.
    n_responses : int
        Number of responses (performance measures).
    factors : dict
        Changeable factors of the simulation model.
    specifications : dict
        Details of each factor (for GUI, data validation, and defaults).
    check_factor_list : dict
        Switch case for checking factor simulatability.

    Parameters
    ----------
    fixed_factors : dict
        Dictionary of user-specified model factors.
    """
    def __init__(self, fixed_factors):
        # Work on a copy: the original assigned `self.factors = fixed_factors`
        # and then inserted defaults, silently mutating the caller's dict.
        self.factors = dict(fixed_factors)
        # Fill in missing factors with their specification defaults.
        for key in self.specifications:
            if key not in self.factors:
                self.factors[key] = self.specifications[key]["default"]

    def __eq__(self, other):
        """Check if two models are equivalent.

        Parameters
        ----------
        other : ``base.Model``
            Other ``base.Model`` object to compare to self.

        Returns
        -------
        bool
            True if the two models are equivalent, otherwise False.
        """
        # Equivalent iff same concrete type and identical factor settings.
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable — confirm models are never stored in sets/dict keys.
        return type(self) == type(other) and self.factors == other.factors

    def check_simulatable_factor(self, factor_name):
        """Determine if a simulation replication can be run with the given factor.

        Parameters
        ----------
        factor_name : str
            Name of factor for dictionary lookup (i.e., key).

        Returns
        -------
        is_simulatable : bool
            True if model specified by factors is simulatable, otherwise False.
        """
        # Short-circuit on the datatype check so the factor-specific check is
        # never called with a value of the wrong type (which could raise).
        # The original accumulated with `*=`, returning an int (0/1) and
        # evaluating both checks unconditionally.
        return bool(self.check_factor_datatype(factor_name)
                    and self.check_factor_list[factor_name]())

    def check_simulatable_factors(self):
        """Determine if a simulation replication can be run with the given factors.

        Notes
        -----
        Each subclass of ``base.Model`` has its own custom
        ``check_simulatable_factors`` method.

        Returns
        -------
        is_simulatable : bool
            True if model specified by factors is simulatable, otherwise False.
        """
        # Base class imposes no joint-factor constraints.
        return True

    def check_factor_datatype(self, factor_name):
        """Determine if a factor's data type matches its specification.

        Returns
        -------
        is_right_type : bool
            True if factor is of specified data type, otherwise False.
        """
        return isinstance(self.factors[factor_name],
                          self.specifications[factor_name]["datatype"])

    def attach_rng(self, random_rng, copy=True):
        """Attach a list of random-number generators to the model.

        Parameters
        ----------
        random_rng : list [``mrg32k3a.mrg32k3a.MRG32k3a``]
            List of random-number generators used to generate a random
            problem instance.
        copy : bool, default=True
            True to store deep copies of the generators, otherwise store
            them as given.
        """
        if copy:
            self.random_rng = [deepcopy(rng) for rng in random_rng]
        else:
            self.random_rng = random_rng

    def replicate(self, rng_list):
        """Simulate a single replication for the current model factors.

        Parameters
        ----------
        rng_list : list [``mrg32k3a.mrg32k3a.MRG32k3a``]
            RNGs for model to use when simulating a replication.

        Returns
        -------
        responses : dict
            Performance measures of interest.
        gradients : dict [dict]
            Gradient estimate for each response.
        """
        raise NotImplementedError
+ """ + def __init__(self, fixed_factors): + # set factors of the simulation model + # fill in missing factors with default values + super(Auto_Model, self).__init__(fixed_factors) + self.differentiable_factor_names = [] + for key in self.specifications: + if self.specifications[key]["datatype"] == float: + self.differentiable_factor_names.append(key) + self.bi_dict = bi_dict(self.response_names) + + def innner_replicate(self, rng_list): + raise NotImplementedError + + def replicate(self, rng_list, **kwargs): + return replicate_wrapper(self, rng_list, **kwargs) + + +class Solution(object): + """Base class for solutions represented as vectors of decision variables + and dictionaries of decision factors. + + Attributes + ---------- + x : tuple + Vector of decision variables. + dim : int + Number of decision variables describing `x`. + decision_factors : dict + Decision factor names and values. + rng_list : list [``mrg32k3a.mrg32k3a.MRG32k3a``] + RNGs for model to use when running replications at the solution. + n_reps : int + Number of replications run at the solution. + det_objectives : tuple + Deterministic components added to objectives. + det_objectives_gradients : tuple [tuple] + Gradients of deterministic components added to objectives; + # objectives x dimension. + det_stoch_constraints : tuple + Deterministic components added to LHS of stochastic constraints. + det_stoch_constraints_gradients : tuple [tuple] + Gradients of deterministics components added to LHS stochastic constraints; + # stochastic constraints x dimension. + storage_size : int + Max number of replications that can be recorded in current storage. + objectives : numpy array + Objective(s) estimates from each replication; + # replications x # objectives. + objectives_gradients : numpy array + Gradient estimates of objective(s) from each replication; + # replications x # objectives x dimension. 
+ stochastic_constraints : numpy array + Stochastic constraint estimates from each replication; + # replications x # stochastic constraints. + stochastic_constraints_gradients : numpy array + Gradient estimates of stochastic constraints from each replication; + # replications x # stochastic constraints x dimension. + + + Parameters + ---------- + x : tuple + Vector of decision variables. + problem : ``base.Problem`` + Problem to which `x` is a solution. + """ + def __init__(self, x, problem): + super().__init__() + self.x = x + if isinstance(x, int) or isinstance(x, float): + self.dim = 1 + else: + self.dim = len(x) + self.decision_factors = problem.vector_to_factor_dict(x) + self.n_reps = 0 + self.det_objectives, self.det_objectives_gradients = problem.deterministic_objectives_and_gradients(self.x) + self.det_stoch_constraints, self.det_stoch_constraints_gradients = problem.deterministic_stochastic_constraints_and_gradients(self.x) + init_size = 100 # Initialize numpy arrays to store up to 100 replications. + self.storage_size = init_size + # Raw data. 
+ self.objectives = np.zeros((init_size, problem.n_objectives)) + self.objectives_gradients = np.zeros((init_size, problem.n_objectives, problem.dim)) + if problem.n_stochastic_constraints > 0: + self.stoch_constraints = np.zeros((init_size, problem.n_stochastic_constraints)) + self.stoch_constraints_gradients = np.zeros((init_size, problem.n_stochastic_constraints, problem.dim)) + else: + self.stoch_constraints = None + self.stoch_constraints_gradients = None + # Summary statistics + # self.objectives_mean = np.full((problem.n_objectives), np.nan) + # self.objectives_var = np.full((problem.n_objectives), np.nan) + # self.objectives_stderr = np.full((problem.n_objectives), np.nan) + # self.objectives_cov = np.full((problem.n_objectives, problem.n_objectives), np.nan) + # self.objectives_gradients_mean = np.full((problem.n_objectives, problem.dim), np.nan) + # self.objectives_gradients_var = np.full((problem.n_objectives, problem.dim), np.nan) + # self.objectives_gradients_stderr = np.full((problem.n_objectives, problem.dim), np.nan) + # self.objectives_gradients_cov = np.full((problem.n_objectives, problem.dim, problem.dim), np.nan) + # self.stoch_constraints_mean = np.full((problem.n_stochastic_constraints), np.nan) + # self.stoch_constraints_var = np.full((problem.n_stochastic_constraints), np.nan) + # self.stoch_constraints_stderr = np.full((problem.n_stochastic_constraints), np.nan) + # self.stoch_constraints_cov = np.full((problem.n_stochastic_constraints, problem.n_stochastic_constraints), np.nan) + # self.stoch_constraints_gradients_mean = np.full((problem.n_stochastic_constraints, problem.dim), np.nan) + # self.stoch_constraints_gradients_var = np.full((problem.n_stochastic_constraints, problem.dim), np.nan) + # self.stoch_constraints_gradients_stderr = np.full((problem.n_stochastic_constraints, problem.dim), np.nan) + # self.stoch_constraints_gradients_cov = np.full((problem.n_stochastic_constraints, problem.dim, problem.dim), np.nan) + + def 
    def attach_rngs(self, rng_list, copy=True):
        """Attach a list of random-number generators to the solution.

        Parameters
        ----------
        rng_list : list [``mrg32k3a.mrg32k3a.MRG32k3a``]
            List of random-number generators used to run simulation replications.
        copy : bool, default=True
            True if we want to copy the ``mrg32k3a.mrg32k3a.MRG32k3a`` objects, otherwise False.
        """
        if copy:
            # Deep copies decouple this solution's rng state from the caller's.
            self.rng_list = [deepcopy(rng) for rng in rng_list]
        else:
            self.rng_list = rng_list

    def pad_storage(self, m):
        """Append zeros to numpy arrays for summary statistics.

        Parameters
        ----------
        m : int
            Number of replications to simulate.
        """
        # Size of data storage.
        n_objectives = len(self.det_objectives)
        base_pad_size = 100
        # Default is to append space for 100 more replications.
        # If more space needed, append in multiples of 100.
        pad_size = int(np.ceil(m / base_pad_size)) * base_pad_size
        self.storage_size += pad_size
        self.objectives = np.concatenate((self.objectives, np.zeros((pad_size, n_objectives))))
        self.objectives_gradients = np.concatenate((self.objectives_gradients, np.zeros((pad_size, n_objectives, self.dim))))
        if self.stoch_constraints is not None:
            n_stochastic_constraints = len(self.det_stoch_constraints)
            self.stoch_constraints = np.concatenate((self.stoch_constraints, np.zeros((pad_size, n_stochastic_constraints))))
            self.stoch_constraints_gradients = np.concatenate((self.stoch_constraints_gradients, np.zeros((pad_size, n_stochastic_constraints, self.dim))))

    def recompute_summary_statistics(self):
        """Recompute summary statistics of the solution.

        Notes
        -----
        Statistics for gradients of objectives and stochastic constraint LHSs
        are temporarily commented out. Under development.
        """
        # Means are always available; variance-based statistics need at
        # least two replications (ddof=1 sample estimators).
        self.objectives_mean = np.mean(self.objectives[:self.n_reps], axis=0)
        if self.n_reps > 1:
            self.objectives_var = np.var(self.objectives[:self.n_reps], axis=0, ddof=1)
            self.objectives_stderr = np.std(self.objectives[:self.n_reps], axis=0, ddof=1) / np.sqrt(self.n_reps)
            self.objectives_cov = np.cov(self.objectives[:self.n_reps], rowvar=False, ddof=1)
        self.objectives_gradients_mean = np.mean(self.objectives_gradients[:self.n_reps], axis=0)
        if self.n_reps > 1:
            self.objectives_gradients_var = np.var(self.objectives_gradients[:self.n_reps], axis=0, ddof=1)
            self.objectives_gradients_stderr = np.std(self.objectives_gradients[:self.n_reps], axis=0, ddof=1) / np.sqrt(self.n_reps)
            self.objectives_gradients_cov = np.array([np.cov(self.objectives_gradients[:self.n_reps, obj], rowvar=False, ddof=1) for obj in range(len(self.det_objectives))])
        if self.stoch_constraints is not None:
            # NOTE(review): unlike the objectives above, these ddof=1
            # statistics are not guarded by `n_reps > 1` and will produce
            # warnings/NaNs after a single replication — confirm intended.
            self.stoch_constraints_mean = np.mean(self.stoch_constraints[:self.n_reps], axis=0)
            self.stoch_constraints_var = np.var(self.stoch_constraints[:self.n_reps], axis=0, ddof=1)
            self.stoch_constraints_stderr = np.std(self.stoch_constraints[:self.n_reps], axis=0, ddof=1) / np.sqrt(self.n_reps)
            self.stoch_constraints_cov = np.cov(self.stoch_constraints[:self.n_reps], rowvar=False, ddof=1)
            # self.stoch_constraints_gradients_mean = np.mean(self.stoch_constraints_gradients[:self.n_reps], axis=0)
            # self.stoch_constraints_gradients_var = np.var(self.stoch_constraints_gradients[:self.n_reps], axis=0, ddof=1)
            # self.stoch_constraints_gradients_stderr = np.std(self.stoch_constraints_gradients[:self.n_reps], axis=0, ddof=1) / np.sqrt(self.n_reps)
            # self.stoch_constraints_gradients_cov = np.array([np.cov(self.stoch_constraints_gradients[:self.n_reps, stcon], rowvar=False, ddof=1) for stcon in range(len(self.det_stoch_constraints))])
class Curve(object):
    """Base class for all curves.

    Attributes
    ----------
    x_vals : list [float]
        Values of horizontal components.
    y_vals : list [float]
        Values of vertical components.
    n_points : int
        Number of values in x- and y- vectors.

    Parameters
    ----------
    x_vals : list [float]
        Values of horizontal components.
    y_vals : list [float]
        Values of vertical components.
    """
    def __init__(self, x_vals, y_vals):
        # The original merely printed a warning on mismatched lengths and
        # then built a corrupt curve; fail fast instead.
        if len(x_vals) != len(y_vals):
            raise ValueError("Vectors of x- and y- values must be of same length.")
        self.x_vals = x_vals
        self.y_vals = y_vals
        self.n_points = len(x_vals)

    def lookup(self, x):
        """Lookup the y-value of the curve at an intermediate x-value.

        Parameters
        ----------
        x : float
            X-value at which to lookup the y-value.

        Returns
        -------
        y : float
            Y-value corresponding to x (NaN if x precedes the curve).
        """
        if x < self.x_vals[0]:
            return np.nan
        # Step function: y-value of the rightmost breakpoint <= x.
        idx = np.max(np.where(np.array(self.x_vals) <= x))
        return self.y_vals[idx]

    def compute_crossing_time(self, threshold):
        """Compute the first time at which a curve drops below a given threshold.

        Parameters
        ----------
        threshold : float
            Value for which to find first crossing time.

        Returns
        -------
        crossing_time : float
            First time at which a curve drops below threshold
            (infinity if it never does).
        """
        for i in range(self.n_points):
            if self.y_vals[i] < threshold:
                return self.x_vals[i]
        return np.inf

    def compute_area_under_curve(self):
        """Compute the area under a curve.

        Returns
        -------
        area : float
            Area under the (step) curve.
        """
        # Step curve: each y-value holds until the next x breakpoint.
        return np.dot(self.y_vals[:-1], np.diff(self.x_vals))

    def curve_to_mesh(self, mesh):
        """Create a curve defined at equally spaced x values.

        Parameters
        ----------
        mesh : list of floats
            List of uniformly spaced x-values.

        Returns
        -------
        mesh_curve : ``experiment_base.Curve``
            Curve with equally spaced x-values.
        """
        return Curve(x_vals=mesh, y_vals=[self.lookup(x) for x in mesh])

    def curve_to_full_curve(self):
        """Create a curve with duplicate x- and y-values to indicate steps.

        Returns
        -------
        full_curve : ``experiment_base.Curve``
            Curve with duplicate x- and y-values.
        """
        duplicate_x_vals = [x for x in self.x_vals for _ in (0, 1)]
        duplicate_y_vals = [y for y in self.y_vals for _ in (0, 1)]
        return Curve(x_vals=duplicate_x_vals[1:], y_vals=duplicate_y_vals[:-1])

    def plot(self, color_str="C0", curve_type="regular"):
        """Plot a curve.

        Parameters
        ----------
        color_str : str, default="C0"
            String indicating line color, e.g., "C0", "C1", etc.
        curve_type : str, default="regular"
            String indicating type of line: "regular" or "conf_bound".

        Returns
        -------
        handle : list [``matplotlib.lines.Line2D``]
            Curve handle, to use when creating legends.

        Raises
        ------
        ValueError
            If `curve_type` is not recognized (the original crashed with
            UnboundLocalError instead).
        """
        if curve_type == "regular":
            linestyle, linewidth = "-", 2
        elif curve_type == "conf_bound":
            linestyle, linewidth = "--", 1
        else:
            raise ValueError(f"Unrecognized curve_type '{curve_type}'.")
        handle, = plt.step(self.x_vals,
                           self.y_vals,
                           color=color_str,
                           linestyle=linestyle,
                           linewidth=linewidth,
                           where="post"
                           )
        return handle


def mean_of_curves(curves):
    """Compute pointwise (w.r.t. x-values) mean of curves.
    Starting and ending x-values must coincide for all curves.

    Parameters
    ----------
    curves : list [``experiment_base.Curve``]
        Collection of curves to aggregate.

    Returns
    -------
    mean_curve : ``experiment_base.Curve``
        Mean curve.
    """
    unique_x_vals = np.unique([x_val for curve in curves for x_val in curve.x_vals])
    mean_y_vals = [np.mean([curve.lookup(x_val) for curve in curves]) for x_val in unique_x_vals]
    return Curve(x_vals=unique_x_vals.tolist(), y_vals=mean_y_vals)


def quantile_of_curves(curves, beta):
    """Compute pointwise (w.r.t. x values) quantile of curves.
    Starting and ending x values must coincide for all curves.

    Parameters
    ----------
    curves : list [``experiment_base.Curve``]
        Collection of curves to aggregate.
    beta : float
        Quantile level.

    Returns
    -------
    quantile_curve : ``experiment_base.Curve``
        Quantile curve.
    """
    unique_x_vals = np.unique([x_val for curve in curves for x_val in curve.x_vals])
    quantile_y_vals = [np.quantile([curve.lookup(x_val) for curve in curves], q=beta) for x_val in unique_x_vals]
    return Curve(x_vals=unique_x_vals.tolist(), y_vals=quantile_y_vals)


def cdf_of_curves_crossing_times(curves, threshold):
    """Compute the cdf of crossing times of curves.

    Parameters
    ----------
    curves : list [``experiment_base.Curve``]
        Collection of curves to aggregate.
    threshold : float
        Value for which to find first crossing time.

    Returns
    -------
    cdf_curve : ``experiment_base.Curve``
        CDF of crossing times.
    """
    n_curves = len(curves)
    crossing_times = [curve.compute_crossing_time(threshold) for curve in curves]
    # NOTE(review): padding with 0 and 1 assumes budgets are normalized to
    # [0, 1] (crossing times <= 1); otherwise the x-values are unsorted.
    unique_x_vals = [0] + list(np.unique([crossing_time for crossing_time in crossing_times if crossing_time < np.inf])) + [1]
    cdf_y_vals = [sum(crossing_time <= x_val for crossing_time in crossing_times) / n_curves for x_val in unique_x_vals]
    return Curve(x_vals=unique_x_vals, y_vals=cdf_y_vals)


def quantile_cross_jump(curves, threshold, beta):
    """Compute a simple curve with a jump at the quantile of the crossing times.

    Parameters
    ----------
    curves : list [``experiment_base.Curve``]
        Collection of curves to aggregate.
    threshold : float
        Value for which to find first crossing time.
    beta : float
        Quantile level.

    Returns
    -------
    jump_curve : ``experiment_base.Curve``
        Piecewise-constant curve with a jump at the quantile crossing time (if finite).
    """
    solve_time_quantile = np.quantile([curve.compute_crossing_time(threshold=threshold) for curve in curves], q=beta)
    # np.quantile evaluates to np.nan if forced to interpolate between a
    # finite and an infinite value. These are rare cases. Since crossing
    # times are non-negative, such a quantile is treated as "never crossed".
    if solve_time_quantile == np.inf or np.isnan(solve_time_quantile):
        return Curve(x_vals=[0, 1], y_vals=[0, 0])
    return Curve(x_vals=[0, solve_time_quantile, 1], y_vals=[0, 1, 1])


def difference_of_curves(curve1, curve2):
    """Compute the difference of two curves (Curve 1 - Curve 2).

    Parameters
    ----------
    curve1, curve2 : ``experiment_base.Curve``
        Curves to take the difference of.

    Returns
    -------
    difference_curve : ``experiment_base.Curve``
        Difference of curves.
    """
    unique_x_vals = np.unique(curve1.x_vals + curve2.x_vals)
    difference_y_vals = [(curve1.lookup(x_val) - curve2.lookup(x_val)) for x_val in unique_x_vals]
    return Curve(x_vals=unique_x_vals.tolist(), y_vals=difference_y_vals)


def max_difference_of_curves(curve1, curve2):
    """Compute the maximum difference of two curves (Curve 1 - Curve 2).

    Parameters
    ----------
    curve1, curve2 : ``experiment_base.Curve``
        Curves to take the difference of.

    Returns
    -------
    max_diff : float
        Maximum difference of curves.
    """
    return max(difference_of_curves(curve1, curve2).y_vals)
+ n_postreps_init_opt : int + Number of postreplications to take at initial solution (x0) and + optimal solution (x*). + crn_across_init_opt : bool + True if CRN used for post-replications at solutions x0 and x*, otherwise False. + x0 : tuple + Initial solution (x0). + x0_postreps : list + Post-replicates at x0. + xstar : tuple + Proxy for optimal solution (x*). + xstar_postreps : list + Post-replicates at x*. + objective_curves : list [``experiment_base.Curve``] + Curves of estimated objective function values, + one for each macroreplication. + progress_curves : list [``experiment_base.Curve``] + Progress curves, one for each macroreplication. + + Parameters + ---------- + solver_name : str, optional + Name of solver. + problem_name : str, optional + Name of problem. + solver_rename : str, optional + User-specified name for solver. + problem_rename : str, optional + User-specified name for problem. + solver : ``base.Solver``, optional + Simulation-optimization solver. + problem : ``base.Problem``, optional + Simulation-optimization problem. + solver_fixed_factors : dict, optional + Dictionary of user-specified solver factors. + problem_fixed_factors : dict, optional + Dictionary of user-specified problem factors. + model_fixed_factors : dict, optional + Dictionary of user-specified model factors. + file_name_path : str, optional + Path of .pickle file for saving ``experiment_base.ProblemSolver`` objects. + """ + def __init__(self, solver_name=None, problem_name=None, solver_rename=None, problem_rename=None, solver=None, problem=None, solver_fixed_factors=None, problem_fixed_factors=None, model_fixed_factors=None, file_name_path=None): + """There are two ways to create a ProblemSolver object: + 1. Provide the names of the solver and problem to look up in ``directory.py``. + 2. Provide the solver and problem objects to pair. + """ + # Handle unassigned arguments. 
+ if solver_fixed_factors is None: + solver_fixed_factors = {} + if problem_fixed_factors is None: + problem_fixed_factors = {} + if model_fixed_factors is None: + model_fixed_factors = {} + # Initialize solver. + if solver is not None: # Method #2 + self.solver = solver + elif solver_rename is None: # Method #1 + self.solver = solver_directory[solver_name](fixed_factors=solver_fixed_factors) + else: # Method #1 + self.solver = solver_directory[solver_name](name=solver_rename, fixed_factors=solver_fixed_factors) + # Initialize problem. + if problem is not None: # Method #2 + self.problem = problem + elif problem_rename is None: # Method #1 + self.problem = problem_directory[problem_name](fixed_factors=problem_fixed_factors, model_fixed_factors=model_fixed_factors) + else: # Method #1 + self.problem = problem_directory[problem_name](name=problem_rename, fixed_factors=problem_fixed_factors, model_fixed_factors=model_fixed_factors) + # Initialize file path. + if file_name_path is None: + self.file_name_path = f"./experiments/outputs/{self.solver.name}_on_{self.problem.name}.pickle" + else: + self.file_name_path = file_name_path + + def check_compatibility(self): + """Check whether the experiment's solver and problem are compatible. + + Returns + ------- + error_str : str + Error message in the event problem and solver are incompatible. + """ + error_str = "" + # Check number of objectives. + if self.solver.objective_type == "single" and self.problem.n_objectives > 1: + error_str += "Solver cannot solve a multi-objective problem.\n" + elif self.solver.objective_type == "multi" and self.problem.n_objectives == 1: + error_str += "Multi-objective solver being run on a single-objective problem.\n" + # Check constraint types. 
+ constraint_types = ["unconstrained", "box", "deterministic", "stochastic"] + if constraint_types.index(self.solver.constraint_type) < constraint_types.index(self.problem.constraint_type): + error_str += "Solver can handle upto " + self.solver.constraint_type + " constraints, but problem has " + self.problem.constraint_type + " constraints.\n" + # Check variable types. + if self.solver.variable_type == "discrete" and self.problem.variable_type != "discrete": + error_str += "Solver is for discrete variables but problem variables are " + self.problem.variable_type + ".\n" + elif self.solver.variable_type == "continuous" and self.problem.variable_type != "continuous": + error_str += "Solver is for continuous variables but problem variables are " + self.problem.variable_type + ".\n" + # Check for existence of gradient estimates. + if self.solver.gradient_needed and not self.problem.gradient_available: + error_str += "Gradient-based solver does not have access to gradient for this problem.\n" + return error_str + + def run(self, n_macroreps): + """Run n_macroreps of the solver on the problem. + + Notes + ----- + RNGs dedicated for random problem instances and temporarily unused. + Under development. + + Parameters + ---------- + n_macroreps : int + Number of macroreplications of the solver to run on the problem. + """ + self.n_macroreps = n_macroreps + self.all_recommended_xs = [] + self.all_intermediate_budgets = [] + self.timings = [] + # Create, initialize, and attach random number generators + # Stream 0: reserved for taking post-replications + # Stream 1: reserved for bootstrapping + # Stream 2: reserved for overhead ... 
+ # Substream 0: rng for random problem instance + # Substream 1: rng for random initial solution x0 and + # restart solutions + # Substream 2: rng for selecting random feasible solutions + # Substream 3: rng for solver's internal randomness + # Streams 3, 4, ..., n_macroreps + 2: reserved for + # macroreplications + rng0 = MRG32k3a(s_ss_sss_index=[2, 0, 0]) # Currently unused. + rng1 = MRG32k3a(s_ss_sss_index=[2, 1, 0]) + rng2 = MRG32k3a(s_ss_sss_index=[2, 2, 0]) + rng3 = MRG32k3a(s_ss_sss_index=[2, 3, 0]) + self.solver.attach_rngs([rng1, rng2, rng3]) + # Run n_macroreps of the solver on the problem. + # Report recommended solutions and corresponding intermediate budgets. + for mrep in range(self.n_macroreps): + print(f"Running macroreplication {mrep + 1} of {self.n_macroreps} of Solver {self.solver.name} on Problem {self.problem.name}.") + # Create, initialize, and attach RNGs used for simulating solutions. + progenitor_rngs = [MRG32k3a(s_ss_sss_index=[mrep + 2, ss, 0]) for ss in range(self.problem.model.n_rngs)] + self.solver.solution_progenitor_rngs = progenitor_rngs + # print([rng.s_ss_sss_index for rng in progenitor_rngs]) + # Run the solver on the problem. + tic = time.perf_counter() + recommended_solns, intermediate_budgets = self.solver.solve(problem=self.problem) + toc = time.perf_counter() + # Record the run time of the macroreplication. + self.timings.append(toc - tic) + # Trim solutions recommended after final budget. + recommended_solns, intermediate_budgets = trim_solver_results(problem=self.problem, recommended_solns=recommended_solns, intermediate_budgets=intermediate_budgets) + # Extract decision-variable vectors (x) from recommended solutions. + # Record recommended solutions and intermediate budgets. + self.all_recommended_xs.append([solution.x for solution in recommended_solns]) + self.all_intermediate_budgets.append(intermediate_budgets) + # Save ProblemSolver object to .pickle file. 
+ self.record_experiment_results() + + def check_run(self): + """Check if the experiment has been run. + + Returns + ------- + ran : bool + True if the experiment been run, otherwise False. + """ + if getattr(self, "all_recommended_xs", None) is None: + ran = False + else: + ran = True + return ran + + def post_replicate(self, n_postreps, crn_across_budget=True, crn_across_macroreps=False): + """Run postreplications at solutions recommended by the solver. + + Parameters + ---------- + n_postreps : int + Number of postreplications to take at each recommended solution. + crn_across_budget : bool, default=True + True if CRN used for post-replications at solutions recommended at different times, + otherwise False. + crn_across_macroreps : bool, default=False + True if CRN used for post-replications at solutions recommended on different + macroreplications, otherwise False. + """ + self.n_postreps = n_postreps + self.crn_across_budget = crn_across_budget + self.crn_across_macroreps = crn_across_macroreps + # Create, initialize, and attach RNGs for model. + # Stream 0: reserved for post-replications. + # Skip over first set of substreams dedicated for sampling x0 and x*. + baseline_rngs = [MRG32k3a(s_ss_sss_index=[0, self.problem.model.n_rngs + rng_index, 0]) for rng_index in range(self.problem.model.n_rngs)] + # Initialize matrix containing + # all postreplicates of objective, + # for each macroreplication, + # for each budget. + self.all_post_replicates = [[[] for _ in range(len(self.all_intermediate_budgets[mrep]))] for mrep in range(self.n_macroreps)] + # Simulate intermediate recommended solutions. 
        for mrep in range(self.n_macroreps):
            for budget_index in range(len(self.all_intermediate_budgets[mrep])):
                x = self.all_recommended_xs[mrep][budget_index]
                # Rebuild a Solution so postreplicates are independent of the
                # replicates taken while the solver was running.
                fresh_soln = Solution(x, self.problem)
                fresh_soln.attach_rngs(rng_list=baseline_rngs, copy=False)
                self.problem.simulate(solution=fresh_soln, m=self.n_postreps)
                # Store results
                self.all_post_replicates[mrep][budget_index] = list(fresh_soln.objectives[:fresh_soln.n_reps][:, 0])  # 0 <- assuming only one objective
                if crn_across_budget:
                    # Reset each rng to start of its current substream.
                    for rng in baseline_rngs:
                        rng.reset_substream()
            if crn_across_macroreps:
                # Reset each rng to start of its current substream.
                for rng in baseline_rngs:
                    rng.reset_substream()
            else:
                # Advance each rng to start of
                # substream = current substream + # of model RNGs.
                for rng in baseline_rngs:
                    for _ in range(self.problem.model.n_rngs):
                        rng.advance_substream()
        # Store estimated objective for each macrorep for each budget.
        self.all_est_objectives = [[np.mean(self.all_post_replicates[mrep][budget_index]) for budget_index in range(len(self.all_intermediate_budgets[mrep]))] for mrep in range(self.n_macroreps)]
        # Save ProblemSolver object to .pickle file.
        self.record_experiment_results()

    def check_postreplicate(self):
        """Check if the experiment has been postreplicated.

        Returns
        -------
        postreplicated : bool
            True if the experiment has been postreplicated, otherwise False.
        """
        # post_replicate() populates all_est_objectives.
        if getattr(self, "all_est_objectives", None) is None:
            postreplicated = False
        else:
            postreplicated = True
        return postreplicated

    def check_postnormalize(self):
        """Check if the experiment has been postnormalized.

        Returns
        -------
        postnormalized : bool
            True if the experiment has been postnormalized, otherwise False.
        """
        # post_normalize() stores n_postreps_init_opt on the experiment.
        if getattr(self, "n_postreps_init_opt", None) is None:
            postnormalized = False
        else:
            postnormalized = True
        return postnormalized

    def bootstrap_sample(self, bootstrap_rng, normalize=True):
        """Generate a bootstrap sample of estimated objective curves
        or estimated progress curves.

        Parameters
        ----------
        bootstrap_rng : ``mrg32k3a.mrg32k3a.MRG32k3a``
            Random number generator to use for bootstrapping.
        normalize : bool, default=True
            True if progress curves are to be normalized w.r.t.
            optimality gaps, otherwise False.

        Returns
        -------
        bootstrap_curves : list [``experiment_base.Curve``]
            Bootstrapped estimated objective curves or estimated progress
            curves of all solutions from all bootstrapped macroreplications.
        """
        bootstrap_curves = []
        # Uniformly resample M macroreplications (with replacement) from 0, 1, ..., M-1.
        # Subsubstream 0: reserved for this outer-level bootstrapping.
        bs_mrep_idxs = bootstrap_rng.choices(range(self.n_macroreps), k=self.n_macroreps)
        # Advance RNG subsubstream to prepare for inner-level bootstrapping.
        bootstrap_rng.advance_subsubstream()
        # Subsubstream 1: reserved for bootstrapping at x0 and x*.
        # Bootstrap sample post-replicates at common x0.
        # Uniformly resample L postreps (with replacement) from 0, 1, ..., L-1.
        bs_postrep_idxs = bootstrap_rng.choices(range(self.n_postreps_init_opt), k=self.n_postreps_init_opt)
        # Compute the mean of the resampled postreplications.
        bs_initial_obj_val = np.mean([self.x0_postreps[postrep] for postrep in bs_postrep_idxs])
        # Reset subsubstream if using CRN across budgets.
        # This means the same postreplication indices will be used for resampling at x0 and x*.
        if self.crn_across_init_opt:
            bootstrap_rng.reset_subsubstream()
        # Bootstrap sample postreplicates at reference optimal solution x*.
        # Uniformly resample L postreps (with replacement) from 0, 1, ..., L-1.
        bs_postrep_idxs = bootstrap_rng.choices(range(self.n_postreps_init_opt), k=self.n_postreps_init_opt)
        # Compute the mean of the resampled postreplications.
        bs_optimal_obj_val = np.mean([self.xstar_postreps[postrep] for postrep in bs_postrep_idxs])
        # Compute initial optimality gap.
        bs_initial_opt_gap = bs_initial_obj_val - bs_optimal_obj_val
        # Advance RNG subsubstream to prepare for inner-level bootstrapping.
        # Will now be at start of subsubstream 2.
        bootstrap_rng.advance_subsubstream()
        # Bootstrap within each bootstrapped macroreplication.
        # Option 1: Simpler (default) CRN scheme, which makes for faster code.
        if self.crn_across_budget and not self.crn_across_macroreps:
            for idx in range(self.n_macroreps):
                mrep = bs_mrep_idxs[idx]
                # Inner-level bootstrapping over intermediate recommended solutions.
                est_objectives = []
                # Same postreplication indices for all intermediate budgets on
                # a given macroreplication.
                bs_postrep_idxs = bootstrap_rng.choices(range(self.n_postreps), k=self.n_postreps)
                for budget in range(len(self.all_intermediate_budgets[mrep])):
                    # If solution is x0...
                    if self.all_recommended_xs[mrep][budget] == self.x0:
                        est_objectives.append(bs_initial_obj_val)
                    # ...else if solution is x*...
                    elif self.all_recommended_xs[mrep][budget] == self.xstar:
                        est_objectives.append(bs_optimal_obj_val)
                    # ... else solution other than x0 or x*.
                    else:
                        # Compute the mean of the resampled postreplications.
                        est_objectives.append(np.mean([self.all_post_replicates[mrep][budget][postrep] for postrep in bs_postrep_idxs]))
                # Record objective or progress curve.
                if normalize:
                    frac_intermediate_budgets = [budget / self.problem.factors["budget"] for budget in self.all_intermediate_budgets[mrep]]
                    norm_est_objectives = [(est_objective - bs_optimal_obj_val) / bs_initial_opt_gap for est_objective in est_objectives]
                    new_progress_curve = Curve(x_vals=frac_intermediate_budgets, y_vals=norm_est_objectives)
                    bootstrap_curves.append(new_progress_curve)
                else:
                    new_objective_curve = Curve(x_vals=self.all_intermediate_budgets[mrep], y_vals=est_objectives)
                    bootstrap_curves.append(new_objective_curve)
        # Option 2: Non-default CRN behavior.
        else:
            for idx in range(self.n_macroreps):
                mrep = bs_mrep_idxs[idx]
                # Inner-level bootstrapping over intermediate recommended solutions.
                est_objectives = []
                for budget in range(len(self.all_intermediate_budgets[mrep])):
                    # If solution is x0...
                    if self.all_recommended_xs[mrep][budget] == self.x0:
                        est_objectives.append(bs_initial_obj_val)
                    # ...else if solution is x*...
                    elif self.all_recommended_xs[mrep][budget] == self.xstar:
                        est_objectives.append(bs_optimal_obj_val)
                    # ... else solution other than x0 or x*.
                    else:
                        # Uniformly resample N postreps (with replacement) from 0, 1, ..., N-1.
                        bs_postrep_idxs = bootstrap_rng.choices(range(self.n_postreps), k=self.n_postreps)
                        # Compute the mean of the resampled postreplications.
                        est_objectives.append(np.mean([self.all_post_replicates[mrep][budget][postrep] for postrep in bs_postrep_idxs]))
                    # Reset subsubstream if using CRN across budgets.
                    if self.crn_across_budget:
                        bootstrap_rng.reset_subsubstream()
                # If using CRN across macroreplications...
                if self.crn_across_macroreps:
                    # ...reset subsubstreams...
                    bootstrap_rng.reset_subsubstream()
                # ...else if not using CRN across macrorep...
                else:
                    # ...advance subsubstream.
                    bootstrap_rng.advance_subsubstream()
                # Record objective or progress curve.
                if normalize:
                    frac_intermediate_budgets = [budget / self.problem.factors["budget"] for budget in self.all_intermediate_budgets[mrep]]
                    norm_est_objectives = [(est_objective - bs_optimal_obj_val) / bs_initial_opt_gap for est_objective in est_objectives]
                    new_progress_curve = Curve(x_vals=frac_intermediate_budgets, y_vals=norm_est_objectives)
                    bootstrap_curves.append(new_progress_curve)
                else:
                    new_objective_curve = Curve(x_vals=self.all_intermediate_budgets[mrep], y_vals=est_objectives)
                    bootstrap_curves.append(new_objective_curve)
        return bootstrap_curves

    def clear_run(self):
        """Delete results from ``run()`` method and any downstream results.
        """
        attributes = ["n_macroreps",
                      "all_recommended_xs",
                      "all_intermediate_budgets"]
        for attribute in attributes:
            try:
                delattr(self, attribute)
            except Exception:
                # Attribute was never set; nothing to clear.
                pass
        self.clear_postreplicate()

    def clear_postreplicate(self):
        """Delete results from ``post_replicate()`` method and any downstream results.
        """
        attributes = ["n_postreps",
                      "crn_across_budget",
                      "crn_across_macroreps",
                      "all_post_replicates",
                      "all_est_objectives"]
        for attribute in attributes:
            try:
                delattr(self, attribute)
            except Exception:
                # Attribute was never set; nothing to clear.
                pass
        self.clear_postnorm()

    def clear_postnorm(self):
        """Delete results from ``post_normalize()`` associated with experiment.
        """
        attributes = ["n_postreps_init_opt",
                      "crn_across_init_opt",
                      "x0",
                      "x0_postreps",
                      "xstar",
                      "xstar_postreps",
                      "objective_curves",
                      "progress_curves"
                      ]
        for attribute in attributes:
            try:
                delattr(self, attribute)
            except Exception:
                # Attribute was never set; nothing to clear.
                pass

    def record_experiment_results(self):
        """Save ``experiment_base.ProblemSolver`` object to .pickle file.
        """
        # Create directories if they do not exist.
+ if "./experiments/outputs" in self.file_name_path and not os.path.exists("./experiments/outputs"): + os.makedirs("./experiments", exist_ok=True) + os.makedirs("./experiments/outputs") + elif "./data_farming_experiments/outputs" in self.file_name_path and not os.path.exists("./data_farming_experiments/outputs"): + os.makedirs("./data_farming_experiments", exist_ok=True) + os.makedirs("./data_farming_experiments/outputs") + with open(self.file_name_path, "wb") as file: + pickle.dump(self, file, pickle.HIGHEST_PROTOCOL) + + def log_experiment_results(self, print_solutions=True): + """Create readable .txt file from a problem-solver pair's .pickle file. + """ + # Create a new text file in experiments/logs folder with correct name. + new_path = self.file_name_path.replace("outputs", "logs") # Adjust file_path_name to correct folder. + new_path2 = new_path.replace(".pickle", "") # Remove .pickle from .txt file name. + + # Create directories if they do no exist. + if "./experiments/logs" in new_path2 and not os.path.exists("./experiments/logs"): + os.makedirs("./experiments", exist_ok=True) + os.makedirs("./experiments/logs") + + with open(new_path2 + "_experiment_results.txt", "w") as file: + # Title txt file with experiment information. + file.write(self.file_name_path) + file.write('\n') + file.write(f"Problem: {self.problem.name}\n") + file.write(f"Solver: {self.solver.name}\n\n") + + # Display model factors. + file.write("Model Factors:\n") + for key, value in self.problem.model.factors.items(): + # Excluding model factors corresponding to decision variables. + if key not in self.problem.model_decision_factors: + file.write(f"\t{key}: {value}\n") + file.write("\n") + # Display problem factors. + file.write("Problem Factors:\n") + for key, value in self.problem.factors.items(): + file.write(f"\t{key}: {value}\n") + file.write("\n") + # Display solver factors. 
+ file.write("Solver Factors:\n") + for key, value in self.solver.factors.items(): + file.write(f"\t{key}: {value}\n") + file.write("\n") + + # Display macroreplication information. + file.write(f"{self.n_macroreps} macroreplications were run.\n") + # If results have been postreplicated, list the number of post-replications. + if self.check_postreplicate(): + file.write(f"{self.n_postreps} postreplications were run at each recommended solution.\n\n") + # If post-normalized, state initial solution (x0) and proxy optimal solution (x_star) + # and how many replications were taken of them (n_postreps_init_opt). + if self.check_postnormalize(): + file.write(f"The initial solution is {tuple([round(x, 4) for x in self.x0])}. Its estimated objective is {round(np.mean(self.x0_postreps), 4)}.\n") + file.write(f"The proxy optimal solution is {tuple([round(x, 4) for x in self.xstar])}. Its estimated objective is {round(np.mean(self.xstar_postreps), 4)}.\n") + file.write(f"{self.n_postreps_init_opt} postreplications were taken at x0 and x_star.\n\n") + # Display recommended solution at each budget value for each macroreplication. + file.write('Macroreplication Results:\n') + for mrep in range(self.n_macroreps): + file.write(f"\nMacroreplication {mrep + 1}:\n") + for budget in range(len(self.all_intermediate_budgets[mrep])): + file.write(f"\tBudget: {round(self.all_intermediate_budgets[mrep][budget], 4)}") + # Optionally print solutions. + if print_solutions: + file.write(f"\tRecommended Solution: {tuple([round(x, 4) for x in self.all_recommended_xs[mrep][budget]])}") + # If postreplicated, add estimated objective function values. 
+ if self.check_postreplicate(): + file.write(f"\tEstimated Objective: {round(self.all_est_objectives[mrep][budget], 4)}\n") + file.write(f"\tThe time taken to complete this macroreplication was {round(self.timings[mrep], 2)} s.\n") + file.close() + + +def trim_solver_results(problem, recommended_solns, intermediate_budgets): + """Trim solutions recommended by solver after problem's max budget. + + Parameters + ---------- + problem : ``base.Problem`` + Problem object on which the solver was run. + recommended_solutions : list [``base.Solution``] + Solutions recommended by the solver. + intermediate_budgets : list [int] + Intermediate budgets at which solver recommended different solutions. + """ + # Remove solutions corresponding to intermediate budgets exceeding max budget. + invalid_idxs = [idx for idx, element in enumerate(intermediate_budgets) if element > problem.factors["budget"]] + for invalid_idx in sorted(invalid_idxs, reverse=True): + del recommended_solns[invalid_idx] + del intermediate_budgets[invalid_idx] + # If no solution is recommended at the final budget, + # re-recommend the latest recommended solution. + # (Necessary for clean plotting of progress curves.) + if intermediate_budgets[-1] < problem.factors["budget"]: + recommended_solns.append(recommended_solns[-1]) + intermediate_budgets.append(problem.factors["budget"]) + return recommended_solns, intermediate_budgets + + +def read_experiment_results(file_name_path): + """Read in ``experiment_base.ProblemSolver`` object from .pickle file. + + Parameters + ---------- + file_name_path : str + Path of .pickle file for reading ``experiment_base.ProblemSolver`` object. + + Returns + ------- + experiment : ``experiment_base.ProblemSolver`` + Problem-solver pair that has been run or has been post-processed. 
+ """ + with open(file_name_path, "rb") as file: + experiment = pickle.load(file) + return experiment + + +def post_normalize(experiments, n_postreps_init_opt, crn_across_init_opt=True, proxy_init_val=None, proxy_opt_val=None, proxy_opt_x=None): + """Construct objective curves and (normalized) progress curves + for a collection of experiments on a given problem. + + Parameters + ---------- + experiments : list [``experiment_base.ProblemSolver``] + Problem-solver pairs of different solvers on a common problem. + n_postreps_init_opt : int + Number of postreplications to take at initial x0 and optimal x*. + crn_across_init_opt : bool, default=True + True if CRN used for post-replications at solutions x0 and x*, otherwise False. + proxy_init_val : float, optional + Known objective function value of initial solution. + proxy_opt_val : float, optional + Proxy for or bound on optimal objective function value. + proxy_opt_x : tuple, optional + Proxy for optimal solution. + """ + # Check that all experiments have the same problem and same + # post-experimental setup. + ref_experiment = experiments[0] + for experiment in experiments: + # Check if problems are the same. + if experiment.problem != ref_experiment.problem: + print("At least two experiments have different problem instances.") + # Check if experiments have common number of macroreps. + if experiment.n_macroreps != ref_experiment.n_macroreps: + print("At least two experiments have different numbers of macro-replications.") + # Check if experiment has been post-replicated and with common number of postreps. 
+ if getattr(experiment, "n_postreps", None) is None: + print(f"The experiment of {experiment.solver.name} on {experiment.problem.name} has not been post-replicated.") + elif getattr(experiment, "n_postreps", None) != getattr(ref_experiment, "n_postreps", None): + print("At least two experiments have different numbers of post-replications.") + print("Estimation of optimal solution x* may be based on different numbers of post-replications.") + # Take post-replications at common x0. + # Create, initialize, and attach RNGs for model. + # Stream 0: reserved for post-replications. + baseline_rngs = [MRG32k3a(s_ss_sss_index=[0, rng_index, 0]) for rng_index in range(experiment.problem.model.n_rngs)] + x0 = ref_experiment.problem.factors["initial_solution"] + if proxy_init_val is not None: + x0_postreps = [proxy_init_val] * n_postreps_init_opt + else: + initial_soln = Solution(x0, ref_experiment.problem) + initial_soln.attach_rngs(rng_list=baseline_rngs, copy=False) + ref_experiment.problem.simulate(solution=initial_soln, m=n_postreps_init_opt) + x0_postreps = list(initial_soln.objectives[:n_postreps_init_opt][:, 0]) # 0 <- assuming only one objective + if crn_across_init_opt: + # Reset each rng to start of its current substream. + for rng in baseline_rngs: + rng.reset_substream() + # Determine (proxy for) optimal solution and/or (proxy for) its + # objective function value. If deterministic (proxy for) f(x*), + # create duplicate post-replicates to facilitate later bootstrapping. + # If proxy for f(x*) is specified... + if proxy_opt_val is not None: + xstar = None + xstar_postreps = [proxy_opt_val] * n_postreps_init_opt + # ...else if proxy for x* is specified... + elif proxy_opt_x is not None: + xstar = proxy_opt_x + # Take post-replications at xstar. 
+ opt_soln = Solution(xstar, ref_experiment.problem) + opt_soln.attach_rngs(rng_list=baseline_rngs, copy=False) + ref_experiment.problem.simulate(solution=opt_soln, m=n_postreps_init_opt) + xstar_postreps = list(opt_soln.objectives[:n_postreps_init_opt][:, 0]) # 0 <- assuming only one objective + # ...else if f(x*) is known... + elif ref_experiment.problem.optimal_value is not None: + xstar = None + xstar_postreps = [ref_experiment.problem.optimal_value] * n_postreps_init_opt + # ...else if x* is known... + elif ref_experiment.problem.optimal_solution is not None: + xstar = ref_experiment.problem.optimal_solution + # Take post-replications at xstar. + opt_soln = Solution(xstar, ref_experiment.problem) + opt_soln.attach_rngs(rng_list=baseline_rngs, copy=False) + ref_experiment.problem.simulate(solution=opt_soln, m=n_postreps_init_opt) + xstar_postreps = list(opt_soln.objectives[:n_postreps_init_opt][:, 0]) # 0 <- assuming only one objective + # ...else determine x* empirically as estimated best solution + # found by any solver on any macroreplication. + else: + # TO DO: Simplify this block of code. 
+ best_est_objectives = np.zeros(len(experiments)) + for experiment_idx in range(len(experiments)): + experiment = experiments[experiment_idx] + exp_best_est_objectives = np.zeros(experiment.n_macroreps) + for mrep in range(experiment.n_macroreps): + exp_best_est_objectives[mrep] = np.max(experiment.problem.minmax[0] * np.array(experiment.all_est_objectives[mrep])) + best_est_objectives[experiment_idx] = np.max(exp_best_est_objectives) + best_experiment_idx = np.argmax(best_est_objectives) + best_experiment = experiments[best_experiment_idx] + best_exp_best_est_objectives = np.zeros(experiment.n_macroreps) + for mrep in range(best_experiment.n_macroreps): + best_exp_best_est_objectives[mrep] = np.max(best_experiment.problem.minmax[0] * np.array(best_experiment.all_est_objectives[mrep])) + best_mrep = np.argmax(best_exp_best_est_objectives) + best_budget_idx = np.argmax(experiment.problem.minmax[0] * np.array(best_experiment.all_est_objectives[best_mrep])) + xstar = best_experiment.all_recommended_xs[best_mrep][best_budget_idx] + # Take post-replications at x*. + opt_soln = Solution(xstar, ref_experiment.problem) + opt_soln.attach_rngs(rng_list=baseline_rngs, copy=False) + ref_experiment.problem.simulate(solution=opt_soln, m=n_postreps_init_opt) + xstar_postreps = list(opt_soln.objectives[:n_postreps_init_opt][:, 0]) # 0 <- assuming only one objective + # Compute signed initial optimality gap = f(x0) - f(x*). + initial_obj_val = np.mean(x0_postreps) + opt_obj_val = np.mean(xstar_postreps) + initial_opt_gap = initial_obj_val - opt_obj_val + # Store x0 and x* info and compute progress curves for each ProblemSolver. + for experiment in experiments: + # DOUBLE-CHECK FOR SHALLOW COPY ISSUES. 
+ experiment.n_postreps_init_opt = n_postreps_init_opt + experiment.crn_across_init_opt = crn_across_init_opt + experiment.x0 = x0 + experiment.x0_postreps = x0_postreps + experiment.xstar = xstar + experiment.xstar_postreps = xstar_postreps + # Construct objective and progress curves. + experiment.objective_curves = [] + experiment.progress_curves = [] + for mrep in range(experiment.n_macroreps): + est_objectives = [] + # Substitute estimates at x0 and x* (based on N postreplicates) + # with new estimates (based on L postreplicates). + for budget in range(len(experiment.all_intermediate_budgets[mrep])): + if experiment.all_recommended_xs[mrep][budget] == x0: + est_objectives.append(np.mean(x0_postreps)) + elif experiment.all_recommended_xs[mrep][budget] == xstar: + est_objectives.append(np.mean(xstar_postreps)) + else: + est_objectives.append(experiment.all_est_objectives[mrep][budget]) + experiment.objective_curves.append(Curve(x_vals=experiment.all_intermediate_budgets[mrep], y_vals=est_objectives)) + # Normalize by initial optimality gap. + norm_est_objectives = [(est_objective - opt_obj_val) / initial_opt_gap for est_objective in est_objectives] + frac_intermediate_budgets = [budget / experiment.problem.factors["budget"] for budget in experiment.all_intermediate_budgets[mrep]] + experiment.progress_curves.append(Curve(x_vals=frac_intermediate_budgets, y_vals=norm_est_objectives)) + # Save ProblemSolver object to .pickle file. + experiment.record_experiment_results() + + +def bootstrap_sample_all(experiments, bootstrap_rng, normalize=True): + """Generate bootstrap samples of estimated progress curves (normalized + and unnormalized) from a set of experiments. + + Parameters + ---------- + experiments : list [list [``experiment_base.ProblemSolver``]] + Problem-solver pairs of different solvers and/or problems. + bootstrap_rng : ``mrg32k3a.mrg32k3a.MRG32k3a`` + Random number generator to use for bootstrapping. 
    normalize : bool, default=True
        True if progress curves are to be normalized w.r.t. optimality gaps,
        otherwise False.

    Returns
    -------
    bootstrap_curves : list [list [list [``experiment_base.Curve``]]]
        Bootstrapped estimated objective curves or estimated progress curves
        of all solutions from all macroreplications.
    """
    n_solvers = len(experiments)
    n_problems = len(experiments[0])
    bootstrap_curves = [[[] for _ in range(n_problems)] for _ in range(n_solvers)]
    # Obtain a bootstrap sample from each experiment.
    for solver_idx in range(n_solvers):
        for problem_idx in range(n_problems):
            experiment = experiments[solver_idx][problem_idx]
            bootstrap_curves[solver_idx][problem_idx] = experiment.bootstrap_sample(bootstrap_rng, normalize)
            # Reset substream for next solver-problem pair.
            bootstrap_rng.reset_substream()
    # Advance substream of random number generator to prepare for next bootstrap sample.
    bootstrap_rng.advance_substream()
    return bootstrap_curves


def bootstrap_procedure(experiments, n_bootstraps, conf_level, plot_type, beta=None, solve_tol=None, estimator=None, normalize=True):
    """Obtain bootstrap sample and compute confidence intervals.

    Parameters
    ----------
    experiments : list [list [``experiment_base.ProblemSolver``]]
        Problem-solver pairs of different solvers and/or problems.
    n_bootstraps : int
        Number of times to generate a bootstrap sample of estimated progress curves.
    conf_level : float
        Confidence level for confidence intervals, i.e., 1-gamma; in (0, 1).
    plot_type : str
        String indicating which type of plot to produce:
            "mean" : estimated mean progress curve;

            "quantile" : estimated beta quantile progress curve;

            "area_mean" : mean of area under progress curve;

            "area_std_dev" : standard deviation of area under progress curve;

            "solve_time_quantile" : beta quantile of solve time;

            "solve_time_cdf" : cdf of solve time;

            "cdf_solvability" : cdf solvability profile;

            "quantile_solvability" : quantile solvability profile;

            "diff_cdf_solvability" : difference of cdf solvability profiles;

            "diff_quantile_solvability" : difference of quantile solvability profiles.
    beta : float, optional
        Quantile to plot, e.g., beta quantile; in (0, 1).
    solve_tol : float, optional
        Relative optimality gap defining when a problem is solved; in (0, 1].
    estimator : float or ``experiment_base.Curve``, optional
        Main estimator, e.g., mean convergence curve from an experiment.
    normalize : bool, default=True
        True if progress curves are to be normalized w.r.t. optimality gaps,
        otherwise False.

    Returns
    -------
    bs_CI_lower_bounds, bs_CI_upper_bounds : float or ``experiment_base.Curve``
        Lower and upper bound(s) of bootstrap CI(s), as floats or curves.
    """
    # Create random number generator for bootstrap sampling.
    # Stream 1 dedicated for bootstrapping.
    bootstrap_rng = MRG32k3a(s_ss_sss_index=[1, 0, 0])
    # Obtain n_bootstrap replications.
    bootstrap_replications = []
    for bs_index in range(n_bootstraps):
        # Generate bootstrap sample of estimated objective/progress curves.
        bootstrap_curves = bootstrap_sample_all(experiments, bootstrap_rng=bootstrap_rng, normalize=normalize)
        # Apply the functional of the bootstrap sample.
        bootstrap_replications.append(functional_of_curves(bootstrap_curves, plot_type, beta=beta, solve_tol=solve_tol))
    # Distinguish cases where functional returns a scalar vs a curve.
    if plot_type in {"area_mean", "area_std_dev", "solve_time_quantile"}:
        # Functional returns a scalar.
        bs_CI_lower_bounds, bs_CI_upper_bounds = compute_bootstrap_CI(bootstrap_replications,
                                                                      conf_level=conf_level,
                                                                      bias_correction=True,
                                                                      overall_estimator=estimator
                                                                      )
    elif plot_type in {"mean", "quantile", "solve_time_cdf", "cdf_solvability", "quantile_solvability", "diff_cdf_solvability", "diff_quantile_solvability"}:
        # Functional returns a curve: compute a pointwise CI at every budget
        # that appears in any bootstrapped curve.
        unique_budgets = list(np.unique([budget for curve in bootstrap_replications for budget in curve.x_vals]))
        bs_CI_lbs = []
        bs_CI_ubs = []
        for budget in unique_budgets:
            bootstrap_subreplications = [curve.lookup(x=budget) for curve in bootstrap_replications]
            sub_estimator = estimator.lookup(x=budget)
            bs_CI_lower_bound, bs_CI_upper_bound = compute_bootstrap_CI(bootstrap_subreplications,
                                                                        conf_level=conf_level,
                                                                        bias_correction=True,
                                                                        overall_estimator=sub_estimator
                                                                        )
            bs_CI_lbs.append(bs_CI_lower_bound)
            bs_CI_ubs.append(bs_CI_upper_bound)
        bs_CI_lower_bounds = Curve(x_vals=unique_budgets, y_vals=bs_CI_lbs)
        bs_CI_upper_bounds = Curve(x_vals=unique_budgets, y_vals=bs_CI_ubs)
    return bs_CI_lower_bounds, bs_CI_upper_bounds


def functional_of_curves(bootstrap_curves, plot_type, beta=0.5, solve_tol=0.1):
    """Compute a functional of the bootstrapped objective/progress curves.

    Parameters
    ----------
    bootstrap_curves : list [list [list [``experiment_base.Curve``]]]
        Bootstrapped estimated objective curves or estimated progress curves
        of all solutions from all macroreplications.
+ plot_type : str
+ String indicating which type of plot to produce:
+ "mean" : estimated mean progress curve;
+
+ "quantile" : estimated beta quantile progress curve;
+
+ "area_mean" : mean of area under progress curve;
+
+ "area_std_dev" : standard deviation of area under progress curve;
+
+ "solve_time_quantile" : beta quantile of solve time;
+
+ "solve_time_cdf" : cdf of solve time;
+
+ "cdf_solvability" : cdf solvability profile;
+
+ "quantile_solvability" : quantile solvability profile;
+
+ "diff_cdf_solvability" : difference of cdf solvability profiles;
+
+ "diff_quantile_solvability" : difference of quantile solvability profiles.
+ beta : float, default=0.5
+ Quantile to plot, e.g., beta quantile; in (0, 1).
+ solve_tol : float, default=0.1
+ Relative optimality gap defining when a problem is solved; in (0, 1].
+
+ Returns
+ -------
+ functional : list
+ Functional of bootstrapped curves, e.g., mean progress curves,
+ mean area under progress curve, quantile of crossing time, etc.
+ """
+ if plot_type == "mean":
+ # Single experiment --> returns a curve.
+ functional = mean_of_curves(bootstrap_curves[0][0])
+ elif plot_type == "quantile":
+ # Single experiment --> returns a curve.
+ functional = quantile_of_curves(bootstrap_curves[0][0], beta=beta)
+ elif plot_type == "area_mean":
+ # Single experiment --> returns a scalar.
+ functional = np.mean([curve.compute_area_under_curve() for curve in bootstrap_curves[0][0]])
+ elif plot_type == "area_std_dev":
+ # Single experiment --> returns a scalar.
+ functional = np.std([curve.compute_area_under_curve() for curve in bootstrap_curves[0][0]], ddof=1)
+ elif plot_type == "solve_time_quantile":
+ # Single experiment --> returns a scalar.
+ functional = np.quantile([curve.compute_crossing_time(threshold=solve_tol) for curve in bootstrap_curves[0][0]], q=beta)
+ elif plot_type == "solve_time_cdf":
+ # Single experiment --> returns a curve. 
+ functional = cdf_of_curves_crossing_times(bootstrap_curves[0][0], threshold=solve_tol) + elif plot_type == "cdf_solvability": + # One solver, multiple problems --> returns a curve. + functional = mean_of_curves([cdf_of_curves_crossing_times(curves=progress_curves, threshold=solve_tol) for progress_curves in bootstrap_curves[0]]) + elif plot_type == "quantile_solvability": + # One solver, multiple problems --> returns a curve. + functional = mean_of_curves([quantile_cross_jump(curves=progress_curves, threshold=solve_tol, beta=beta) for progress_curves in bootstrap_curves[0]]) + elif plot_type == "diff_cdf_solvability": + # Two solvers, multiple problems --> returns a curve. + solvability_profile_1 = mean_of_curves([cdf_of_curves_crossing_times(curves=progress_curves, threshold=solve_tol) for progress_curves in bootstrap_curves[0]]) + solvability_profile_2 = mean_of_curves([cdf_of_curves_crossing_times(curves=progress_curves, threshold=solve_tol) for progress_curves in bootstrap_curves[1]]) + functional = difference_of_curves(solvability_profile_1, solvability_profile_2) + elif plot_type == "diff_quantile_solvability": + # Two solvers, multiple problems --> returns a curve. + solvability_profile_1 = mean_of_curves([quantile_cross_jump(curves=progress_curves, threshold=solve_tol, beta=beta) for progress_curves in bootstrap_curves[0]]) + solvability_profile_2 = mean_of_curves([quantile_cross_jump(curves=progress_curves, threshold=solve_tol, beta=beta) for progress_curves in bootstrap_curves[1]]) + functional = difference_of_curves(solvability_profile_1, solvability_profile_2) + else: + print("Not a valid plot type.") + return functional + + +def compute_bootstrap_CI(observations, conf_level, bias_correction=True, overall_estimator=None): + """Construct a bootstrap confidence interval for an estimator. + + Parameters + ---------- + observations : list + Estimators from all bootstrap instances. 
+ conf_level : float
+ Confidence level for confidence intervals, i.e., 1-gamma; in (0, 1).
+ bias_correction : bool, default=True
+ True if bias-corrected bootstrap CIs (via percentile method) are to be used,
+ otherwise False.
+ overall_estimator : float, optional
+ Estimator to compute bootstrap confidence interval of;
+ required for bias-corrected CI.
+
+ Returns
+ -------
+ bs_CI_lower_bound : float
+ Lower bound of bootstrap CI.
+ bs_CI_upper_bound : float
+ Upper bound of bootstrap CI.
+ """
+ # Compute bootstrapping confidence interval via percentile method.
+ # See Efron (1981) "Nonparametric Standard Errors and Confidence Intervals."
+ if bias_correction:
+ if overall_estimator is None:
+ print("Estimator required to compute bias-corrected CIs.")
+ # For bias-corrected CIs, see equation (4.4) on page 146.
+ z0 = norm.ppf(np.mean([obs < overall_estimator for obs in observations]))
+ zconflvl = norm.ppf(conf_level)
+ q_lower = norm.cdf(2 * z0 - zconflvl)
+ q_upper = norm.cdf(2 * z0 + zconflvl)
+ else:
+ # For uncorrected CIs, see equation (4.3) on page 146.
+ q_lower = (1 - conf_level) / 2
+ q_upper = 1 - (1 - conf_level) / 2
+ bs_CI_lower_bound = np.quantile(observations, q=q_lower)
+ bs_CI_upper_bound = np.quantile(observations, q=q_upper)
+ return bs_CI_lower_bound, bs_CI_upper_bound
+
+
+def plot_bootstrap_CIs(bs_CI_lower_bounds, bs_CI_upper_bounds, color_str="C0"):
+ """Plot bootstrap confidence intervals.
+
+ Parameters
+ ----------
+ bs_CI_lower_bounds, bs_CI_upper_bounds : ``experiment_base.Curve``
+ Lower and upper bounds of bootstrap CIs, as curves.
+ color_str : str, default="C0"
+ String indicating line color, e.g., "C0", "C1", etc.
+ """
+ bs_CI_lower_bounds.plot(color_str=color_str, curve_type="conf_bound")
+ bs_CI_upper_bounds.plot(color_str=color_str, curve_type="conf_bound")
+ # Shade space between curves.
+ # Convert to full curves to get piecewise-constant shaded areas. 
+ plt.fill_between(x=bs_CI_lower_bounds.curve_to_full_curve().x_vals, + y1=bs_CI_lower_bounds.curve_to_full_curve().y_vals, + y2=bs_CI_upper_bounds.curve_to_full_curve().y_vals, + color=color_str, + alpha=0.2 + ) + + +def report_max_halfwidth(curve_pairs, normalize, conf_level, difference=False,): + """Compute and print caption for max halfwidth of one or more bootstrap CI curves. + + Parameters + ---------- + curve_pairs : list [list [``experiment_base.Curve``]] + List of paired bootstrap CI curves. + normalize : bool + True if progress curves are to be normalized w.r.t. optimality gaps, + otherwise False. + conf_level : float + Confidence level for confidence intervals, i.e., 1-gamma; in (0, 1). + difference : bool + True if the plot is for difference profiles, otherwise False. + """ + # Compute max halfwidth of bootstrap confidence intervals. + min_lower_bound = np.inf + max_upper_bound = -np.inf + max_halfwidths = [] + for curve_pair in curve_pairs: + min_lower_bound = min(min_lower_bound, min(curve_pair[0].y_vals)) + max_upper_bound = max(max_upper_bound, max(curve_pair[1].y_vals)) + max_halfwidths.append(0.5 * max_difference_of_curves(curve_pair[1], curve_pair[0])) + max_halfwidth = max(max_halfwidths) + # Print caption about max halfwidth. + if normalize: + if difference: + xloc = 0.05 + yloc = -1.35 + else: + xloc = 0.05 + yloc = -0.35 + else: + # xloc = 0.05 * budget of the problem + xloc = 0.05 * curve_pairs[0][0].x_vals[-1] + yloc = min_lower_bound - 0.25 * (max_upper_bound - min_lower_bound) + txt = f"The max halfwidth of the bootstrap {round(conf_level * 100)}% CIs is {round(max_halfwidth, 2)}." + plt.text(x=xloc, y=yloc, s=txt) + + +def check_common_problem_and_reference(experiments): + """Check if a collection of experiments have the same problem, x0, and x*. + + Parameters + ---------- + experiments : list [``experiment_base.ProblemSolver``] + Problem-solver pairs of different solvers on a common problem. 
+ """ + ref_experiment = experiments[0] + for experiment in experiments: + if experiment.problem != ref_experiment.problem: + print("At least two experiments have different problem instances.") + if experiment.x0 != ref_experiment.x0: + print("At least two experiments have different starting solutions.") + if experiment.xstar != ref_experiment.xstar: + print("At least two experiments have different optimal solutions.") + + +def plot_progress_curves(experiments, plot_type, beta=0.50, normalize=True, all_in_one=True, n_bootstraps=100, conf_level=0.95, plot_CIs=True, print_max_hw=True): + """Plot individual or aggregate progress curves for one or more solvers + on a single problem. + + Parameters + ---------- + experiments : list [``experiment_base.ProblemSolver``] + Problem-solver pairs of different solvers on a common problem. + plot_type : str + String indicating which type of plot to produce: + "all" : all estimated progress curves; + + "mean" : estimated mean progress curve; + + "quantile" : estimated beta quantile progress curve. + beta : float, default=0.50 + Quantile to plot, e.g., beta quantile; in (0, 1). + normalize : bool, default=True + True if progress curves are to be normalized w.r.t. optimality gaps, + otherwise False. + all_in_one : bool, default=True + True if curves are to be plotted together, otherwise False. + n_bootstraps : int, default=100 + Number of bootstrap samples. + conf_level : float + Confidence level for confidence intervals, i.e., 1-gamma; in (0, 1). + plot_CIs : bool, default=True + True if bootstrapping confidence intervals are to be plotted, otherwise False. + print_max_hw : bool, default=True + True if caption with max half-width is to be printed, otherwise False. + + Returns + ------- + file_list : list [str] + List compiling path names for plots produced. + """ + # Check if problems are the same with the same x0 and x*. + check_common_problem_and_reference(experiments) + file_list = [] + # Set up plot. 
+ n_experiments = len(experiments) + if all_in_one: + ref_experiment = experiments[0] + setup_plot(plot_type=plot_type, + solver_name="SOLVER SET", + problem_name=ref_experiment.problem.name, + normalize=normalize, + budget=ref_experiment.problem.factors["budget"], + beta=beta + ) + solver_curve_handles = [] + if print_max_hw: + curve_pairs = [] + for exp_idx in range(n_experiments): + experiment = experiments[exp_idx] + color_str = "C" + str(exp_idx) + if plot_type == "all": + # Plot all estimated progress curves. + if normalize: + handle = experiment.progress_curves[0].plot(color_str=color_str) + for curve in experiment.progress_curves[1:]: + curve.plot(color_str=color_str) + else: + handle = experiment.objective_curves[0].plot(color_str=color_str) + for curve in experiment.objective_curves[1:]: + curve.plot(color_str=color_str) + elif plot_type == "mean": + # Plot estimated mean progress curve. + if normalize: + estimator = mean_of_curves(experiment.progress_curves) + else: + estimator = mean_of_curves(experiment.objective_curves) + handle = estimator.plot(color_str=color_str) + elif plot_type == "quantile": + # Plot estimated beta-quantile progress curve. + if normalize: + estimator = quantile_of_curves(experiment.progress_curves, beta) + else: + estimator = quantile_of_curves(experiment.objective_curves, beta) + handle = estimator.plot(color_str=color_str) + else: + print("Not a valid plot type.") + solver_curve_handles.append(handle) + if (plot_CIs or print_max_hw) and plot_type != "all": + # Note: "experiments" needs to be a list of list of ProblemSolver objects. 
+ bs_CI_lb_curve, bs_CI_ub_curve = bootstrap_procedure(experiments=[[experiment]], + n_bootstraps=n_bootstraps, + conf_level=conf_level, + plot_type=plot_type, + beta=beta, + estimator=estimator, + normalize=normalize + ) + if plot_CIs: + plot_bootstrap_CIs(bs_CI_lb_curve, bs_CI_ub_curve, color_str=color_str) + if print_max_hw: + curve_pairs.append([bs_CI_lb_curve, bs_CI_ub_curve]) + plt.legend(handles=solver_curve_handles, labels=[experiment.solver.name for experiment in experiments], loc="upper right") + if print_max_hw and plot_type != "all": + report_max_halfwidth(curve_pairs=curve_pairs, normalize=normalize, conf_level=conf_level) + file_list.append(save_plot(solver_name="SOLVER SET", + problem_name=ref_experiment.problem.name, + plot_type=plot_type, + normalize=normalize, + extra=beta + )) + else: # Plot separately. + for experiment in experiments: + setup_plot(plot_type=plot_type, + solver_name=experiment.solver.name, + problem_name=experiment.problem.name, + normalize=normalize, + budget=experiment.problem.factors["budget"], + beta=beta + ) + if plot_type == "all": + # Plot all estimated progress curves. + if normalize: + for curve in experiment.progress_curves: + curve.plot() + else: + for curve in experiment.objective_curves: + curve.plot() + elif plot_type == "mean": + # Plot estimated mean progress curve. + if normalize: + estimator = mean_of_curves(experiment.progress_curves) + else: + estimator = mean_of_curves(experiment.objective_curves) + estimator.plot() + elif plot_type == "quantile": + # Plot estimated beta-quantile progress curve. + if normalize: + estimator = quantile_of_curves(experiment.progress_curves, beta) + else: + estimator = quantile_of_curves(experiment.objective_curves, beta) + estimator.plot() + else: + print("Not a valid plot type.") + if (plot_CIs or print_max_hw) and plot_type != "all": + # Note: "experiments" needs to be a list of list of ProblemSolvers. 
+ bs_CI_lb_curve, bs_CI_ub_curve = bootstrap_procedure(experiments=[[experiment]], + n_bootstraps=n_bootstraps, + conf_level=conf_level, + plot_type=plot_type, + beta=beta, + estimator=estimator, + normalize=normalize + ) + if plot_CIs: + plot_bootstrap_CIs(bs_CI_lb_curve, bs_CI_ub_curve) + if print_max_hw: + report_max_halfwidth(curve_pairs=[[bs_CI_lb_curve, bs_CI_ub_curve]], normalize=normalize, conf_level=conf_level) + file_list.append(save_plot(solver_name=experiment.solver.name, + problem_name=experiment.problem.name, + plot_type=plot_type, + normalize=normalize, + extra=beta + )) + return file_list + + +def plot_solvability_cdfs(experiments, solve_tol=0.1, all_in_one=True, n_bootstraps=100, conf_level=0.95, plot_CIs=True, print_max_hw=True): + """Plot the solvability cdf for one or more solvers on a single problem. + + Parameters + ---------- + experiments : list [``experiment_base.ProblemSolver``] + Problem-solver pairs of different solvers on a common problem. + solve_tol : float, default=0.1 + Relative optimality gap definining when a problem is solved; in (0, 1]. + all_in_one : bool, default=True + True if curves are to be plotted together, otherwise False. + n_bootstraps : int, default=100 + Number of bootstrap samples. + conf_level : float + Confidence level for confidence intervals, i.e., 1-gamma; in (0, 1). + plot_CIs : bool, default=True + True if bootstrapping confidence intervals are to be plotted, otherwise False. + print_max_hw : bool, default=True + True if caption with max half-width is to be printed, otherwise False. + + Returns + ------- + file_list : list [str] + List compiling path names for plots produced. + """ + # Check if problems are the same with the same x0 and x*. + check_common_problem_and_reference(experiments) + file_list = [] + # Set up plot. 
+ n_experiments = len(experiments) + if all_in_one: + ref_experiment = experiments[0] + setup_plot(plot_type="solve_time_cdf", + solver_name="SOLVER SET", + problem_name=ref_experiment.problem.name, + solve_tol=solve_tol + ) + solver_curve_handles = [] + if print_max_hw: + curve_pairs = [] + for exp_idx in range(n_experiments): + experiment = experiments[exp_idx] + color_str = "C" + str(exp_idx) + # Plot cdf of solve times. + estimator = cdf_of_curves_crossing_times(experiment.progress_curves, threshold=solve_tol) + handle = estimator.plot(color_str=color_str) + solver_curve_handles.append(handle) + if plot_CIs or print_max_hw: + # Note: "experiments" needs to be a list of list of ProblemSolver objects. + bs_CI_lb_curve, bs_CI_ub_curve = bootstrap_procedure(experiments=[[experiment]], + n_bootstraps=n_bootstraps, + conf_level=conf_level, + plot_type="solve_time_cdf", + solve_tol=solve_tol, + estimator=estimator, + normalize=True + ) + if plot_CIs: + plot_bootstrap_CIs(bs_CI_lb_curve, bs_CI_ub_curve, color_str=color_str) + if print_max_hw: + curve_pairs.append([bs_CI_lb_curve, bs_CI_ub_curve]) + plt.legend(handles=solver_curve_handles, labels=[experiment.solver.name for experiment in experiments], loc="upper left") + if print_max_hw: + report_max_halfwidth(curve_pairs=curve_pairs, normalize=True, conf_level=conf_level) + file_list.append(save_plot(solver_name="SOLVER SET", + problem_name=ref_experiment.problem.name, + plot_type="solve_time_cdf", + normalize=True, + extra=solve_tol + )) + else: # Plot separately. + for experiment in experiments: + setup_plot(plot_type="solve_time_cdf", + solver_name=experiment.solver.name, + problem_name=experiment.problem.name, + solve_tol=solve_tol + ) + estimator = cdf_of_curves_crossing_times(experiment.progress_curves, threshold=solve_tol) + estimator.plot() + if plot_CIs or print_max_hw: + # Note: "experiments" needs to be a list of list of Problem-Solver objects. 
+ bs_CI_lb_curve, bs_CI_ub_curve = bootstrap_procedure(experiments=[[experiment]], + n_bootstraps=n_bootstraps, + conf_level=conf_level, + plot_type="solve_time_cdf", + solve_tol=solve_tol, + estimator=estimator, + normalize=True + ) + if plot_CIs: + plot_bootstrap_CIs(bs_CI_lb_curve, bs_CI_ub_curve) + if print_max_hw: + report_max_halfwidth(curve_pairs=[[bs_CI_lb_curve, bs_CI_ub_curve]], normalize=True, conf_level=conf_level) + file_list.append(save_plot(solver_name=experiment.solver.name, + problem_name=experiment.problem.name, + plot_type="solve_time_cdf", + normalize=True, + extra=solve_tol + )) + return file_list + + +def plot_area_scatterplots(experiments, all_in_one=True, n_bootstraps=100, conf_level=0.95, plot_CIs=True, print_max_hw=True): + """Plot a scatter plot of mean and standard deviation of area under progress curves. + Either one plot for each solver or one plot for all solvers. + + Notes + ----- + TO DO: Add the capability to compute and print the max halfwidth of + the bootstrapped CI intervals. + + Parameters + ---------- + experiments : list [list [``experiment_base.ProblemSolver``]] + Problem-solver pairs used to produce plots. + all_in_one : bool, default=True + True if curves are to be plotted together, otherwise False. + n_bootstraps : int, default=100 + Number of bootstrap samples. + conf_level : float + Confidence level for confidence intervals, i.e., 1-gamma; in (0, 1). + plot_CIs : bool, default=True + True if bootstrapping confidence intervals are to be plotted, otherwise False. + print_max_hw : bool, default=True + True if caption with max half-width is to be printed, otherwise False. + + Returns + ------- + file_list : list [str] + List compiling path names for plots produced. + """ + file_list = [] + # Set up plot. 
+ n_solvers = len(experiments) + n_problems = len(experiments[0]) + if all_in_one: + marker_list = ["o", "v", "s", "*", "P", "X", "D", "V", ">", "<"] + setup_plot(plot_type="area", + solver_name="SOLVER SET", + problem_name="PROBLEM SET" + ) + solver_names = [solver_experiments[0].solver.name for solver_experiments in experiments] + solver_curve_handles = [] + # TO DO: Build up capability to print max half-width. + if print_max_hw: + curve_pairs = [] + for solver_idx in range(n_solvers): + for problem_idx in range(n_problems): + experiment = experiments[solver_idx][problem_idx] + color_str = "C" + str(solver_idx) + marker_str = marker_list[solver_idx % len(marker_list)] # Cycle through list of marker types. + # Plot mean and standard deviation of area under progress curve. + areas = [curve.compute_area_under_curve() for curve in experiment.progress_curves] + mean_estimator = np.mean(areas) + std_dev_estimator = np.std(areas, ddof=1) + if plot_CIs: + # Note: "experiments" needs to be a list of list of ProblemSolver objects. 
+ mean_bs_CI_lb, mean_bs_CI_ub = bootstrap_procedure(experiments=[[experiment]], + n_bootstraps=n_bootstraps, + conf_level=conf_level, + plot_type="area_mean", + estimator=mean_estimator, + normalize=True + ) + std_dev_bs_CI_lb, std_dev_bs_CI_ub = bootstrap_procedure(experiments=[[experiment]], + n_bootstraps=n_bootstraps, + conf_level=conf_level, + plot_type="area_std_dev", + estimator=std_dev_estimator, + normalize=True + ) + # if print_max_hw: + # curve_pairs.append([bs_CI_lb_curve, bs_CI_ub_curve]) + x_err = [[mean_estimator - mean_bs_CI_lb], [mean_bs_CI_ub - mean_estimator]] + y_err = [[std_dev_estimator - std_dev_bs_CI_lb], [std_dev_bs_CI_ub - std_dev_estimator]] + handle = plt.errorbar(x=mean_estimator, + y=std_dev_estimator, + xerr=x_err, + yerr=y_err, + color=color_str, + marker=marker_str, + elinewidth=1 + ) + else: + handle = plt.scatter(x=mean_estimator, y=std_dev_estimator, color=color_str, marker=marker_str) + solver_curve_handles.append(handle) + plt.legend(handles=solver_curve_handles, labels=solver_names, loc="upper right") + file_list.append(save_plot(solver_name="SOLVER SET", + problem_name="PROBLEM SET", + plot_type="area", + normalize=True + )) + else: + for solver_idx in range(n_solvers): + ref_experiment = experiments[solver_idx][0] + setup_plot(plot_type="area", + solver_name=ref_experiment.solver.name, + problem_name="PROBLEM SET" + ) + if print_max_hw: + curve_pairs = [] + for problem_idx in range(n_problems): + experiment = experiments[solver_idx][problem_idx] + # Plot mean and standard deviation of area under progress curve. + areas = [curve.compute_area_under_curve() for curve in experiment.progress_curves] + mean_estimator = np.mean(areas) + std_dev_estimator = np.std(areas, ddof=1) + if plot_CIs: + # Note: "experiments" needs to be a list of list of ProblemSolver objects. 
+ mean_bs_CI_lb, mean_bs_CI_ub = bootstrap_procedure(experiments=[[experiment]], + n_bootstraps=n_bootstraps, + conf_level=conf_level, + plot_type="area_mean", + estimator=mean_estimator, + normalize=True + ) + std_dev_bs_CI_lb, std_dev_bs_CI_ub = bootstrap_procedure(experiments=[[experiment]], + n_bootstraps=n_bootstraps, + conf_level=conf_level, + plot_type="area_std_dev", + estimator=std_dev_estimator, + normalize=True + ) + # if print_max_hw: + # curve_pairs.append([bs_CI_lb_curve, bs_CI_ub_curve]) + x_err = [[mean_estimator - mean_bs_CI_lb], [mean_bs_CI_ub - mean_estimator]] + y_err = [[std_dev_estimator - std_dev_bs_CI_lb], [std_dev_bs_CI_ub - std_dev_estimator]] + handle = plt.errorbar(x=mean_estimator, + y=std_dev_estimator, + xerr=x_err, + yerr=y_err, + marker="o", + color="C0", + elinewidth=1 + ) + else: + handle = plt.scatter(x=mean_estimator, y=std_dev_estimator, color="C0", marker="o") + file_list.append(save_plot(solver_name=experiment.solver.name, + problem_name="PROBLEM SET", + plot_type="area", + normalize=True + )) + return file_list + + +def plot_solvability_profiles(experiments, plot_type, all_in_one=True, n_bootstraps=100, conf_level=0.95, plot_CIs=True, print_max_hw=True, solve_tol=0.1, beta=0.5, ref_solver=None): + """Plot the (difference of) solvability profiles for each solver on a set of problems. + + Parameters + ---------- + experiments : list [list [``experiment_base.ProblemSolver``]] + Problem-solver pairs used to produce plots. + plot_type : str + String indicating which type of plot to produce: + "cdf_solvability" : cdf-solvability profile; + + "quantile_solvability" : quantile-solvability profile; + + "diff_cdf_solvability" : difference of cdf-solvability profiles; + + "diff_quantile_solvability" : difference of quantile-solvability profiles. + all_in_one : bool, default=True + True if curves are to be plotted together, otherwise False. + n_bootstraps : int, default=100 + Number of bootstrap samples. 
+ conf_level : float + Confidence level for confidence intervals, i.e., 1-gamma; in (0, 1). + plot_CIs : bool, default=True + True if bootstrapping confidence intervals are to be plotted, otherwise False. + print_max_hw : bool, default=True + True if caption with max half-width is to be printed, otherwise False. + solve_tol : float, default=0.1 + Relative optimality gap definining when a problem is solved; in (0, 1]. + beta : float, default=0.5 + Quantile to compute, e.g., beta quantile; in (0, 1). + ref_solver : str, optional + Name of solver used as benchmark for difference profiles. + + Returns + ------- + file_list : list [str] + List compiling path names for plots produced. + """ + file_list = [] + # Set up plot. + n_solvers = len(experiments) + n_problems = len(experiments[0]) + if all_in_one: + if plot_type == "cdf_solvability": + setup_plot(plot_type=plot_type, + solver_name="SOLVER SET", + problem_name="PROBLEM SET", + solve_tol=solve_tol + ) + elif plot_type == "quantile_solvability": + setup_plot(plot_type=plot_type, + solver_name="SOLVER SET", + problem_name="PROBLEM SET", + beta=beta, + solve_tol=solve_tol + ) + elif plot_type == "diff_cdf_solvability": + setup_plot(plot_type=plot_type, + solver_name="SOLVER SET", + problem_name="PROBLEM SET", + solve_tol=solve_tol + ) + elif plot_type == "diff_quantile_solvability": + setup_plot(plot_type=plot_type, + solver_name="SOLVER SET", + problem_name="PROBLEM SET", + beta=beta, + solve_tol=solve_tol + ) + if print_max_hw: + curve_pairs = [] + solver_names = [solver_experiments[0].solver.name for solver_experiments in experiments] + solver_curves = [] + solver_curve_handles = [] + for solver_idx in range(n_solvers): + solver_sub_curves = [] + color_str = "C" + str(solver_idx) + # For each problem compute the cdf or quantile of solve times. 
+ for problem_idx in range(n_problems): + experiment = experiments[solver_idx][problem_idx] + if plot_type in {"cdf_solvability", "diff_cdf_solvability"}: + sub_curve = cdf_of_curves_crossing_times(curves=experiment.progress_curves, threshold=solve_tol) + if plot_type in {"quantile_solvability", "diff_quantile_solvability"}: + sub_curve = quantile_cross_jump(curves=experiment.progress_curves, threshold=solve_tol, beta=beta) + solver_sub_curves.append(sub_curve) + # Plot solvability profile for the solver. + # Exploit the fact that each solvability profile is an average of more basic curves. + solver_curve = mean_of_curves(solver_sub_curves) + # CAUTION: Using mean above requires an equal number of macro-replications per problem. + solver_curves.append(solver_curve) + if plot_type in {"cdf_solvability", "quantile_solvability"}: + handle = solver_curve.plot(color_str=color_str) + solver_curve_handles.append(handle) + if plot_CIs or print_max_hw: + # Note: "experiments" needs to be a list of list of ProblemSolver objects. 
+ bs_CI_lb_curve, bs_CI_ub_curve = bootstrap_procedure(experiments=[experiments[solver_idx]], + n_bootstraps=n_bootstraps, + conf_level=conf_level, + plot_type=plot_type, + solve_tol=solve_tol, + beta=beta, + estimator=solver_curve, + normalize=True + ) + if plot_CIs: + plot_bootstrap_CIs(bs_CI_lb_curve, bs_CI_ub_curve, color_str=color_str) + if print_max_hw: + curve_pairs.append([bs_CI_lb_curve, bs_CI_ub_curve]) + if plot_type == "cdf_solvability": + plt.legend(handles=solver_curve_handles, labels=solver_names, loc="upper left") + if print_max_hw: + report_max_halfwidth(curve_pairs=curve_pairs, normalize=True, conf_level=conf_level) + file_list.append(save_plot(solver_name="SOLVER SET", + problem_name="PROBLEM SET", + plot_type=plot_type, + normalize=True, + extra=solve_tol + )) + elif plot_type == "quantile_solvability": + plt.legend(handles=solver_curve_handles, labels=solver_names, loc="upper left") + if print_max_hw: + report_max_halfwidth(curve_pairs=curve_pairs, normalize=True, conf_level=conf_level) + file_list.append(save_plot(solver_name="SOLVER SET", + problem_name="PROBLEM SET", + plot_type=plot_type, + normalize=True, + extra=[solve_tol, beta] + )) + elif plot_type in {"diff_cdf_solvability", "diff_quantile_solvability"}: + non_ref_solvers = [solver_name for solver_name in solver_names if solver_name != ref_solver] + ref_solver_idx = solver_names.index(ref_solver) + for solver_idx in range(n_solvers): + if solver_idx is not ref_solver_idx: + diff_solver_curve = difference_of_curves(solver_curves[solver_idx], solver_curves[ref_solver_idx]) + color_str = "C" + str(solver_idx) + handle = diff_solver_curve.plot(color_str=color_str) + solver_curve_handles.append(handle) + if plot_CIs or print_max_hw: + # Note: "experiments" needs to be a list of list of ProblemSolver objects. 
+ bs_CI_lb_curve, bs_CI_ub_curve = bootstrap_procedure(experiments=[experiments[solver_idx], experiments[ref_solver_idx]], + n_bootstraps=n_bootstraps, + conf_level=conf_level, + plot_type=plot_type, + solve_tol=solve_tol, + beta=beta, + estimator=diff_solver_curve, + normalize=True + ) + if plot_CIs: + plot_bootstrap_CIs(bs_CI_lb_curve, bs_CI_ub_curve, color_str=color_str) + if print_max_hw: + curve_pairs.append([bs_CI_lb_curve, bs_CI_ub_curve]) + offset_labels = [f"{non_ref_solver} - {ref_solver}" for non_ref_solver in non_ref_solvers] + plt.legend(handles=solver_curve_handles, labels=offset_labels, loc="upper left") + if print_max_hw: + report_max_halfwidth(curve_pairs=curve_pairs, normalize=True, conf_level=conf_level, difference=True) + if plot_type == "diff_cdf_solvability": + file_list.append(save_plot(solver_name="SOLVER SET", + problem_name="PROBLEM SET", + plot_type=plot_type, + normalize=True, + extra=solve_tol + )) + elif plot_type == "diff_quantile_solvability": + file_list.append(save_plot(solver_name="SOLVER SET", + problem_name="PROBLEM SET", + plot_type=plot_type, + normalize=True, + extra=[solve_tol, beta] + )) + else: + solver_names = [solver_experiments[0].solver.name for solver_experiments in experiments] + solver_curves = [] + for solver_idx in range(n_solvers): + solver_sub_curves = [] + # For each problem compute the cdf or quantile of solve times. + for problem_idx in range(n_problems): + experiment = experiments[solver_idx][problem_idx] + if plot_type in {"cdf_solvability", "diff_cdf_solvability"}: + sub_curve = cdf_of_curves_crossing_times(curves=experiment.progress_curves, threshold=solve_tol) + if plot_type in {"quantile_solvability", "diff_quantile_solvability"}: + sub_curve = quantile_cross_jump(curves=experiment.progress_curves, threshold=solve_tol, beta=beta) + solver_sub_curves.append(sub_curve) + # Plot solvability profile for the solver. + # Exploit the fact that each solvability profile is an average of more basic curves. 
+ solver_curve = mean_of_curves(solver_sub_curves) + solver_curves.append(solver_curve) + if plot_type in {"cdf_solvability", "quantile_solvability"}: + # Set up plot. + if plot_type == "cdf_solvability": + file_list.append(setup_plot(plot_type=plot_type, + solver_name=experiments[solver_idx][0].solver.name, + problem_name="PROBLEM SET", + solve_tol=solve_tol + )) + elif plot_type == "quantile_solvability": + file_list.append(setup_plot(plot_type=plot_type, + solver_name=experiments[solver_idx][0].solver.name, + problem_name="PROBLEM SET", + beta=beta, + solve_tol=solve_tol + )) + handle = solver_curve.plot() + if plot_CIs or print_max_hw: + # Note: "experiments" needs to be a list of list of ProblemSolver objects. + bs_CI_lb_curve, bs_CI_ub_curve = bootstrap_procedure(experiments=[experiments[solver_idx]], + n_bootstraps=n_bootstraps, + conf_level=conf_level, + plot_type=plot_type, + solve_tol=solve_tol, + beta=beta, + estimator=solver_curve, + normalize=True + ) + if plot_CIs: + plot_bootstrap_CIs(bs_CI_lb_curve, bs_CI_ub_curve) + if print_max_hw: + report_max_halfwidth(curve_pairs=[[bs_CI_lb_curve, bs_CI_ub_curve]], normalize=True, conf_level=conf_level) + if plot_type == "cdf_solvability": + file_list.append(save_plot(solver_name=experiments[solver_idx][0].solver.name, + problem_name="PROBLEM SET", + plot_type=plot_type, + normalize=True, + extra=solve_tol + )) + elif plot_type == "quantile_solvability": + file_list.append(save_plot(solver_name=experiments[solver_idx][0].solver.name, + problem_name="PROBLEM SET", + plot_type=plot_type, + normalize=True, + extra=[solve_tol, beta] + )) + if plot_type in {"diff_cdf_solvability", "diff_quantile_solvability"}: + non_ref_solvers = [solver_name for solver_name in solver_names if solver_name != ref_solver] + ref_solver_idx = solver_names.index(ref_solver) + for solver_idx in range(n_solvers): + if solver_idx is not ref_solver_idx: + if plot_type == "diff_cdf_solvability": + 
def plot_terminal_progress(experiments, plot_type="violin", normalize=True, all_in_one=True):
    """Plot individual or aggregate terminal progress for one or more solvers
    on a single problem.

    Parameters
    ----------
    experiments : list [``experiment_base.ProblemSolver``]
        ProblemSolver pairs of different solvers on a common problem.
    plot_type : str, default="violin"
        String indicating which type of plot to produce:

            "box" : comparative box plots;

            "violin" : comparative violin plots.
    normalize : bool, default=True
        True if progress curves are to be normalized w.r.t. optimality gaps,
        otherwise False.
    all_in_one : bool, default=True
        True if curves are to be plotted together, otherwise False.

    Returns
    -------
    file_list : list [str]
        List compiling path names for plots produced.
    """
    # Check if problems are the same with the same x0 and x*.
    check_common_problem_and_reference(experiments)
    file_list = []
    # Set up plot.
    n_experiments = len(experiments)
    if all_in_one:
        # One figure: every solver's terminal distribution side by side.
        ref_experiment = experiments[0]
        setup_plot(plot_type=plot_type,
                   solver_name="SOLVER SET",
                   problem_name=ref_experiment.problem.name,
                   normalize=normalize,
                   budget=ref_experiment.problem.factors["budget"]
                   )
        # solver_curve_handles = []
        # Terminal value of a macroreplication = last y-value of its curve.
        if normalize:
            terminal_data = [[experiment.progress_curves[mrep].y_vals[-1] for mrep in range(experiment.n_macroreps)] for experiment in experiments]
        else:
            terminal_data = [[experiment.objective_curves[mrep].y_vals[-1] for mrep in range(experiment.n_macroreps)] for experiment in experiments]
        if plot_type == "box":
            plt.boxplot(terminal_data)
            plt.xticks(range(1, n_experiments + 1), labels=[experiment.solver.name for experiment in experiments])
        if plot_type == "violin":
            # Flatten to long form (one row per macroreplication) for seaborn.
            solver_names = [experiments[exp_idx].solver.name for exp_idx in range(n_experiments) for td in terminal_data[exp_idx]]
            terminal_values = [td for exp_idx in range(n_experiments) for td in terminal_data[exp_idx]]
            terminal_data_dict = {"Solvers": solver_names, "Terminal": terminal_values}
            terminal_data_df = pd.DataFrame(terminal_data_dict)
            # sns.violinplot(x="Solvers", y="Terminal", data=terminal_data_df, inner="stick", scale="width", showmeans=True, bw = 0.2, cut=2)
            sns.violinplot(x="Solvers", y="Terminal", data=terminal_data_df, inner="stick", scale="width", showmeans=True, cut=0.1)
        if normalize:
            plt.ylabel("Terminal Progress")
        else:
            plt.ylabel("Terminal Objective")
        file_list.append(save_plot(solver_name="SOLVER SET",
                                   problem_name=ref_experiment.problem.name,
                                   plot_type=plot_type,
                                   normalize=normalize
                                   ))
    else:  # Plot separately.
        # One figure per problem-solver pair.
        for experiment in experiments:
            setup_plot(plot_type=plot_type,
                       solver_name=experiment.solver.name,
                       problem_name=experiment.problem.name,
                       normalize=normalize,
                       budget=experiment.problem.factors["budget"]
                       )
            if normalize:
                terminal_data = [experiment.progress_curves[mrep].y_vals[-1] for mrep in range(experiment.n_macroreps)]
            else:
                terminal_data = [experiment.objective_curves[mrep].y_vals[-1] for mrep in range(experiment.n_macroreps)]
            if plot_type == "box":
                plt.boxplot(terminal_data)
                plt.xticks([1], labels=[experiment.solver.name])
            if plot_type == "violin":
                solver_name_rep = [experiment.solver.name for td in terminal_data]
                terminal_data_dict = {"Solver": solver_name_rep, "Terminal": terminal_data}
                terminal_data_df = pd.DataFrame(terminal_data_dict)
                sns.violinplot(x="Solver", y="Terminal", data=terminal_data_df, inner="stick")
            if normalize:
                plt.ylabel("Terminal Progress")
            else:
                plt.ylabel("Terminal Objective")
            file_list.append(save_plot(solver_name=experiment.solver.name,
                                       problem_name=experiment.problem.name,
                                       plot_type=plot_type,
                                       normalize=normalize
                                       ))
    return file_list
+ """ + file_list = [] + # Set up plot. + n_solvers = len(experiments) + n_problems = len(experiments[0]) + if all_in_one: + marker_list = ["o", "v", "s", "*", "P", "X", "D", "V", ">", "<"] + setup_plot(plot_type="terminal_scatter", + solver_name="SOLVER SET", + problem_name="PROBLEM SET" + ) + solver_names = [solver_experiments[0].solver.name for solver_experiments in experiments] + solver_curve_handles = [] + for solver_idx in range(n_solvers): + for problem_idx in range(n_problems): + experiment = experiments[solver_idx][problem_idx] + color_str = "C" + str(solver_idx) + marker_str = marker_list[solver_idx % len(marker_list)] # Cycle through list of marker types. + # Plot mean and standard deviation of terminal progress. + terminals = [curve.y_vals[-1] for curve in experiment.progress_curves] + mean_estimator = np.mean(terminals) + std_dev_estimator = np.std(terminals, ddof=1) + handle = plt.scatter(x=mean_estimator, y=std_dev_estimator, color=color_str, marker=marker_str) + solver_curve_handles.append(handle) + plt.legend(handles=solver_curve_handles, labels=solver_names, loc="upper right") + file_list.append(save_plot(solver_name="SOLVER SET", + problem_name="PROBLEM SET", + plot_type="terminal_scatter", + normalize=True + )) + else: + for solver_idx in range(n_solvers): + ref_experiment = experiments[solver_idx][0] + setup_plot(plot_type="terminal_scatter", + solver_name=ref_experiment.solver.name, + problem_name="PROBLEM SET" + ) + for problem_idx in range(n_problems): + experiment = experiments[solver_idx][problem_idx] + # Plot mean and standard deviation of terminal progress. 
+ terminals = [curve.y_vals[-1] for curve in experiment.progress_curves] + mean_estimator = np.mean(terminals) + std_dev_estimator = np.std(terminals, ddof=1) + handle = plt.scatter(x=mean_estimator, y=std_dev_estimator, color="C0", marker="o") + file_list.append(save_plot(solver_name=experiment.solver.name, + problem_name="PROBLEM SET", + plot_type="terminal_scatter", + normalize=True + )) + return file_list + + +def setup_plot(plot_type, solver_name="SOLVER SET", problem_name="PROBLEM SET", normalize=True, budget=None, beta=None, solve_tol=None): + """Create new figure. Add labels to plot and reformat axes. + + Parameters + ---------- + plot_type : str + String indicating which type of plot to produce: + "all" : all estimated progress curves; + + "mean" : estimated mean progress curve; + + "quantile" : estimated beta quantile progress curve; + + "solve_time_cdf" : cdf of solve time; + + "cdf_solvability" : cdf solvability profile; + + "quantile_solvability" : quantile solvability profile; + + "diff_cdf_solvability" : difference of cdf solvability profiles; + + "diff_quantile_solvability" : difference of quantile solvability profiles; + + "area" : area scatterplot; + + "box" : box plot of terminal progress; + + "violin" : violin plot of terminal progress; + + "terminal_scatter" : scatterplot of mean and std dev of terminal progress. + solver_name : str, default="SOLVER_SET" + Name of solver. + problem_name : str, default="PROBLEM_SET" + Name of problem. + normalize : bool, default=True + True if progress curves are to be normalized w.r.t. optimality gaps, + otherwise False. + budget : int, optional + Budget of problem, measured in function evaluations. + beta : float, optional + Quantile to compute, e.g., beta quantile; in (0, 1). + solve_tol : float, optional + Relative optimality gap definining when a problem is solved; in (0, 1]. + """ + plt.figure() + # Set up axes and axis labels. 
def setup_plot(plot_type, solver_name="SOLVER SET", problem_name="PROBLEM SET", normalize=True, budget=None, beta=None, solve_tol=None):
    """Create new figure. Add labels to plot and reformat axes.

    Parameters
    ----------
    plot_type : str
        String indicating which type of plot to produce:
            "all" : all estimated progress curves;

            "mean" : estimated mean progress curve;

            "quantile" : estimated beta quantile progress curve;

            "solve_time_cdf" : cdf of solve time;

            "cdf_solvability" : cdf solvability profile;

            "quantile_solvability" : quantile solvability profile;

            "diff_cdf_solvability" : difference of cdf solvability profiles;

            "diff_quantile_solvability" : difference of quantile solvability profiles;

            "area" : area scatterplot;

            "box" : box plot of terminal progress;

            "violin" : violin plot of terminal progress;

            "terminal_scatter" : scatterplot of mean and std dev of terminal progress.
    solver_name : str, default="SOLVER_SET"
        Name of solver.
    problem_name : str, default="PROBLEM_SET"
        Name of problem.
    normalize : bool, default=True
        True if progress curves are to be normalized w.r.t. optimality gaps,
        otherwise False.
    budget : int, optional
        Budget of problem, measured in function evaluations.
    beta : float, optional
        Quantile to compute, e.g., beta quantile; in (0, 1).
    solve_tol : float, optional
        Relative optimality gap defining when a problem is solved; in (0, 1].

    Raises
    ------
    ValueError
        If `plot_type` is not one of the recognized plot types.
    """
    plt.figure()
    # Set up axes and axis labels.
    if normalize:
        plt.ylabel("Fraction of Initial Optimality Gap", size=14)
        if plot_type != "box" and plot_type != "violin":
            plt.xlabel("Fraction of Budget", size=14)
            plt.xlim((0, 1))
            plt.ylim((-0.1, 1.1))
            plt.tick_params(axis="both", which="major", labelsize=12)
    else:
        plt.ylabel("Objective Function Value", size=14)
        if plot_type != "box" and plot_type != "violin":
            plt.xlabel("Budget", size=14)
            plt.xlim((0, budget))
            plt.tick_params(axis="both", which="major", labelsize=12)
    # Specify title (plus alternative y-axis label and alternative axes).
    if plot_type == "all":
        if normalize:
            title = f"{solver_name} on {problem_name}\nProgress Curves"
        else:
            title = f"{solver_name} on {problem_name}\nObjective Curves"
    elif plot_type == "mean":
        if normalize:
            title = f"{solver_name} on {problem_name}\nMean Progress Curve"
        else:
            title = f"{solver_name} on {problem_name}\nMean Objective Curve"
    elif plot_type == "quantile":
        if normalize:
            title = f"{solver_name} on {problem_name}\n{round(beta, 2)}-Quantile Progress Curve"
        else:
            title = f"{solver_name} on {problem_name}\n{round(beta, 2)}-Quantile Objective Curve"
    elif plot_type == "solve_time_cdf":
        plt.ylabel("Fraction of Macroreplications Solved", size=14)
        title = f"{solver_name} on {problem_name}\nCDF of {round(solve_tol, 2)}-Solve Times"
    elif plot_type == "cdf_solvability":
        plt.ylabel("Problem Averaged Solve Fraction", size=14)
        title = f"CDF-Solvability Profile for {solver_name}\nProfile of CDFs of {round(solve_tol, 2)}-Solve Times"
    elif plot_type == "quantile_solvability":
        plt.ylabel("Fraction of Problems Solved", size=14)
        title = f"Quantile Solvability Profile for {solver_name}\nProfile of {round(beta, 2)}-Quantiles of {round(solve_tol, 2)}-Solve Times"
    elif plot_type == "diff_cdf_solvability":
        plt.ylabel("Difference in Problem Averaged Solve Fraction", size=14)
        title = f"Difference of CDF-Solvability Profile for {solver_name}\nDifference of Profiles of CDFs of {round(solve_tol, 2)}-Solve Times"
        # Reference line at zero: solver performs identically to the reference.
        plt.plot([0, 1], [0, 0], color="black", linestyle="--")
        plt.ylim((-1, 1))
    elif plot_type == "diff_quantile_solvability":
        plt.ylabel("Difference in Fraction of Problems Solved", size=14)
        title = f"Difference of Quantile Solvability Profile for {solver_name}\nDifference of Profiles of {round(beta, 2)}-Quantiles of {round(solve_tol, 2)}-Solve Times"
        plt.plot([0, 1], [0, 0], color="black", linestyle="--")
        plt.ylim((-1, 1))
    elif plot_type == "area":
        plt.xlabel("Mean Area", size=14)
        plt.ylabel("Std Dev of Area")
        # plt.xlim((0, 1))
        # plt.ylim((0, 0.5))
        title = f"{solver_name}\nAreas Under Progress Curves"
    elif plot_type == "box" or plot_type == "violin":
        plt.xlabel("Solvers")
        if normalize:
            plt.ylabel("Terminal Progress")
            title = f"{solver_name} on {problem_name}"
        else:
            plt.ylabel("Terminal Objective")
            title = f"{solver_name} on {problem_name}"
    elif plot_type == "terminal_scatter":
        plt.xlabel("Mean Terminal Progress", size=14)
        plt.ylabel("Std Dev of Terminal Progress")
        # plt.xlim((0, 1))
        # plt.ylim((0, 0.5))
        title = f"{solver_name}\nTerminal Progress"
    else:
        # BUG FIX: previously an unrecognized plot_type left `title` unbound
        # and crashed with NameError at plt.title; fail fast and explicitly.
        raise ValueError(f"Unrecognized plot type: {plot_type}.")
    plt.title(title, size=14)
def save_plot(solver_name, problem_name, plot_type, normalize, extra=None):
    """Save the current figure to disk and return the path it was saved to.

    Parameters
    ----------
    solver_name : str
        Name of solver.
    problem_name : str
        Name of problem.
    plot_type : str
        String indicating which type of plot to produce:
            "all" : all estimated progress curves;

            "mean" : estimated mean progress curve;

            "quantile" : estimated beta quantile progress curve;

            "solve_time_cdf" : cdf of solve time;

            "cdf_solvability" : cdf solvability profile;

            "quantile_solvability" : quantile solvability profile;

            "diff_cdf_solvability" : difference of cdf solvability profiles;

            "diff_quantile_solvability" : difference of quantile solvability profiles;

            "area" : area scatterplot;

            "terminal_scatter" : scatterplot of mean and std dev of terminal progress.
    normalize : bool
        True if progress curves are to be normalized w.r.t. optimality gaps,
        otherwise False.
    extra : float or list [float], optional
        Extra number(s) specifying quantile (e.g., beta) and/or solve tolerance.

    Returns
    -------
    path_name : str
        Path name pointing to location where plot will be saved.

    Raises
    ------
    ValueError
        If `plot_type` is not one of the recognized plot types.
    """
    # Form string name for plot filename.
    if plot_type == "all":
        plot_name = "all_prog_curves"
    elif plot_type == "mean":
        plot_name = "mean_prog_curve"
    elif plot_type == "quantile":
        plot_name = f"{extra}_quantile_prog_curve"
    elif plot_type == "solve_time_cdf":
        plot_name = f"cdf_{extra}_solve_times"
    elif plot_type == "cdf_solvability":
        plot_name = f"profile_cdf_{extra}_solve_times"
    elif plot_type == "quantile_solvability":
        # extra = [solve_tol, beta] for quantile-based profiles.
        plot_name = f"profile_{extra[1]}_quantile_{extra[0]}_solve_times"
    elif plot_type == "diff_cdf_solvability":
        plot_name = f"diff_profile_cdf_{extra}_solve_times"
    elif plot_type == "diff_quantile_solvability":
        plot_name = f"diff_profile_{extra[1]}_quantile_{extra[0]}_solve_times"
    elif plot_type == "area":
        plot_name = "area_scatterplot"
    elif plot_type == "box":
        plot_name = "terminal_box"
    elif plot_type == "violin":
        plot_name = "terminal_violin"
    elif plot_type == "terminal_scatter":
        plot_name = "terminal_scatter"
    else:
        # BUG FIX: previously an unrecognized plot_type left `plot_name`
        # unbound and crashed with NameError below; fail fast instead.
        raise ValueError(f"Unrecognized plot type: {plot_type}.")
    if not normalize:
        plot_name = plot_name + "_unnorm"
    path_name = f"experiments/plots/{solver_name}_on_{problem_name}_{plot_name}.png"
    # Reformat path_name to be suitable as a string literal.
    path_name = path_name.replace("\\", "")
    path_name = path_name.replace("$", "")
    path_name = path_name.replace(" ", "_")
    # Create directories if they do not exist.
    # BUG FIX: the old exists()-then-makedirs() pair was racy and only created
    # the parent with exist_ok; makedirs with exist_ok=True covers both levels.
    os.makedirs("./experiments/plots", exist_ok=True)
    plt.savefig(path_name, bbox_inches="tight")
    # Return path_name for use in GUI.
    return path_name
    def __init__(self, solver_names=None, problem_names=None, solver_renames=None, problem_renames=None, fixed_factors_filename=None, solvers=None, problems=None, experiments=None, file_name_path=None):
        """There are three ways to create a ProblemsSolvers object:
        1. Provide the names of the solvers and problems to look up in directory.py.
        2. Provide the lists of unique solver and problem objects to pair.
        3. Provide a list of list of ProblemSolver objects.

        Notes
        -----
        TO DO: If loading some ProblemSolver objects from file,
        check that their factors match those in the overall ProblemsSolvers.
        """
        if experiments is not None:  # Method #3
            # Experiments grid given directly: rows index solvers, columns index problems.
            self.experiments = experiments
            self.solvers = [experiments[idx][0].solver for idx in range(len(experiments))]
            self.problems = [experiment.problem for experiment in experiments[0]]
            self.solver_names = [solver.name for solver in self.solvers]
            self.problem_names = [problem.name for problem in self.problems]
            self.n_solvers = len(self.solvers)
            self.n_problems = len(self.problems)
        elif solvers is not None and problems is not None:  # Method #2
            # Cross every solver object with every problem object.
            self.experiments = [[ProblemSolver(solver=solver, problem=problem) for problem in problems] for solver in solvers]
            self.solvers = solvers
            self.problems = problems
            self.solver_names = [solver.name for solver in self.solvers]
            self.problem_names = [problem.name for problem in self.problems]
            self.n_solvers = len(self.solvers)
            self.n_problems = len(self.problems)
        elif solvers is None and problems is not None:  # Method by providing solver and problem names
            # Hybrid path: solvers looked up by name in solver_directory, paired
            # with the given problem objects.
            self.experiments = [[ProblemSolver(solver_name=solver_name, problem=problem) for problem in problems] for solver_name in solver_names]
            self.solvers = [solver_directory[solver_name](name=solver_name) for solver_name in solver_names]
            self.solver_names = solver_names
            self.problems = problems
            self.problem_names = [problem.name for problem in self.problems]
            self.n_solvers = len(self.solvers)
            self.n_problems = len(self.problems)
        else:  # Method #1
            # Optional renames affect only labels and output file names; lookups
            # below still use the original solver_names/problem_names.
            if solver_renames is None:
                self.solver_names = solver_names
            else:
                self.solver_names = solver_renames
            if problem_renames is None:
                self.problem_names = problem_names
            else:
                self.problem_names = problem_renames
            self.n_solvers = len(solver_names)
            self.n_problems = len(problem_names)
            # Read in fixed solver/problem/model factors from .py file in the experiments folder.
            # File should contain three dictionaries of dictionaries called
            #   - all_solver_fixed_factors
            #   - all_problem_fixed_factors
            #   - all_model_fixed_factors
            if fixed_factors_filename is None:
                self.all_solver_fixed_factors = {solver_name: {} for solver_name in self.solver_names}
                self.all_problem_fixed_factors = {problem_name: {} for problem_name in self.problem_names}
                self.all_model_fixed_factors = {problem_name: {} for problem_name in self.problem_names}
            else:
                fixed_factors_filename = "experiments.inputs." + fixed_factors_filename
                all_factors = importlib.import_module(fixed_factors_filename)
                self.all_solver_fixed_factors = getattr(all_factors, "all_solver_fixed_factors")
                self.all_problem_fixed_factors = getattr(all_factors, "all_problem_fixed_factors")
                self.all_model_fixed_factors = getattr(all_factors, "all_model_fixed_factors")
            # Create all problem-solver pairs (i.e., instances of ProblemSolver class)
            self.experiments = []
            for solver_idx in range(self.n_solvers):
                solver_experiments = []
                for problem_idx in range(self.n_problems):
                    try:
                        # If a file exists, read in ProblemSolver object.
                        with open(f"./experiments/outputs/{self.solver_names[solver_idx]}_on_{self.problem_names[problem_idx]}.pickle", "rb") as file:
                            next_experiment = pickle.load(file)
                        # TODO: Check if the solver/problem/model factors in the file match
                        # those for the ProblemsSolvers.
                    except Exception:
                        # If no file exists, create new ProblemSolver object.
                        # NOTE(review): any failure (not just a missing file) falls
                        # through to rebuilding the experiment — confirm intended.
                        print(f"No experiment file exists for {self.solver_names[solver_idx]} on {self.problem_names[problem_idx]}. Creating new experiment.")
                        next_experiment = ProblemSolver(solver_name=solver_names[solver_idx],
                                                        problem_name=problem_names[problem_idx],
                                                        solver_rename=self.solver_names[solver_idx],
                                                        problem_rename=self.problem_names[problem_idx],
                                                        solver_fixed_factors=self.all_solver_fixed_factors[self.solver_names[solver_idx]],
                                                        problem_fixed_factors=self.all_problem_fixed_factors[self.problem_names[problem_idx]],
                                                        model_fixed_factors=self.all_model_fixed_factors[self.problem_names[problem_idx]]
                                                        )
                    solver_experiments.append(next_experiment)
                self.experiments.append(solver_experiments)
            self.solvers = [self.experiments[idx][0].solver for idx in range(len(self.experiments))]
            self.problems = [experiment.problem for experiment in self.experiments[0]]
        # Initialize file path.
        if file_name_path is None:
            solver_names_string = "_".join(self.solver_names)
            problem_names_string = "_".join(self.problem_names)
            self.file_name_path = f"./experiments/outputs/group_{solver_names_string}_on_{problem_names_string}.pickle"
        else:
            self.file_name_path = file_name_path
+ """ + for solver_idx in range(self.n_solvers): + for problem_idx in range(self.n_problems): + experiment = self.experiments[solver_idx][problem_idx] + # If the problem-solver pair has not been run in this way before, + # run it now and save result to .pickle file. + if (getattr(experiment, "n_macroreps", None) != n_macroreps): + print(f"Running {n_macroreps} macro-replications of {experiment.solver.name} on {experiment.problem.name}.") + experiment.clear_run() + experiment.run(n_macroreps) + # Save ProblemsSolvers object to .pickle file. + self.record_group_experiment_results() + + def post_replicate(self, n_postreps, crn_across_budget=True, crn_across_macroreps=False): + """For each problem-solver pair, run postreplications at solutions + recommended by the solver on each macroreplication. + + Parameters + ---------- + n_postreps : int + Number of postreplications to take at each recommended solution. + crn_across_budget : bool, default=True + True if CRN used for post-replications at solutions recommended at different times, + otherwise False. + crn_across_macroreps : bool, default=False + True if CRN used for post-replications at solutions recommended on different + macroreplications, otherwise False. + """ + for solver_index in range(self.n_solvers): + for problem_index in range(self.n_problems): + experiment = self.experiments[solver_index][problem_index] + # If the problem-solver pair has not been post-replicated in this way before, + # post-process it now. + if (getattr(experiment, "n_postreps", None) != n_postreps + or getattr(experiment, "crn_across_budget", None) != crn_across_budget + or getattr(experiment, "crn_across_macroreps", None) != crn_across_macroreps): + print(f"Post-processing {experiment.solver.name} on {experiment.problem.name}.") + experiment.clear_postreplicate() + experiment.post_replicate(n_postreps, crn_across_budget, crn_across_macroreps) + # Save ProblemsSolvers object to .pickle file. 
def read_group_experiment_results(file_name_path):
    """Read in ``experiment_base.ProblemsSolvers`` object from .pickle file.

    Parameters
    ----------
    file_name_path : str
        Path of .pickle file for reading ``experiment_base.ProblemsSolvers`` object.

    Returns
    -------
    groupexperiment : ``experiment_base.ProblemsSolvers``
        Problem-solver group that has been run or has been post-processed.
    """
    with open(file_name_path, "rb") as pickle_file:
        return pickle.load(pickle_file)
def find_missing_experiments(experiments):
    """Identify problem-solver pairs that are not part of a list
    of experiments.

    Parameters
    ----------
    experiments : list [``experiment_base.ProblemSolver``]
        Problem-solver pairs of different solvers on different problems.

    Returns
    -------
    unique_solvers : list [``base.Solver``]
        List of solvers present in the list of experiments
    unique_problems : list [``base.Problem``]
        List of problems present in the list of experiments.
    missing : list [tuple [``base.Solver``, ``base.Problem``]]
        List of names of missing problem-solver pairs.
    """
    unique_solvers, unique_problems = find_unique_solvers_problems(experiments)
    existing_pairs = [(experiment.solver, experiment.problem) for experiment in experiments]
    # Any (solver, problem) combination not already paired is missing.
    missing = [(solver, problem)
               for solver in unique_solvers
               for problem in unique_problems
               if (solver, problem) not in existing_pairs]
    return unique_solvers, unique_problems, missing
def make_full_metaexperiment(existing_experiments, unique_solvers, unique_problems, missing_experiments):
    """Create experiment objects for missing problem-solver pairs
    and run them.

    Parameters
    ----------
    existing_experiments : list [``experiment_base.ProblemSolver``]
        Problem-solver pairs of different solvers on different problems.
    unique_solvers : list [``base.Solver objects``]
        List of solvers present in the list of experiments.
    unique_problems : list [``base.Problem``]
        List of problems present in the list of experiments.
    missing_experiments : list [tuple [``base.Solver``, ``base.Problem``]]
        List of missing problem-solver pairs.

    Returns
    -------
    metaexperiment : ``experiment_base.ProblemsSolvers``
        New ProblemsSolvers object.
    """
    # Ordering of solvers and problems in unique_solvers and unique_problems
    # is used to construct experiments.
    n_solvers = len(unique_solvers)
    n_problems = len(unique_problems)
    full_experiments = [[[] for _ in range(n_problems)] for _ in range(n_solvers)]
    # Slot existing experiments into the grid by their (solver, problem) position.
    for experiment in existing_experiments:
        row = unique_solvers.index(experiment.solver)
        col = unique_problems.index(experiment.problem)
        full_experiments[row][col] = experiment
    # Fill the remaining cells with freshly constructed ProblemSolver objects.
    for solver, problem in missing_experiments:
        row = unique_solvers.index(solver)
        col = unique_problems.index(problem)
        full_experiments[row][col] = ProblemSolver(solver=solver, problem=problem)
    return ProblemsSolvers(experiments=full_experiments)
"""
Summary
-------
Simulate duration of a stochastic Max-Flow network (SMF). This version is
able to generate random problem instances (with random model and
problem factors).
A detailed description of the model/problem can be found
`here `_.
"""

import numpy as np
from ortools.graph.python import max_flow
from ..base import Model, Problem


class SMF(Model):
    """
    A model that simulates a stochastic Max-Flow problem in which
    arc capacities are reduced by multivariate-distributed noise.

    Attributes
    ----------
    name : string
        name of model
    n_rngs : int
        number of random-number generators used to run a simulation replication
    n_responses : int
        number of responses (performance measures)
    factors : dict
        changeable factors of the simulation model
    specifications : dict
        details of each factor (for GUI and data validation)
    check_factor_list : dict
        switch case for checking factor simulatability

    Arguments
    ---------
    fixed_factors : nested dict
        fixed factors of the simulation model

    See also
    --------
    base.Model
    """
    def __init__(self, fixed_factors=None, random = False):
        # Avoid a mutable default argument for fixed_factors.
        if fixed_factors is None:
            fixed_factors = {}
        self.name = "SMF"
        self.n_rngs = 1
        # NOTE(review): n_random presumably counts the extra RNG streams used
        # for random instance generation (see attach_rng usage in the demo
        # scripts) -- confirm against the Model base class.
        self.n_random = 3
        # Whether this instance was created as a randomly generated problem.
        self.random = random
        self.n_responses = 1
        # Default noise covariance: 20x20 diagonal matrix with variance 4,
        # one row/column per default arc.
        cov_fac = np.zeros((20, 20))
        np.fill_diagonal(cov_fac, 4)
        cov_fac = cov_fac.tolist()
        self.specifications = {
            "num_nodes": {
                "description": "number of nodes, 0 being the source, highest being the sink",
                "datatype": int,
                "default": 10
            },
            "source_index": {
                "description": "source node index",
                "datatype": int,
                "default": 0
            },
            "sink_index": {
                "description": "sink node index",
                "datatype": int,
                "default": 9
            },
            "arcs": {
                "description": "list of arcs",
                "datatype": list,
                "default": [(0, 1), (0, 2), (0, 3), (1, 2), (1, 4), (2, 4), (4, 2), (3, 2), (2, 5), (4, 5), (3, 6), (3, 7), (6, 2), (6, 5), (6, 7), (5, 8), (6, 8), (6, 9), (7, 9), (8, 9)]
            },
            "num_arcs": {
                "description": "number of arcs to be generated",
                "datatype": int,
                "default": 20
            },
            "assigned_capacities": {
                "description": "Assigned capacity of each arc",
                "datatype": list,
                "default": [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5]
                # "default": [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5]
            },
            "mean_noise": {
                "description": "The mean noise in reduction of arc capacities",
                "datatype": list,
                "default": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            },
            "cov_noise": {
                "description": "Covariance matrix of noise",
                "datatype": list,
                "default": cov_fac
            }

        }
        self.check_factor_list = {
            "num_nodes": self.check_num_nodes,
            "arcs": self.check_arcs,
            "assigned_capacities": self.check_capacities,
            "mean_noise": self.check_mean,
            "cov_noise": self.check_cov,
            "source_index": self.check_s,
            "sink_index": self.check_t,
            "num_arcs": self.check_num_arcs,
        }
        # Set factors of the simulation model.
        super().__init__(fixed_factors)

    def check_num_nodes(self):
        # Factor check: the network must have at least one node.
        return self.factors["num_nodes"] > 0

    def dfs(self, graph, start, visited=None):
        """Recursive depth-first search over an adjacency-set graph.

        Returns the set of nodes reachable from `start` (including `start`).
        """
        if visited is None:
            visited = set()
        visited.add(start)
        for next in graph[start] - visited:
            self.dfs(graph, next, visited)
        return visited
+ graph = {node: set() for node in range(0, self.factors["num_nodes"])} + for a in self.factors["arcs"]: + graph[a[0]].add(a[1]) + visited = self.dfs(graph, self.factors["source_index"]) + if self.factors["source_index"] in visited and self.factors["sink_index"] in visited: + return True + return False + + def check_capacities(self): + positive = True + for x in list(self.factors["assigned_capacities"]): + positive = positive & (x > 0) + return (len(self.factors["assigned_capacities"]) == len(self.factors["arcs"])) & positive + + def check_mean(self): + return len(self.factors["mean_noise"]) == len(self.factors["arcs"]) + + def check_cov(self): + return np.array(self.factors["cov_noise"]).shape == (len(self.factors["arcs"]), len(self.factors["arcs"])) + + def check_s(self): + return self.factors["source_index"] >= 0 and self.factors["source_index"] <= self.factors["num_nodes"] + + def check_t(self): + return self.factors["sink_index"] >= 0 and self.factors["sink_index"] <= self.factors["num_nodes"] + + def check_num_arcs(self): + return self.factors["num_arcs"] > 0 + + + def get_arcs(self, num_nodes, num_arcs, source, end, uni_rng): + # Generate a random graph + self.rand_fuc = True + + set_arcs = [] + for n1 in range(0, num_nodes - 1): + for n2 in range(n1 + 1, num_nodes): + set_arcs.append((n1, n2)) + + arcs = [(source, source + 1), (end - 1, end)] + remove = [] + def get_in(arcs, num_nodes, ind, in_ind=True): + global remove + if len(arcs) <= 0: + return False + graph = {node: set() for node in range(0, num_nodes)} + for a in arcs: + if in_ind == True: + graph[a[0]].add(a[1]) + else: + graph[a[1]].add(a[0]) + set0 = graph[ind] + for i in graph[ind]: + set0 = {*set0, *graph[i]} + for j in graph[i]: + set0 = {*set0, *graph[j]} + + if in_ind == True: + for j in set0 - graph[ind]: + if j in graph[ind]: + remove.append((ind, j)) + + set0 = {*set0, ind} + return set0 + + set0 = get_in(arcs, num_nodes, source) + for i in range(source + 1, end): + set0 = get_in(arcs, 
num_nodes, source) + if i not in set0: + set1 = list(get_in(arcs, num_nodes, i, False)) + n2 = set1[uni_rng.randint(0, len(set1)-1)] + set2 = [i for i in set0 if i < n2] + n1 = list(set2)[uni_rng.randint(0, len(set2)-1)] + + arc = (n1, n2) + arcs = {*arcs, arc} + + for i in range(1, num_nodes - 1): + set9 = get_in(arcs, num_nodes, i) + if end not in set9: + set_out = list(get_in(arcs, num_nodes, end, False)) + n1 = list(set9)[uni_rng.randint(0, len(set9)-1)] + set2 = [i for i in set_out if i > n1] + n2 = set2[uni_rng.randint(0, len(set2)-1)] + arc = (n1, n2) + arcs = {*arcs, arc} + + if len(arcs) < num_arcs: + remain_num = num_arcs - len(arcs) + remain = list(set(set_arcs) - set(arcs)) + idx = uni_rng.sample(range(0, len(remain)), remain_num) + aa = set([remain[i] for i in idx]) + arcs = {*arcs, *aa} + + else: + return list(arcs) + + return list(arcs) + + def get_covariance(self, num_arcs, cov_rng): + # Generate random covariance matrix + self.rand_fuc = True + random_values = [cov_rng.uniform(0, 1) for i in range(num_arcs*num_arcs)] + random_values = np.array(random_values).reshape((num_arcs, num_arcs)) + covariance_matrix = np.cov(random_values, rowvar=False) + 1 * np.eye(num_arcs) + + return covariance_matrix.tolist() + + def attach_rng(self, random_rng): + self.random_rng = random_rng + self.rand_fuc = False + + self.factors["sink_index"] = self.factors["num_nodes"] - 1 + arcs_set = self.get_arcs(self.factors["num_nodes"], self.factors["num_arcs"], self.factors["source_index"], self.factors["sink_index"], random_rng[0]) + arcs_set.sort(key=lambda a: a[1]) + arcs_set.sort(key=lambda a: a[0]) + self.factors["arcs"] = arcs_set + print('arcs: ', arcs_set) + self.factors["num_arcs"] = len(self.factors["arcs"]) + + self.factors["mean_noise"] = [0 for i in range(len(self.factors["arcs"]))] + + self.factors["cov_noise"] = self.get_covariance(self.factors["num_arcs"], random_rng[1]) + + + def replicate(self, rng_list): + """ + Simulate a single replication for the 
current model factors. + + Arguments + --------- + rng_list : list of mrg32k3a.mrg32k3a.MRG32k3a + rngs for model to use when simulating a replication + + Returns + ------- + responses : dict + performance measures of interest + "longest_path_length" = length/duration of longest path + gradients : dict of dicts + gradient estimates for each response + """ + # Designate separate random number generators. + solver = max_flow.SimpleMaxFlow() + exp_rng = rng_list[0] + + # From input graph generate start end end nodes. + start_nodes = [] + end_nodes = [] + for i, j in self.factors["arcs"]: + start_nodes.append(i) + end_nodes.append(j) + # Generate actual capacity. + for i in range(len(self.factors["arcs"])): + noise = exp_rng.mvnormalvariate(self.factors["mean_noise"], np.array(self.factors["cov_noise"])) + capacities = [] + for i in range(len(noise)): + capacities.append(max(1000 * (self.factors["assigned_capacities"][i] - noise[i]), 0)) + # Add arcs in bulk. + solver.add_arcs_with_capacity(start_nodes, end_nodes, capacities) + status = solver.solve(self.factors["source_index"], self.factors["sink_index"]) + if status != solver.OPTIMAL: + print('There was an issue with the max flow input.') + print(f'Status: {status}') + exit(1) + + # Construct gradient vector (=1 if has a outflow from min-cut nodes). 
+ gradient = np.zeros(len(self.factors["arcs"])) + grad_arclist = [] + min_cut_nodes = solver.get_source_side_min_cut() + for i in min_cut_nodes: + for j in range(self.factors['num_nodes']): + if j not in min_cut_nodes: + grad_arc = (i, j) + if (i, j) in self.factors['arcs']: + grad_arclist.append(grad_arc) + for arc in grad_arclist: + gradient[self.factors['arcs'].index(arc)] = 1 + + responses = {"Max Flow": solver.optimal_flow() / 1000} + gradients = {response_key: {factor_key: np.nan for factor_key in self.specifications} for response_key in responses} + gradients["Max Flow"]["assigned_capacities"] = gradient + return responses, gradients + + +""" +Summary +------- +Maximize the expected max flow from the source node s to the sink node t. +""" + + +class SMF_Max(Problem): + """ + Base class to implement simulation-optimization problems. + + Attributes + ---------- + name : string + name of problem + dim : int + number of decision variables + n_objectives : int + number of objectives + n_stochastic_constraints : int + number of stochastic constraints + minmax : tuple of int (+/- 1) + indicator of maximization (+1) or minimization (-1) for each objective + constraint_type : string + description of constraints types: + "unconstrained", "box", "deterministic", "stochastic" + variable_type : string + description of variable types: + "discrete", "continuous", "mixed" + lower_bounds : tuple + lower bound for each decision variable + upper_bounds : tuple + upper bound for each decision variable + Ci : ndarray (or None) + Coefficient matrix for linear inequality constraints of the form Ci@x <= di + Ce : ndarray (or None) + Coefficient matrix for linear equality constraints of the form Ce@x = de + di : ndarray (or None) + Constraint vector for linear inequality constraints of the form Ci@x <= di + de : ndarray (or None) + Constraint vector for linear equality constraints of the form Ce@x = de + gradient_available : bool + indicates if gradient of objective function is 
available + optimal_value : tuple + optimal objective function value + optimal_solution : tuple + optimal solution + model : Model object + associated simulation model that generates replications + model_default_factors : dict + default values for overriding model-level default factors + model_fixed_factors : dict + combination of overriden model-level factors and defaults + model_decision_factors : set of str + set of keys for factors that are decision variables + rng_list : list of mrg32k3a.mrg32k3a.MRG32k3a objects + list of RNGs used to generate a random initial solution + or a random problem instance + factors : dict + changeable factors of the problem + initial_solution : list + default initial solution from which solvers start + budget : int > 0 + max number of replications (fn evals) for a solver to take + specifications : dict + details of each factor (for GUI, data validation, and defaults) + + Arguments + --------- + name : str + user-specified name for problem + fixed_factors : dict + dictionary of user-specified problem factors + model_fixed factors : dict + subset of user-specified non-decision factors to pass through to the model + + See also + -------- + base.Problem + """ + def __init__(self, name="SMF-1", fixed_factors=None, model_fixed_factors=None, random=False, random_rng=None): + if fixed_factors is None: + fixed_factors = {} + if model_fixed_factors is None: + model_fixed_factors = {} + self.name = name + self.n_objectives = 1 + self.n_stochastic_constraints = 0 + self.minmax = (1, ) + self.constraint_type = "deterministic" + self.variable_type = "continuous" + self.gradient_available = True + self.optimal_value = None + self.optimal_solution = None + self.model_default_factors = {} + self.model_decision_factors = {"assigned_capacities"} + self.factors = fixed_factors + self.random = random + self.n_rngs = 1 + self.specifications = { + "initial_solution": { + "description": "initial solution", + "datatype": tuple, + "default": (1, ) * 20 + }, 
+ "budget": { + "description": "max # of replications for a solver to take", + "datatype": int, + "default": 10000 + }, + "cap": { + "description": "total set-capacity to be allocated to arcs.", + "datatype": int, + "default": 100 + } + } + self.check_factor_list = { + "initial_solution": self.check_initial_solution, + "budget": self.check_budget, + "cap": self.check_cap + } + super().__init__(fixed_factors, model_fixed_factors) + # Instantiate model with fixed factors and over-riden defaults and the random status. + self.model = SMF(self.model_fixed_factors, random) + if random and random_rng != None: + self.model.attach_rng(random_rng) + if self.model.rand_fuc == False: + print("Error: No random generator exists.") + return False + # self.dim = len(self.model.factors["arcs"]) + self.dim = self.model.factors["num_arcs"] + self.lower_bounds = (0, ) * self.dim + self.upper_bounds = (np.inf, ) * self.dim + self.factors["initial_solution"] = (1,) * self.dim + self.Ci = np.ones(self.dim) + self.Ce = None + self.di = np.array([self.factors["cap"]]) + self.de = None + + def check_cap(self): + return self.factors["cap"] >= 0 + + def vector_to_factor_dict(self, vector): + """ + Convert a vector of variables to a dictionary with factor keys + + Arguments + --------- + vector : tuple + vector of values associated with decision variables + + Returns + ------- + factor_dict : dictionary + dictionary with factor keys and associated values + """ + factor_dict = { + "assigned_capacities": vector[:] + } + return factor_dict + + def factor_dict_to_vector(self, factor_dict): + """ + Convert a dictionary with factor keys to a vector + of variables. 
+ + Arguments + --------- + factor_dict : dictionary + dictionary with factor keys and associated values + + Returns + ------- + vector : tuple + vector of values associated with decision variables + """ + vector = tuple(factor_dict["assigned_capacities"]) + return vector + + def random_budget(self, uni_rng): + # Choose a random budget + l = [300, 400, 500, 600, 700, 800, 900, 1000] + budget = uni_rng.choice(l) * self.dim + + return budget + + def attach_rngs(self, random_rng): + # Attach rng for problem class and generate random problem factors for random instances + self.random_rng = random_rng + + # For random version, randomize problem factors + if self.random: + self.factors["budget"] = self.random_budget(random_rng[0]) + print('budget: ', self.factors["budget"]) + + return random_rng + + def response_dict_to_objectives(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of objectives. + + Arguments + --------- + response_dict : dictionary + dictionary with response keys and associated values + + Returns + ------- + objectives : tuple + vector of objectives + """ + objectives = (response_dict["Max Flow"], ) + return objectives + + def response_dict_to_stoch_constraints(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of left-hand sides of stochastic constraints: E[Y] <= 0 + + Arguments + --------- + response_dict : dictionary + dictionary with response keys and associated values + + Returns + ------- + stoch_constraints : tuple + vector of LHSs of stochastic constraint + """ + stoch_constraints = None + return stoch_constraints + + def deterministic_stochastic_constraints_and_gradients(self, x): + """ + Compute deterministic components of stochastic constraints for a solution `x`. 
+ + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_stoch_constraints : tuple + vector of deterministic components of stochastic constraints + det_stoch_constraints_gradients : tuple + vector of gradients of deterministic components of stochastic constraints + """ + det_stoch_constraints = None + det_stoch_constraints_gradients = None + return det_stoch_constraints, det_stoch_constraints_gradients + + def deterministic_objectives_and_gradients(self, x): + """ + Compute deterministic components of objectives for a solution `x`. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_objectives : tuple + vector of deterministic components of objectives + det_objectives_gradients : tuple + vector of gradients of deterministic components of objectives + """ + det_objectives = (0, ) + det_objectives_gradients = ((0, ) * self.dim,) + return det_objectives, det_objectives_gradients + + def check_deterministic_constraints(self, x): + """ + Check if a solution `x` satisfies the problem's deterministic constraints. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + satisfies : bool + indicates if solution `x` satisfies the deterministic constraints. + """ + + return sum(self.factors["assigned_capacities"]) <= self.factors["cap"] + + def get_random_solution(self, rand_sol_rng): + """ + Generate a random solution for starting or restarting solvers. 
+ + Arguments + --------- + rand_sol_rng : mrg32k3a.mrg32k3a.MRG32k3a object + random-number generator used to sample a new random solution + + Returns + ------- + x : tuple + vector of decision variables + """ + x = rand_sol_rng.continuous_random_vector_from_simplex(len(self.model.factors["arcs"]), self.factors["cap"], False) + return x From a00804c5f38430c12d435dcdac5a616265dcea19 Mon Sep 17 00:00:00 2001 From: liulitong-Jessie <46491025+liulitong-Jessie@users.noreply.github.com> Date: Tue, 15 Aug 2023 18:12:07 -0400 Subject: [PATCH 04/21] Delete base.py --- base.py | 989 -------------------------------------------------------- 1 file changed, 989 deletions(-) delete mode 100644 base.py diff --git a/base.py b/base.py deleted file mode 100644 index e2e8b47d0..000000000 --- a/base.py +++ /dev/null @@ -1,989 +0,0 @@ -#!/usr/bin/env python -""" -Summary -------- -Provide base classes for solvers, problems, and models. -This is the modified version to generate and run random model/random problem instance. -""" - -import numpy as np -from copy import deepcopy -import sys -import os.path as o -# from mrg32k3a.mrg32k3a import MRG32k3a -from rng.mrg32k3a import MRG32k3a #when do the multinomial, change to the local - -from simopt.auto_diff_util import bi_dict, replicate_wrapper - -sys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), ".."))) - - -class Solver(object): - """Base class to implement simulation-optimization solvers. - - Attributes - ---------- - name : str - Name of solver. - objective_type : str - Description of objective types: "single" or "multi". - constraint_type : str - Description of constraints types: "unconstrained", "box", "deterministic", "stochastic". - variable_type : str - Description of variable types: "discrete", "continuous", "mixed". - gradient_needed : bool - True if gradient of objective function is needed, otherwise False. - factors : dict - Changeable factors (i.e., parameters) of the solver. 
- specifications : dict - Details of each factor (for GUI, data validation, and defaults). - rng_list : list [``mrg32k3a.mrg32k3a.MRG32k3a``] - List of RNGs used for the solver's internal purposes. - solution_progenitor_rngs : list [``mrg32k3a.mrg32k3a.MRG32k3a``] - List of RNGs used as a baseline for simulating solutions. - - Parameters - ---------- - fixed_factors : dict - Dictionary of user-specified solver factors. - """ - def __init__(self, fixed_factors): - # Set factors of the solver. - # Fill in missing factors with default values. - self.factors = fixed_factors - for key in self.specifications: - if key not in fixed_factors: - self.factors[key] = self.specifications[key]["default"] - - def __eq__(self, other): - """Check if two solvers are equivalent. - - Parameters - ---------- - other : ``base.Solver`` - Other Solver object to compare to self. - - Returns - ------- - bool - True if the two solvers are equivalent, otherwise False. - """ - if type(self) == type(other): - if self.factors == other.factors: - return True - else: - # print("Solver factors do not match.") - return False - else: - # print("Solver types do not match.") - return False - - def attach_rngs(self, rng_list): - """Attach a list of random-number generators to the solver. - - Parameters - ---------- - rng_list : list [``mrg32k3a.mrg32k3a.MRG32k3a``] - List of random-number generators used for the solver's internal purposes. - """ - self.rng_list = rng_list - - def solve(self, problem): - """Run a single macroreplication of a solver on a problem. - - Notes - ----- - Each subclass of ``base.Solver`` has its own custom ``solve`` method. - - Parameters - ---------- - problem : ``base.Problem`` - Simulation-optimization problem to solve. - - Returns - ------- - recommended_solns : list [``Solution``] - List of solutions recommended throughout the budget. - intermediate_budgets : list [int] - List of intermediate budgets when recommended solutions changes. 
- """ - raise NotImplementedError - - def check_crn_across_solns(self): - """Check solver factor crn_across_solns. - - Notes - ----- - Currently implemented to always return True. This factor must be a bool. - """ - return True - - def check_solver_factor(self, factor_name): - """Determine if the setting of a solver factor is permissible. - - Parameters - ---------- - factor_name : str - Name of factor for dictionary lookup (i.e., key). - - Returns - ------- - is_permissible : bool - True if the solver factor is permissible, otherwise False. - """ - is_permissible = True - is_permissible *= self.check_factor_datatype(factor_name) - is_permissible *= self.check_factor_list[factor_name]() - return is_permissible - # raise NotImplementedError - - def check_solver_factors(self): - """Determine if the joint settings of solver factors are permissible. - - Notes - ----- - Each subclass of ``base.Solver`` has its own custom ``check_solver_factors`` method. - - Returns - ------- - is_simulatable : bool - True if the solver factors are permissible, otherwise False. - """ - return True - # raise NotImplementedError - - def check_factor_datatype(self, factor_name): - """Determine if a factor's data type matches its specification. - - Parameters - ---------- - factor_name : str - String corresponding to name of factor to check. - - Returns - ------- - is_right_type : bool - True if factor is of specified data type, otherwise False. - """ - is_right_type = isinstance(self.factors[factor_name], self.specifications[factor_name]["datatype"]) - return is_right_type - - def create_new_solution(self, x, problem): - """Create a new solution object with attached RNGs primed - to simulate replications. - - Parameters - ---------- - x : tuple - Vector of decision variables. - problem : ``base.Problem`` - Problem being solved by the solvers. - - Returns - ------- - new_solution : ``base.Solution`` - New solution. - """ - # Create new solution with attached rngs. 
- new_solution = Solution(x, problem) - new_solution.attach_rngs(rng_list=self.solution_progenitor_rngs, copy=True) - # Manipulate progenitor rngs to prepare for next new solution. - if not self.factors["crn_across_solns"]: # If CRN are not used ... - # ...advance each rng to start of the substream = current substream + # of model RNGs. - for rng in self.solution_progenitor_rngs: - for _ in range(problem.model.n_rngs): - rng.advance_substream() - return new_solution - - def rebase(self, n_reps): - """Rebase the progenitor rngs to start at a later subsubstream index. - - Parameters - ---------- - n_reps : int - Substream index to skip to. - """ - new_rngs = [] - for rng in self.solution_progenitor_rngs: - stream_index = rng.s_ss_sss_index[0] - substream_index = rng.s_ss_sss_index[1] - new_rngs.append(MRG32k3a(s_ss_sss_index=[stream_index, substream_index, n_reps])) - self.solution_progenitor_rngs = new_rngs - - -class Problem(object): - """Base class to implement simulation-optimization problems. - - Attributes - ---------- - name : str - Name of problem. - dim : int - Number of decision variables. - n_objectives : int - Number of objectives. - n_stochastic_constraints : int - Number of stochastic constraints. - minmax : tuple [int] - Indicators of maximization (+1) or minimization (-1) for each objective. - constraint_type : str - Description of constraints types: "unconstrained", "box", "deterministic", "stochastic". - variable_type : str - Description of variable types: "discrete", "continuous", "mixed". - lower_bounds : tuple - Lower bound for each decision variable. - upper_bounds : tuple - Upper bound for each decision variable. - gradient_available : bool - True if direct gradient of objective function is available, otherwise False. - optimal_value : float - Optimal objective function value. - optimal_solution : tuple - Optimal solution. - model : ``base.Model`` - Associated simulation model that generates replications. 
- model_default_factors : dict - Default values for overriding model-level default factors. - model_fixed_factors : dict - Combination of overriden model-level factors and defaults. - model_decision_factors : set [str] - Set of keys for factors that are decision variables. - rng_list : list [``mrg32k3a.mrg32k3a.MRG32k3a``] - List of RNGs used to generate a random initial solution - or a random problem instance. - factors : dict - Changeable factors of the problem: - initial_solution : tuple - Default initial solution from which solvers start. - budget : int - Max number of replications (fn evals) for a solver to take. - specifications : dict - Details of each factor (for GUI, data validation, and defaults). - - Parameters - ---------- - fixed_factors : dict - Dictionary of user-specified problem factors. - model_fixed_factors : dict - Subset of user-specified non-decision factors to pass through to the model. - """ - def __init__(self, fixed_factors, model_fixed_factors): - # Set factors of the problem. - # Fill in missing factors with default values. - self.factors = fixed_factors - for key in self.specifications: - if key not in fixed_factors: - self.factors[key] = self.specifications[key]["default"] - # Set subset of factors of the simulation model. - # Fill in missing model factors with problem-level default values. - for key in self.model_default_factors: - if key not in model_fixed_factors: - model_fixed_factors[key] = self.model_default_factors[key] - self.model_fixed_factors = model_fixed_factors - # super().__init__() - - def __eq__(self, other): - """Check if two problems are equivalent. - - Parameters - ---------- - other : ``base.Problem`` - Other ``base.Problem`` objects to compare to self. - - Returns - ------- - bool - True if the two problems are equivalent, otherwise False. - """ - if type(self) == type(other): - if self.factors == other.factors: - # Check if non-decision-variable factors of models are the same. 
- non_decision_factors = set(self.model.factors.keys()) - self.model_decision_factors - for factor in non_decision_factors: - if self.model.factors[factor] != other.model.factors[factor]: - # print("Model factors do not match") - return False - return True - else: - # print("Problem factors do not match.") - return False - else: - # print("Problem types do not match.") - return False - - def check_initial_solution(self): - """Check if initial solution is feasible and of correct dimension. - - Returns - ------- - bool - True if initial solution is feasible and of correct dimension, otherwise False. - """ - if len(self.factors["initial_solution"]) != self.dim: - return False - elif not self.check_deterministic_constraints(x=self.factors["initial_solution"]): - return False - else: - return True - - def check_budget(self): - """Check if budget is strictly positive. - - Returns - ------- - bool - True if budget is strictly positive, otherwise False. - """ - return self.factors["budget"] > 0 - - def check_problem_factor(self, factor_name): - """Determine if the setting of a problem factor is permissible. - - Parameters - ---------- - factor_name : str - Name of factor for dictionary lookup (i.e., key). - - Returns - ------- - is_permissible : bool - True if problem factor is permissible, otherwise False. - """ - is_permissible = True - is_permissible *= self.check_factor_datatype(factor_name) - is_permissible *= self.check_factor_list[factor_name]() - return is_permissible - # raise NotImplementedError - - def check_problem_factors(self): - """Determine if the joint settings of problem factors are permissible. - - Notes - ----- - Each subclass of ``base.Problem`` has its own custom ``check_problem_factors`` method. - - Returns - ------- - is_simulatable : bool - True if problem factors are permissible, otherwise False. 
- """ - return True - # raise NotImplementedError - - def check_factor_datatype(self, factor_name): - """Determine if a factor's data type matches its specification. - - Parameters - ---------- - factor_name : str - String corresponding to name of factor to check. - - Returns - ------- - is_right_type : bool - True if factor is of specified data type, otherwise False. - """ - is_right_type = isinstance(self.factors[factor_name], self.specifications[factor_name]["datatype"]) - return is_right_type - - def attach_rngs(self, random_rng, copy=True): - """Attach a list of random-number generators to the problem. - - Parameters - ---------- - rng_list : list [``mrg32k3a.mrg32k3a.MRG32k3a``] - List of random-number generators used to generate a random initial solution - or a random problem instance. - """ - if copy: - self.random_rng = [deepcopy(rng) for rng in random_rng] - else: - self.random_rng = random_rng - - def rebase(self, n_reps): - """Rebase the progenitor rngs to start at a later subsubstream index. - - Parameters - ---------- - n_reps : int - Substream index to skip to. - """ - new_rngs = [] - for rng in self.random_rng: - stream_index = rng.s_ss_sss_index[0] - substream_index = rng.s_ss_sss_index[1] - new_rngs.append(MRG32k3a(s_ss_sss_index=[stream_index, substream_index, n_reps])) - self.random_rng = new_rngs - - def vector_to_factor_dict(self, vector): - """ - Convert a vector of variables to a dictionary with factor keys. - - Notes - ----- - Each subclass of ``base.Problem`` has its own custom ``vector_to_factor_dict`` method. - - Parameters - ---------- - vector : tuple - Vector of values associated with decision variables. - - Returns - ------- - factor_dict : dict - Dictionary with factor keys and associated values. - """ - raise NotImplementedError - - def factor_dict_to_vector(self, factor_dict): - """Convert a dictionary with factor keys to a vector - of variables. 
- - Notes - ----- - Each subclass of ``base.Problem`` has its own custom ``factor_dict_to_vector`` method. - - Parameters - ---------- - factor_dict : dict - Dictionary with factor keys and associated values. - - Returns - ------- - vector : tuple - Vector of values associated with decision variables. - """ - raise NotImplementedError - - def factor_dict_to_vector_gradients(self, factor_dict): - """Convert a dictionary with factor keys to a gradient vector. - - Notes - ----- - A subclass of ``base.Problem`` can have its own custom - ``factor_dict_to_vector_gradients`` method if the - objective is deterministic. - - Parameters - ---------- - factor_dict : dict - Dictionary with factor keys and associated values. - - Returns - ------- - vector : tuple - Vector of partial derivatives associated with decision variables. - """ - return self.factor_dict_to_vector(factor_dict) - - def response_dict_to_objectives(self, response_dict): - """Convert a dictionary with response keys to a vector - of objectives. - - Notes - ----- - Each subclass of ``base.Problem`` has its own custom ``response_dict_to_objectives`` method. - - Parameters - ---------- - response_dict : dict - Dictionary with response keys and associated values. - - Returns - ------- - objectives : tuple - Vector of objectives. - """ - raise NotImplementedError - - def response_dict_to_objectives_gradients(self, response_dict): - """Convert a dictionary with response keys to a vector - of gradients. - - Notes - ----- - A subclass of ``base.Problem`` can have its own custom - ``response_dict_to_objectives_gradients`` method if the - objective is deterministic. - - Parameters - ---------- - response_dict : dict - Dictionary with response keys and associated values. - - Returns - ------- - vector : tuple - Vector of gradients. 
- """ - return self.response_dict_to_objectives(response_dict) - - def response_dict_to_stoch_constraints(self, response_dict): - """Convert a dictionary with response keys to a vector - of left-hand sides of stochastic constraints: E[Y] <= 0. - - Notes - ----- - Each subclass of ``base.Problem`` has its own custom ``response_dict_to_stoch_constraints`` method. - - Parameters - ---------- - response_dict : dict - Dictionary with response keys and associated values. - - Returns - ------- - stoch_constraints : tuple - Vector of LHSs of stochastic constraints. - """ - stoch_constraints = () - return stoch_constraints - - def deterministic_objectives_and_gradients(self, x): - """Compute deterministic components of objectives for a solution `x`. - - Parameters - ---------- - x : tuple - Vector of decision variables. - - Returns - ------- - det_objectives : tuple - Vector of deterministic components of objectives. - det_objectives_gradients : tuple - Vector of gradients of deterministic components of objectives. - """ - det_objectives = (0,) * self.n_objectives - det_objectives_gradients = tuple([(0,) * self.dim for _ in range(self.n_objectives)]) - return det_objectives, det_objectives_gradients - - def deterministic_stochastic_constraints_and_gradients(self, x): - """Compute deterministic components of stochastic constraints - for a solution `x`. - - Parameters - ---------- - x : tuple - Vector of decision variables. - - Returns - ------- - det_stoch_constraints : tuple - Vector of deterministic components of stochastic - constraints. - det_stoch_constraints_gradients : tuple - Vector of gradients of deterministic components of - stochastic constraints. 
- """ - det_stoch_constraints = (0,) * self.n_stochastic_constraints - det_stoch_constraints_gradients = tuple([(0,) * self.dim for _ in range(self.n_stochastic_constraints)]) - return det_stoch_constraints, det_stoch_constraints_gradients - - def check_deterministic_constraints(self, x): - """Check if a solution `x` satisfies the problem's deterministic - constraints. - - Parameters - ---------- - x : tuple - Vector of decision variables. - - Returns - ------- - satisfies : bool - True if solution `x` satisfies the deterministic constraints, - otherwise False. - """ - # Check box constraints. - return bool(np.prod([self.lower_bounds[idx] <= x[idx] <= self.upper_bounds[idx] for idx in range(len(x))])) - - def get_random_solution(self, rand_sol_rng): - """Generate a random solution for starting or restarting solvers. - - Parameters - ---------- - rand_sol_rng : ``mrg32k3a.mrg32k3a.MRG32k3a`` - Random-number generator used to sample a new random solution. - - Returns - ------- - x : tuple - vector of decision variables - """ - pass - - def simulate(self, solution, m=1): - """Simulate `m` i.i.d. replications at solution `x`. - - Notes - ----- - Gradients of objective function and stochastic constraint LHSs - are temporarily commented out. Under development. - - Parameters - ---------- - solution : ``base.Solution`` - Solution to evalaute. - m : int - Number of replications to simulate at `x`. - """ - if m < 1: - print('--* Error: Number of replications must be at least 1. ') - print('--* Aborting. ') - else: - # Pad numpy arrays if necessary. - if solution.n_reps + m > solution.storage_size: - solution.pad_storage(m) - # Set the decision factors of the model. - self.model.factors.update(solution.decision_factors) - for _ in range(m): - # Generate one replication at x. - responses, gradients = self.model.replicate(solution.rng_list) - # Convert gradient subdictionaries to vectors mapping to decision variables. 
- if self.gradient_available: - vector_gradients = {keys: self.factor_dict_to_vector_gradients(gradient_dict) for (keys, gradient_dict) in gradients.items()} - # vector_gradients = {keys: self.factor_dict_to_vector(gradient_dict) for (keys, gradient_dict) in gradients.items()} - # Convert responses and gradients to objectives and gradients and add - # to those of deterministic components of objectives. - solution.objectives[solution.n_reps] = [sum(pairs) for pairs in zip(self.response_dict_to_objectives(responses), solution.det_objectives)] - if self.gradient_available: - solution.objectives_gradients[solution.n_reps] = [[sum(pairs) for pairs in zip(stoch_obj, det_obj)] for stoch_obj, det_obj in zip(self.response_dict_to_objectives_gradients(vector_gradients), solution.det_objectives_gradients)] - # solution.objectives_gradients[solution.n_reps] = [[sum(pairs) for pairs in zip(stoch_obj, det_obj)] for stoch_obj, det_obj in zip(self.response_dict_to_objectives(vector_gradients), solution.det_objectives_gradients)] - if self.n_stochastic_constraints > 0: - # Convert responses and gradients to stochastic constraints and gradients and add - # to those of deterministic components of stochastic constraints. - solution.stoch_constraints[solution.n_reps] = [sum(pairs) for pairs in zip(self.response_dict_to_stoch_constraints(responses), solution.det_stoch_constraints)] - # solution.stoch_constraints_gradients[solution.n_reps] = [[sum(pairs) for pairs in zip(stoch_stoch_cons, det_stoch_cons)] for stoch_stoch_cons, det_stoch_cons in zip(self.response_dict_to_stoch_constraints(vector_gradients), solution.det_stoch_constraints_gradients)] - # Increment counter. - solution.n_reps += 1 - # Advance rngs to start of next subsubstream. - for rng in solution.rng_list: - rng.advance_subsubstream() - # Update summary statistics. 
- solution.recompute_summary_statistics() - - def simulate_up_to(self, solutions, n_reps): - """Simulate a set of solutions up to a given number of replications. - - Parameters - ---------- - solutions : set [``base.Solution``] - A set of ``base.Solution`` objects. - n_reps : int - Common number of replications to simulate each solution up to. - """ - for solution in solutions: - # If more replications needed, take them. - if solution.n_reps < n_reps: - n_reps_to_take = n_reps - solution.n_reps - self.simulate(solution=solution, m=n_reps_to_take) - - -class Model(object): - """Base class to implement simulation models (models) featured in - simulation-optimization problems. - - Attributes - ---------- - name : str - Name of model. - n_rngs : int - Number of random-number generators used to run a simulation replication. - n_responses : int - Number of responses (performance measures). - factors : dict - Changeable factors of the simulation model. - specifications : dict - Details of each factor (for GUI, data validation, and defaults). - check_factor_list : dict - Switch case for checking factor simulatability. - - Parameters - ---------- - fixed_factors : dict - Dictionary of user-specified model factors. - """ - def __init__(self, fixed_factors): - # Set factors of the simulation model. - # Fill in missing factors with default values. - self.factors = fixed_factors - for key in self.specifications: - if key not in fixed_factors: - self.factors[key] = self.specifications[key]["default"] - - def __eq__(self, other): - """Check if two models are equivalent. - - Parameters - ---------- - other : ``base.Model`` - Other ``base.Model`` object to compare to self. - - Returns - ------- - bool - True if the two models are equivalent, otherwise False. 
- """ - if type(self) == type(other): - if self.factors == other.factors: - return True - else: - # print("Model factors do not match.") - return False - else: - # print("Model types do not match.") - return False - - def check_simulatable_factor(self, factor_name): - """Determine if a simulation replication can be run with the given factor. - - Parameters - ---------- - factor_name : str - Name of factor for dictionary lookup (i.e., key). - - Returns - ------- - is_simulatable : bool - True if model specified by factors is simulatable, otherwise False. - """ - is_simulatable = True - is_simulatable *= self.check_factor_datatype(factor_name) - is_simulatable *= self.check_factor_list[factor_name]() - return is_simulatable - # raise NotImplementedError - - def check_simulatable_factors(self): - """Determine if a simulation replication can be run with the given factors. - - Notes - ----- - Each subclass of ``base.Model`` has its own custom ``check_simulatable_factors`` method. - - Returns - ------- - is_simulatable : bool - True if model specified by factors is simulatable, otherwise False. - """ - return True - # raise NotImplementedError - - def check_factor_datatype(self, factor_name): - """Determine if a factor's data type matches its specification. - - Returns - ------- - is_right_type : bool - True if factor is of specified data type, otherwise False. - """ - is_right_type = isinstance(self.factors[factor_name], self.specifications[factor_name]["datatype"]) - return is_right_type - - def attach_rng(self, random_rng, copy=True): - """Attach a list of random-number generators to the problem. - - Parameters - ---------- - rng_list : list [``mrg32k3a.mrg32k3a.MRG32k3a``] - List of random-number generators used to generate a random initial solution - or a random problem instance. 
- """ - # self.random_rng = random_rng - if copy: - self.random_rng = [deepcopy(rng) for rng in random_rng] - else: - self.random_rng = random_rng - - def replicate(self, rng_list): - """Simulate a single replication for the current model factors. - - Parameters - ---------- - rng_list : list [``mrg32k3a.mrg32k3a.MRG32k3a``] - RNGs for model to use when simulating a replication. - - Returns - ------- - responses : dict - Performance measures of interest. - gradients : dict [dict] - Gradient estimate for each response. - """ - raise NotImplementedError - - -class Auto_Model(Model): - """ - Subclass of Model. - """ - def __init__(self, fixed_factors): - # set factors of the simulation model - # fill in missing factors with default values - super(Auto_Model, self).__init__(fixed_factors) - self.differentiable_factor_names = [] - for key in self.specifications: - if self.specifications[key]["datatype"] == float: - self.differentiable_factor_names.append(key) - self.bi_dict = bi_dict(self.response_names) - - def innner_replicate(self, rng_list): - raise NotImplementedError - - def replicate(self, rng_list, **kwargs): - return replicate_wrapper(self, rng_list, **kwargs) - - -class Solution(object): - """Base class for solutions represented as vectors of decision variables - and dictionaries of decision factors. - - Attributes - ---------- - x : tuple - Vector of decision variables. - dim : int - Number of decision variables describing `x`. - decision_factors : dict - Decision factor names and values. - rng_list : list [``mrg32k3a.mrg32k3a.MRG32k3a``] - RNGs for model to use when running replications at the solution. - n_reps : int - Number of replications run at the solution. - det_objectives : tuple - Deterministic components added to objectives. - det_objectives_gradients : tuple [tuple] - Gradients of deterministic components added to objectives; - # objectives x dimension. 
- det_stoch_constraints : tuple - Deterministic components added to LHS of stochastic constraints. - det_stoch_constraints_gradients : tuple [tuple] - Gradients of deterministics components added to LHS stochastic constraints; - # stochastic constraints x dimension. - storage_size : int - Max number of replications that can be recorded in current storage. - objectives : numpy array - Objective(s) estimates from each replication; - # replications x # objectives. - objectives_gradients : numpy array - Gradient estimates of objective(s) from each replication; - # replications x # objectives x dimension. - stochastic_constraints : numpy array - Stochastic constraint estimates from each replication; - # replications x # stochastic constraints. - stochastic_constraints_gradients : numpy array - Gradient estimates of stochastic constraints from each replication; - # replications x # stochastic constraints x dimension. - - - Parameters - ---------- - x : tuple - Vector of decision variables. - problem : ``base.Problem`` - Problem to which `x` is a solution. - """ - def __init__(self, x, problem): - super().__init__() - self.x = x - if isinstance(x, int) or isinstance(x, float): - self.dim = 1 - else: - self.dim = len(x) - self.decision_factors = problem.vector_to_factor_dict(x) - self.n_reps = 0 - self.det_objectives, self.det_objectives_gradients = problem.deterministic_objectives_and_gradients(self.x) - self.det_stoch_constraints, self.det_stoch_constraints_gradients = problem.deterministic_stochastic_constraints_and_gradients(self.x) - init_size = 100 # Initialize numpy arrays to store up to 100 replications. - self.storage_size = init_size - # Raw data. 
- self.objectives = np.zeros((init_size, problem.n_objectives)) - self.objectives_gradients = np.zeros((init_size, problem.n_objectives, problem.dim)) - if problem.n_stochastic_constraints > 0: - self.stoch_constraints = np.zeros((init_size, problem.n_stochastic_constraints)) - self.stoch_constraints_gradients = np.zeros((init_size, problem.n_stochastic_constraints, problem.dim)) - else: - self.stoch_constraints = None - self.stoch_constraints_gradients = None - # Summary statistics - # self.objectives_mean = np.full((problem.n_objectives), np.nan) - # self.objectives_var = np.full((problem.n_objectives), np.nan) - # self.objectives_stderr = np.full((problem.n_objectives), np.nan) - # self.objectives_cov = np.full((problem.n_objectives, problem.n_objectives), np.nan) - # self.objectives_gradients_mean = np.full((problem.n_objectives, problem.dim), np.nan) - # self.objectives_gradients_var = np.full((problem.n_objectives, problem.dim), np.nan) - # self.objectives_gradients_stderr = np.full((problem.n_objectives, problem.dim), np.nan) - # self.objectives_gradients_cov = np.full((problem.n_objectives, problem.dim, problem.dim), np.nan) - # self.stoch_constraints_mean = np.full((problem.n_stochastic_constraints), np.nan) - # self.stoch_constraints_var = np.full((problem.n_stochastic_constraints), np.nan) - # self.stoch_constraints_stderr = np.full((problem.n_stochastic_constraints), np.nan) - # self.stoch_constraints_cov = np.full((problem.n_stochastic_constraints, problem.n_stochastic_constraints), np.nan) - # self.stoch_constraints_gradients_mean = np.full((problem.n_stochastic_constraints, problem.dim), np.nan) - # self.stoch_constraints_gradients_var = np.full((problem.n_stochastic_constraints, problem.dim), np.nan) - # self.stoch_constraints_gradients_stderr = np.full((problem.n_stochastic_constraints, problem.dim), np.nan) - # self.stoch_constraints_gradients_cov = np.full((problem.n_stochastic_constraints, problem.dim, problem.dim), np.nan) - - def 
attach_rngs(self, rng_list, copy=True): - """Attach a list of random-number generators to the solution. - - Parameters - ---------- - rng_list : list [``mrg32k3a.mrg32k3a.MRG32k3a``] - List of random-number generators used to run simulation replications. - copy : bool, default=True - True if we want to copy the ``mrg32k3a.mrg32k3a.MRG32k3a`` objects, otherwise False. - """ - if copy: - self.rng_list = [deepcopy(rng) for rng in rng_list] - else: - self.rng_list = rng_list - - def pad_storage(self, m): - """Append zeros to numpy arrays for summary statistics. - - Parameters - ---------- - m : int - Number of replications to simulate. - """ - # Size of data storage. - n_objectives = len(self.det_objectives) - base_pad_size = 100 - # Default is to append space for 100 more replications. - # If more space needed, append in multiples of 100. - pad_size = int(np.ceil(m / base_pad_size)) * base_pad_size - self.storage_size += pad_size - self.objectives = np.concatenate((self.objectives, np.zeros((pad_size, n_objectives)))) - self.objectives_gradients = np.concatenate((self.objectives_gradients, np.zeros((pad_size, n_objectives, self.dim)))) - if self.stoch_constraints is not None: - n_stochastic_constraints = len(self.det_stoch_constraints) - self.stoch_constraints = np.concatenate((self.stoch_constraints, np.zeros((pad_size, n_stochastic_constraints)))) - self.stoch_constraints_gradients = np.concatenate((self.stoch_constraints_gradients, np.zeros((pad_size, n_stochastic_constraints, self.dim)))) - - def recompute_summary_statistics(self): - """Recompute summary statistics of the solution. - - Notes - ----- - Statistics for gradients of objectives and stochastic constraint LHSs - are temporarily commented out. Under development. 
- """ - self.objectives_mean = np.mean(self.objectives[:self.n_reps], axis=0) - if self.n_reps > 1: - self.objectives_var = np.var(self.objectives[:self.n_reps], axis=0, ddof=1) - self.objectives_stderr = np.std(self.objectives[:self.n_reps], axis=0, ddof=1) / np.sqrt(self.n_reps) - self.objectives_cov = np.cov(self.objectives[:self.n_reps], rowvar=False, ddof=1) - self.objectives_gradients_mean = np.mean(self.objectives_gradients[:self.n_reps], axis=0) - if self.n_reps > 1: - self.objectives_gradients_var = np.var(self.objectives_gradients[:self.n_reps], axis=0, ddof=1) - self.objectives_gradients_stderr = np.std(self.objectives_gradients[:self.n_reps], axis=0, ddof=1) / np.sqrt(self.n_reps) - self.objectives_gradients_cov = np.array([np.cov(self.objectives_gradients[:self.n_reps, obj], rowvar=False, ddof=1) for obj in range(len(self.det_objectives))]) - if self.stoch_constraints is not None: - self.stoch_constraints_mean = np.mean(self.stoch_constraints[:self.n_reps], axis=0) - self.stoch_constraints_var = np.var(self.stoch_constraints[:self.n_reps], axis=0, ddof=1) - self.stoch_constraints_stderr = np.std(self.stoch_constraints[:self.n_reps], axis=0, ddof=1) / np.sqrt(self.n_reps) - self.stoch_constraints_cov = np.cov(self.stoch_constraints[:self.n_reps], rowvar=False, ddof=1) - # self.stoch_constraints_gradients_mean = np.mean(self.stoch_constraints_gradients[:self.n_reps], axis=0) - # self.stoch_constraints_gradients_var = np.var(self.stoch_constraints_gradients[:self.n_reps], axis=0, ddof=1) - # self.stoch_constraints_gradients_stderr = np.std(self.stoch_constraints_gradients[:self.n_reps], axis=0, ddof=1) / np.sqrt(self.n_reps) - # self.stoch_constraints_gradients_cov = np.array([np.cov(self.stoch_constraints_gradients[:self.n_reps, stcon], rowvar=False, ddof=1) for stcon in range(len(self.det_stoch_constraints))]) From f8c1faa916f1d1752026e7c74d1415eb976b4ed8 Mon Sep 17 00:00:00 2001 From: liulitong-Jessie 
<46491025+liulitong-Jessie@users.noreply.github.com> Date: Tue, 15 Aug 2023 18:12:23 -0400 Subject: [PATCH 05/21] Delete experiment_base.py --- experiment_base.py | 2725 -------------------------------------------- 1 file changed, 2725 deletions(-) delete mode 100644 experiment_base.py diff --git a/experiment_base.py b/experiment_base.py deleted file mode 100644 index a325ec3fb..000000000 --- a/experiment_base.py +++ /dev/null @@ -1,2725 +0,0 @@ -#!/usr/bin/env python -""" -Summary -------- -Provide base classes for problem-solver pairs and helper functions -for reading/writing data and plotting. -This is the modified version to generate and solve random problem instances by solvers. -""" - -import numpy as np -import matplotlib.pyplot as plt -import seaborn as sns -import pandas as pd -from scipy.stats import norm -import pickle -import importlib -import time -import os -# from mrg32k3a.mrg32k3a import MRG32k3a -from rng.mrg32k3a import MRG32k3a #when do the multinomial, change to the local - -from .base import Solution -from .directory import solver_directory, problem_directory - - -class Curve(object): - """Base class for all curves. - - Attributes - ---------- - x_vals : list [float] - Values of horizontal components. - y_vals : list [float] - Values of vertical components. - n_points : int - Number of values in x- and y- vectors. - - Parameters - ---------- - x_vals : list [float] - Values of horizontal components. - y_vals : list [float] - Values of vertical components. - """ - def __init__(self, x_vals, y_vals): - if len(x_vals) != len(y_vals): - print("Vectors of x- and y- values must be of same length.") - self.x_vals = x_vals - self.y_vals = y_vals - self.n_points = len(x_vals) - - def lookup(self, x): - """Lookup the y-value of the curve at an intermediate x-value. - - Parameters - ---------- - x : float - X-value at which to lookup the y-value. - - Returns - ------- - y : float - Y-value corresponding to x. 
- """ - if x < self.x_vals[0]: - y = np.nan - else: - idx = np.max(np.where(np.array(self.x_vals) <= x)) - y = self.y_vals[idx] - return y - - def compute_crossing_time(self, threshold): - """Compute the first time at which a curve drops below a given threshold. - - Parameters - ---------- - threshold : float - Value for which to find first crossing time. - - Returns - ------- - crossing_time : float - First time at which a curve drops below threshold. - """ - # Crossing time is defined as infinity if the curve does not drop - # below threshold. - crossing_time = np.inf - # Pass over curve to find first crossing time. - for i in range(self.n_points): - if self.y_vals[i] < threshold: - crossing_time = self.x_vals[i] - break - return crossing_time - - def compute_area_under_curve(self): - """Compute the area under a curve. - - Returns - ------- - area : float - Area under the curve. - """ - area = np.dot(self.y_vals[:-1], np.diff(self.x_vals)) - return area - - def curve_to_mesh(self, mesh): - """Create a curve defined at equally spaced x values. - - Parameters - ---------- - mesh : list of floats - List of uniformly spaced x-values. - - Returns - ------- - mesh_curve : ``experiment_base.Curve`` - Curve with equally spaced x-values. - """ - mesh_curve = Curve(x_vals=mesh, y_vals=[self.lookup(x) for x in mesh]) - return mesh_curve - - def curve_to_full_curve(self): - """Create a curve with duplicate x- and y-values to indicate steps. - - Returns - ------- - full_curve : ``experiment_base.Curve`` - Curve with duplicate x- and y-values. - """ - duplicate_x_vals = [x for x in self.x_vals for _ in (0, 1)] - duplicate_y_vals = [y for y in self.y_vals for _ in (0, 1)] - full_curve = Curve(x_vals=duplicate_x_vals[1:], y_vals=duplicate_y_vals[:-1]) - return full_curve - - def plot(self, color_str="C0", curve_type="regular"): - """Plot a curve. - - Parameters - ---------- - color_str : str, default="C0" - String indicating line color, e.g., "C0", "C1", etc. 
- curve_type : str, default="regular" - String indicating type of line: "regular" or "conf_bound". - - Returns - ------- - handle : list [``matplotlib.lines.Line2D``] - Curve handle, to use when creating legends. - """ - if curve_type == "regular": - linestyle = "-" - linewidth = 2 - elif curve_type == "conf_bound": - linestyle = "--" - linewidth = 1 - handle, = plt.step(self.x_vals, - self.y_vals, - color=color_str, - linestyle=linestyle, - linewidth=linewidth, - where="post" - ) - return handle - - -def mean_of_curves(curves): - """Compute pointwise (w.r.t. x-values) mean of curves. - Starting and ending x-values must coincide for all curves. - - Parameters - ---------- - curves : list [``experiment_base.Curve``] - Collection of curves to aggregate. - - Returns - ------- - mean_curve : ``experiment_base.Curve object`` - Mean curve. - """ - unique_x_vals = np.unique([x_val for curve in curves for x_val in curve.x_vals]) - mean_y_vals = [np.mean([curve.lookup(x_val) for curve in curves]) for x_val in unique_x_vals] - mean_curve = Curve(x_vals=unique_x_vals.tolist(), y_vals=mean_y_vals) - return mean_curve - - -def quantile_of_curves(curves, beta): - """Compute pointwise (w.r.t. x values) quantile of curves. - Starting and ending x values must coincide for all curves. - - Parameters - ---------- - curves : list [``experiment_base.Curve``] - Collection of curves to aggregate. - beta : float - Quantile level. - - Returns - ------- - quantile_curve : ``experiment_base.Curve`` - Quantile curve. - """ - unique_x_vals = np.unique([x_val for curve in curves for x_val in curve.x_vals]) - quantile_y_vals = [np.quantile([curve.lookup(x_val) for curve in curves], q=beta) for x_val in unique_x_vals] - quantile_curve = Curve(x_vals=unique_x_vals.tolist(), y_vals=quantile_y_vals) - return quantile_curve - - -def cdf_of_curves_crossing_times(curves, threshold): - """Compute the cdf of crossing times of curves. 
- - Parameters - ---------- - curves : list [``experiment_base.Curve``] - Collection of curves to aggregate. - threshold : float - Value for which to find first crossing time. - - Returns - ------- - cdf_curve : ``experiment_base.Curve`` - CDF of crossing times. - """ - n_curves = len(curves) - crossing_times = [curve.compute_crossing_time(threshold) for curve in curves] - unique_x_vals = [0] + list(np.unique([crossing_time for crossing_time in crossing_times if crossing_time < np.inf])) + [1] - cdf_y_vals = [sum(crossing_time <= x_val for crossing_time in crossing_times) / n_curves for x_val in unique_x_vals] - cdf_curve = Curve(x_vals=unique_x_vals, y_vals=cdf_y_vals) - return cdf_curve - - -def quantile_cross_jump(curves, threshold, beta): - """Compute a simple curve with a jump at the quantile of the crossing times. - - Parameters - ---------- - curves : list [``experiment_base.Curve``] - Collection of curves to aggregate. - threshold : float - Value for which to find first crossing time. - beta : float - Quantile level. - - Returns - ------- - jump_curve : ``experiment_base.Curve`` - Piecewise-constant curve with a jump at the quantile crossing time (if finite). - """ - solve_time_quantile = np.quantile([curve.compute_crossing_time(threshold=threshold) for curve in curves], q=beta) - # Note: np.quantile will evaluate to np.nan if forced to interpolate - # between a finite and infinite value. These are rare cases. Since - # crossing times must be non-negative, the quantile should be mapped - # to positive infinity. - if solve_time_quantile == np.inf or np.isnan(solve_time_quantile): - jump_curve = Curve(x_vals=[0, 1], y_vals=[0, 0]) - else: - jump_curve = Curve(x_vals=[0, solve_time_quantile, 1], y_vals=[0, 1, 1]) - return jump_curve - - -def difference_of_curves(curve1, curve2): - """Compute the difference of two curves (Curve 1 - Curve 2). - - Parameters - ---------- - curve1, curve2 : ``experiment_base.Curve`` - Curves to take the difference of. 
- - Returns - ------- - difference_curve : ``experiment_base.Curve`` - Difference of curves. - """ - unique_x_vals = np.unique(curve1.x_vals + curve2.x_vals) - difference_y_vals = [(curve1.lookup(x_val) - curve2.lookup(x_val)) for x_val in unique_x_vals] - difference_curve = Curve(x_vals=unique_x_vals.tolist(), y_vals=difference_y_vals) - return difference_curve - - -def max_difference_of_curves(curve1, curve2): - """Compute the maximum difference of two curves (Curve 1 - Curve 2). - - Parameters - ---------- - curve1, curve2 : ``experiment_base.Curve`` - Curves to take the difference of. - - Returns - ------- - max_diff : float - Maximum difference of curves. - """ - difference_curve = difference_of_curves(curve1, curve2) - max_diff = max(difference_curve.y_vals) - return max_diff - - -class ProblemSolver(object): - """Base class for running one solver on one problem. - - Attributes - ---------- - solver : ``base.Solver`` - Simulation-optimization solver. - problem : ``base.Problem`` - Simulation-optimization problem. - n_macroreps : int - Number of macroreplications run. - file_name_path : str - Path of .pickle file for saving ``experiment_base.ProblemSolver`` object. - all_recommended_xs : list [list [tuple]] - Sequences of recommended solutions from each macroreplication. - all_intermediate_budgets : list [list] - Sequences of intermediate budgets from each macroreplication. - timings : list [float] - Runtimes (in seconds) for each macroreplication. - n_postreps : int - Number of postreplications to take at each recommended solution. - crn_across_budget : bool - True if CRN used for post-replications at solutions recommended at - different times, otherwise False. - crn_across_macroreps : bool - True if CRN used for post-replications at solutions recommended on - different macroreplications, otherwise False. - all_post_replicates : list [list [list]] - All post-replicates from all solutions from all macroreplications. 
- all_est_objectives : numpy array [numpy array] - Estimated objective values of all solutions from all macroreplications. - n_postreps_init_opt : int - Number of postreplications to take at initial solution (x0) and - optimal solution (x*). - crn_across_init_opt : bool - True if CRN used for post-replications at solutions x0 and x*, otherwise False. - x0 : tuple - Initial solution (x0). - x0_postreps : list - Post-replicates at x0. - xstar : tuple - Proxy for optimal solution (x*). - xstar_postreps : list - Post-replicates at x*. - objective_curves : list [``experiment_base.Curve``] - Curves of estimated objective function values, - one for each macroreplication. - progress_curves : list [``experiment_base.Curve``] - Progress curves, one for each macroreplication. - - Parameters - ---------- - solver_name : str, optional - Name of solver. - problem_name : str, optional - Name of problem. - solver_rename : str, optional - User-specified name for solver. - problem_rename : str, optional - User-specified name for problem. - solver : ``base.Solver``, optional - Simulation-optimization solver. - problem : ``base.Problem``, optional - Simulation-optimization problem. - solver_fixed_factors : dict, optional - Dictionary of user-specified solver factors. - problem_fixed_factors : dict, optional - Dictionary of user-specified problem factors. - model_fixed_factors : dict, optional - Dictionary of user-specified model factors. - file_name_path : str, optional - Path of .pickle file for saving ``experiment_base.ProblemSolver`` objects. - """ - def __init__(self, solver_name=None, problem_name=None, solver_rename=None, problem_rename=None, solver=None, problem=None, solver_fixed_factors=None, problem_fixed_factors=None, model_fixed_factors=None, file_name_path=None): - """There are two ways to create a ProblemSolver object: - 1. Provide the names of the solver and problem to look up in ``directory.py``. - 2. Provide the solver and problem objects to pair. 
- """ - # Handle unassigned arguments. - if solver_fixed_factors is None: - solver_fixed_factors = {} - if problem_fixed_factors is None: - problem_fixed_factors = {} - if model_fixed_factors is None: - model_fixed_factors = {} - # Initialize solver. - if solver is not None: # Method #2 - self.solver = solver - elif solver_rename is None: # Method #1 - self.solver = solver_directory[solver_name](fixed_factors=solver_fixed_factors) - else: # Method #1 - self.solver = solver_directory[solver_name](name=solver_rename, fixed_factors=solver_fixed_factors) - # Initialize problem. - if problem is not None: # Method #2 - self.problem = problem - elif problem_rename is None: # Method #1 - self.problem = problem_directory[problem_name](fixed_factors=problem_fixed_factors, model_fixed_factors=model_fixed_factors) - else: # Method #1 - self.problem = problem_directory[problem_name](name=problem_rename, fixed_factors=problem_fixed_factors, model_fixed_factors=model_fixed_factors) - # Initialize file path. - if file_name_path is None: - self.file_name_path = f"./experiments/outputs/{self.solver.name}_on_{self.problem.name}.pickle" - else: - self.file_name_path = file_name_path - - def check_compatibility(self): - """Check whether the experiment's solver and problem are compatible. - - Returns - ------- - error_str : str - Error message in the event problem and solver are incompatible. - """ - error_str = "" - # Check number of objectives. - if self.solver.objective_type == "single" and self.problem.n_objectives > 1: - error_str += "Solver cannot solve a multi-objective problem.\n" - elif self.solver.objective_type == "multi" and self.problem.n_objectives == 1: - error_str += "Multi-objective solver being run on a single-objective problem.\n" - # Check constraint types. 
- constraint_types = ["unconstrained", "box", "deterministic", "stochastic"] - if constraint_types.index(self.solver.constraint_type) < constraint_types.index(self.problem.constraint_type): - error_str += "Solver can handle upto " + self.solver.constraint_type + " constraints, but problem has " + self.problem.constraint_type + " constraints.\n" - # Check variable types. - if self.solver.variable_type == "discrete" and self.problem.variable_type != "discrete": - error_str += "Solver is for discrete variables but problem variables are " + self.problem.variable_type + ".\n" - elif self.solver.variable_type == "continuous" and self.problem.variable_type != "continuous": - error_str += "Solver is for continuous variables but problem variables are " + self.problem.variable_type + ".\n" - # Check for existence of gradient estimates. - if self.solver.gradient_needed and not self.problem.gradient_available: - error_str += "Gradient-based solver does not have access to gradient for this problem.\n" - return error_str - - def run(self, n_macroreps): - """Run n_macroreps of the solver on the problem. - - Notes - ----- - RNGs dedicated for random problem instances and temporarily unused. - Under development. - - Parameters - ---------- - n_macroreps : int - Number of macroreplications of the solver to run on the problem. - """ - self.n_macroreps = n_macroreps - self.all_recommended_xs = [] - self.all_intermediate_budgets = [] - self.timings = [] - # Create, initialize, and attach random number generators - # Stream 0: reserved for taking post-replications - # Stream 1: reserved for bootstrapping - # Stream 2: reserved for overhead ... 
- # Substream 0: rng for random problem instance - # Substream 1: rng for random initial solution x0 and - # restart solutions - # Substream 2: rng for selecting random feasible solutions - # Substream 3: rng for solver's internal randomness - # Streams 3, 4, ..., n_macroreps + 2: reserved for - # macroreplications - rng0 = MRG32k3a(s_ss_sss_index=[2, 0, 0]) # Currently unused. - rng1 = MRG32k3a(s_ss_sss_index=[2, 1, 0]) - rng2 = MRG32k3a(s_ss_sss_index=[2, 2, 0]) - rng3 = MRG32k3a(s_ss_sss_index=[2, 3, 0]) - self.solver.attach_rngs([rng1, rng2, rng3]) - # Run n_macroreps of the solver on the problem. - # Report recommended solutions and corresponding intermediate budgets. - for mrep in range(self.n_macroreps): - print(f"Running macroreplication {mrep + 1} of {self.n_macroreps} of Solver {self.solver.name} on Problem {self.problem.name}.") - # Create, initialize, and attach RNGs used for simulating solutions. - progenitor_rngs = [MRG32k3a(s_ss_sss_index=[mrep + 2, ss, 0]) for ss in range(self.problem.model.n_rngs)] - self.solver.solution_progenitor_rngs = progenitor_rngs - # print([rng.s_ss_sss_index for rng in progenitor_rngs]) - # Run the solver on the problem. - tic = time.perf_counter() - recommended_solns, intermediate_budgets = self.solver.solve(problem=self.problem) - toc = time.perf_counter() - # Record the run time of the macroreplication. - self.timings.append(toc - tic) - # Trim solutions recommended after final budget. - recommended_solns, intermediate_budgets = trim_solver_results(problem=self.problem, recommended_solns=recommended_solns, intermediate_budgets=intermediate_budgets) - # Extract decision-variable vectors (x) from recommended solutions. - # Record recommended solutions and intermediate budgets. - self.all_recommended_xs.append([solution.x for solution in recommended_solns]) - self.all_intermediate_budgets.append(intermediate_budgets) - # Save ProblemSolver object to .pickle file. 
- self.record_experiment_results() - - def check_run(self): - """Check if the experiment has been run. - - Returns - ------- - ran : bool - True if the experiment been run, otherwise False. - """ - if getattr(self, "all_recommended_xs", None) is None: - ran = False - else: - ran = True - return ran - - def post_replicate(self, n_postreps, crn_across_budget=True, crn_across_macroreps=False): - """Run postreplications at solutions recommended by the solver. - - Parameters - ---------- - n_postreps : int - Number of postreplications to take at each recommended solution. - crn_across_budget : bool, default=True - True if CRN used for post-replications at solutions recommended at different times, - otherwise False. - crn_across_macroreps : bool, default=False - True if CRN used for post-replications at solutions recommended on different - macroreplications, otherwise False. - """ - self.n_postreps = n_postreps - self.crn_across_budget = crn_across_budget - self.crn_across_macroreps = crn_across_macroreps - # Create, initialize, and attach RNGs for model. - # Stream 0: reserved for post-replications. - # Skip over first set of substreams dedicated for sampling x0 and x*. - baseline_rngs = [MRG32k3a(s_ss_sss_index=[0, self.problem.model.n_rngs + rng_index, 0]) for rng_index in range(self.problem.model.n_rngs)] - # Initialize matrix containing - # all postreplicates of objective, - # for each macroreplication, - # for each budget. - self.all_post_replicates = [[[] for _ in range(len(self.all_intermediate_budgets[mrep]))] for mrep in range(self.n_macroreps)] - # Simulate intermediate recommended solutions. 
- for mrep in range(self.n_macroreps): - for budget_index in range(len(self.all_intermediate_budgets[mrep])): - x = self.all_recommended_xs[mrep][budget_index] - fresh_soln = Solution(x, self.problem) - fresh_soln.attach_rngs(rng_list=baseline_rngs, copy=False) - self.problem.simulate(solution=fresh_soln, m=self.n_postreps) - # Store results - self.all_post_replicates[mrep][budget_index] = list(fresh_soln.objectives[:fresh_soln.n_reps][:, 0]) # 0 <- assuming only one objective - if crn_across_budget: - # Reset each rng to start of its current substream. - for rng in baseline_rngs: - rng.reset_substream() - if crn_across_macroreps: - # Reset each rng to start of its current substream. - for rng in baseline_rngs: - rng.reset_substream() - else: - # Advance each rng to start of - # substream = current substream + # of model RNGs. - for rng in baseline_rngs: - for _ in range(self.problem.model.n_rngs): - rng.advance_substream() - # Store estimated objective for each macrorep for each budget. - self.all_est_objectives = [[np.mean(self.all_post_replicates[mrep][budget_index]) for budget_index in range(len(self.all_intermediate_budgets[mrep]))] for mrep in range(self.n_macroreps)] - # Save ProblemSolver object to .pickle file. - self.record_experiment_results() - - def check_postreplicate(self): - """Check if the experiment has been postreplicated. - - Returns - ------- - postreplicated : bool - True if the experiment has been postreplicated, otherwise False. - """ - if getattr(self, "all_est_objectives", None) is None: - postreplicated = False - else: - postreplicated = True - return postreplicated - - def check_postnormalize(self): - """Check if the experiment has been postnormalized. - - Returns - ------- - postnormalized : bool - True if the experiment has been postnormalized, otherwise False. 
- """ - if getattr(self, "n_postreps_init_opt", None) is None: - postnormalized = False - else: - postnormalized = True - return postnormalized - - def bootstrap_sample(self, bootstrap_rng, normalize=True): - """Generate a bootstrap sample of estimated objective curves - or estimated progress curves. - - Parameters - ---------- - bootstrap_rng : ``mrg32k3a.mrg32k3a.MRG32k3a`` - Random number generator to use for bootstrapping. - normalize : bool, default=True - True if progress curves are to be normalized w.r.t. - optimality gaps, otherwise False. - - Returns - ------- - bootstrap_curves : list [``experiment_base.Curve``] - Bootstrapped estimated objective curves or estimated progress - curves of all solutions from all bootstrapped macroreplications. - """ - bootstrap_curves = [] - # Uniformly resample M macroreplications (with replacement) from 0, 1, ..., M-1. - # Subsubstream 0: reserved for this outer-level bootstrapping. - bs_mrep_idxs = bootstrap_rng.choices(range(self.n_macroreps), k=self.n_macroreps) - # Advance RNG subsubstream to prepare for inner-level bootstrapping. - bootstrap_rng.advance_subsubstream() - # Subsubstream 1: reserved for bootstrapping at x0 and x*. - # Bootstrap sample post-replicates at common x0. - # Uniformly resample L postreps (with replacement) from 0, 1, ..., L-1. - bs_postrep_idxs = bootstrap_rng.choices(range(self.n_postreps_init_opt), k=self.n_postreps_init_opt) - # Compute the mean of the resampled postreplications. - bs_initial_obj_val = np.mean([self.x0_postreps[postrep] for postrep in bs_postrep_idxs]) - # Reset subsubstream if using CRN across budgets. - # This means the same postreplication indices will be used for resampling at x0 and x*. - if self.crn_across_init_opt: - bootstrap_rng.reset_subsubstream() - # Bootstrap sample postreplicates at reference optimal solution x*. - # Uniformly resample L postreps (with replacement) from 0, 1, ..., L. 
- bs_postrep_idxs = bootstrap_rng.choices(range(self.n_postreps_init_opt), k=self.n_postreps_init_opt) - # Compute the mean of the resampled postreplications. - bs_optimal_obj_val = np.mean([self.xstar_postreps[postrep] for postrep in bs_postrep_idxs]) - # Compute initial optimality gap. - bs_initial_opt_gap = bs_initial_obj_val - bs_optimal_obj_val - # Advance RNG subsubstream to prepare for inner-level bootstrapping. - # Will now be at start of subsubstream 2. - bootstrap_rng.advance_subsubstream() - # Bootstrap within each bootstrapped macroreplication. - # Option 1: Simpler (default) CRN scheme, which makes for faster code. - if self.crn_across_budget and not self.crn_across_macroreps: - for idx in range(self.n_macroreps): - mrep = bs_mrep_idxs[idx] - # Inner-level bootstrapping over intermediate recommended solutions. - est_objectives = [] - # Same postreplication indices for all intermediate budgets on - # a given macroreplciation. - bs_postrep_idxs = bootstrap_rng.choices(range(self.n_postreps), k=self.n_postreps) - for budget in range(len(self.all_intermediate_budgets[mrep])): - # If solution is x0... - if self.all_recommended_xs[mrep][budget] == self.x0: - est_objectives.append(bs_initial_obj_val) - # ...else if solution is x*... - elif self.all_recommended_xs[mrep][budget] == self.xstar: - est_objectives.append(bs_optimal_obj_val) - # ... else solution other than x0 or x*. - else: - # Compute the mean of the resampled postreplications. - est_objectives.append(np.mean([self.all_post_replicates[mrep][budget][postrep] for postrep in bs_postrep_idxs])) - # Record objective or progress curve. 
- if normalize: - frac_intermediate_budgets = [budget / self.problem.factors["budget"] for budget in self.all_intermediate_budgets[mrep]] - norm_est_objectives = [(est_objective - bs_optimal_obj_val) / bs_initial_opt_gap for est_objective in est_objectives] - new_progress_curve = Curve(x_vals=frac_intermediate_budgets, y_vals=norm_est_objectives) - bootstrap_curves.append(new_progress_curve) - else: - new_objective_curve = Curve(x_vals=self.all_intermediate_budgets[mrep], y_vals=est_objectives) - bootstrap_curves.append(new_objective_curve) - # Option 2: Non-default CRN behavior. - else: - for idx in range(self.n_macroreps): - mrep = bs_mrep_idxs[idx] - # Inner-level bootstrapping over intermediate recommended solutions. - est_objectives = [] - for budget in range(len(self.all_intermediate_budgets[mrep])): - # If solution is x0... - if self.all_recommended_xs[mrep][budget] == self.x0: - est_objectives.append(bs_initial_obj_val) - # ...else if solution is x*... - elif self.all_recommended_xs[mrep][budget] == self.xstar: - est_objectives.append(bs_optimal_obj_val) - # ... else solution other than x0 or x*. - else: - # Uniformly resample N postreps (with replacement) from 0, 1, ..., N-1. - bs_postrep_idxs = bootstrap_rng.choices(range(self.n_postreps), k=self.n_postreps) - # Compute the mean of the resampled postreplications. - est_objectives.append(np.mean([self.all_post_replicates[mrep][budget][postrep] for postrep in bs_postrep_idxs])) - # Reset subsubstream if using CRN across budgets. - if self.crn_across_budget: - bootstrap_rng.reset_subsubstream() - # If using CRN across macroreplications... - if self.crn_across_macroreps: - # ...reset subsubstreams... - bootstrap_rng.reset_subsubstream() - # ...else if not using CRN across macrorep... - else: - # ...advance subsubstream. - bootstrap_rng.advance_subsubstream() - # Record objective or progress curve. 
- if normalize: - frac_intermediate_budgets = [budget / self.problem.factors["budget"] for budget in self.all_intermediate_budgets[mrep]] - norm_est_objectives = [(est_objective - bs_optimal_obj_val) / bs_initial_opt_gap for est_objective in est_objectives] - new_progress_curve = Curve(x_vals=frac_intermediate_budgets, y_vals=norm_est_objectives) - bootstrap_curves.append(new_progress_curve) - else: - new_objective_curve = Curve(x_vals=self.all_intermediate_budgets[mrep], y_vals=est_objectives) - bootstrap_curves.append(new_objective_curve) - return bootstrap_curves - - def clear_run(self): - """Delete results from ``run()`` method and any downstream results. - """ - attributes = ["n_macroreps", - "all_recommended_xs", - "all_intermediate_budgets"] - for attribute in attributes: - try: - delattr(self, attribute) - except Exception: - pass - self.clear_postreplicate() - - def clear_postreplicate(self): - """Delete results from ``post_replicate()`` method and any downstream results. - """ - attributes = ["n_postreps", - "crn_across_budget", - "crn_across_macroreps", - "all_post_replicates", - "all_est_objectives"] - for attribute in attributes: - try: - delattr(self, attribute) - except Exception: - pass - self.clear_postnorm() - - def clear_postnorm(self): - """Delete results from ``post_normalize()`` associated with experiment. - """ - attributes = ["n_postreps_init_opt", - "crn_across_init_opt", - "x0", - "x0_postreps", - "xstar", - "xstar_postreps", - "objective_curves", - "progress_curves" - ] - for attribute in attributes: - try: - delattr(self, attribute) - except Exception: - pass - - def record_experiment_results(self): - """Save ``experiment_base.ProblemSolver`` object to .pickle file. - """ - # Create directories if they do no exist. 
- if "./experiments/outputs" in self.file_name_path and not os.path.exists("./experiments/outputs"): - os.makedirs("./experiments", exist_ok=True) - os.makedirs("./experiments/outputs") - elif "./data_farming_experiments/outputs" in self.file_name_path and not os.path.exists("./data_farming_experiments/outputs"): - os.makedirs("./data_farming_experiments", exist_ok=True) - os.makedirs("./data_farming_experiments/outputs") - with open(self.file_name_path, "wb") as file: - pickle.dump(self, file, pickle.HIGHEST_PROTOCOL) - - def log_experiment_results(self, print_solutions=True): - """Create readable .txt file from a problem-solver pair's .pickle file. - """ - # Create a new text file in experiments/logs folder with correct name. - new_path = self.file_name_path.replace("outputs", "logs") # Adjust file_path_name to correct folder. - new_path2 = new_path.replace(".pickle", "") # Remove .pickle from .txt file name. - - # Create directories if they do no exist. - if "./experiments/logs" in new_path2 and not os.path.exists("./experiments/logs"): - os.makedirs("./experiments", exist_ok=True) - os.makedirs("./experiments/logs") - - with open(new_path2 + "_experiment_results.txt", "w") as file: - # Title txt file with experiment information. - file.write(self.file_name_path) - file.write('\n') - file.write(f"Problem: {self.problem.name}\n") - file.write(f"Solver: {self.solver.name}\n\n") - - # Display model factors. - file.write("Model Factors:\n") - for key, value in self.problem.model.factors.items(): - # Excluding model factors corresponding to decision variables. - if key not in self.problem.model_decision_factors: - file.write(f"\t{key}: {value}\n") - file.write("\n") - # Display problem factors. - file.write("Problem Factors:\n") - for key, value in self.problem.factors.items(): - file.write(f"\t{key}: {value}\n") - file.write("\n") - # Display solver factors. 
- file.write("Solver Factors:\n") - for key, value in self.solver.factors.items(): - file.write(f"\t{key}: {value}\n") - file.write("\n") - - # Display macroreplication information. - file.write(f"{self.n_macroreps} macroreplications were run.\n") - # If results have been postreplicated, list the number of post-replications. - if self.check_postreplicate(): - file.write(f"{self.n_postreps} postreplications were run at each recommended solution.\n\n") - # If post-normalized, state initial solution (x0) and proxy optimal solution (x_star) - # and how many replications were taken of them (n_postreps_init_opt). - if self.check_postnormalize(): - file.write(f"The initial solution is {tuple([round(x, 4) for x in self.x0])}. Its estimated objective is {round(np.mean(self.x0_postreps), 4)}.\n") - file.write(f"The proxy optimal solution is {tuple([round(x, 4) for x in self.xstar])}. Its estimated objective is {round(np.mean(self.xstar_postreps), 4)}.\n") - file.write(f"{self.n_postreps_init_opt} postreplications were taken at x0 and x_star.\n\n") - # Display recommended solution at each budget value for each macroreplication. - file.write('Macroreplication Results:\n') - for mrep in range(self.n_macroreps): - file.write(f"\nMacroreplication {mrep + 1}:\n") - for budget in range(len(self.all_intermediate_budgets[mrep])): - file.write(f"\tBudget: {round(self.all_intermediate_budgets[mrep][budget], 4)}") - # Optionally print solutions. - if print_solutions: - file.write(f"\tRecommended Solution: {tuple([round(x, 4) for x in self.all_recommended_xs[mrep][budget]])}") - # If postreplicated, add estimated objective function values. 
- if self.check_postreplicate(): - file.write(f"\tEstimated Objective: {round(self.all_est_objectives[mrep][budget], 4)}\n") - file.write(f"\tThe time taken to complete this macroreplication was {round(self.timings[mrep], 2)} s.\n") - file.close() - - -def trim_solver_results(problem, recommended_solns, intermediate_budgets): - """Trim solutions recommended by solver after problem's max budget. - - Parameters - ---------- - problem : ``base.Problem`` - Problem object on which the solver was run. - recommended_solutions : list [``base.Solution``] - Solutions recommended by the solver. - intermediate_budgets : list [int] - Intermediate budgets at which solver recommended different solutions. - """ - # Remove solutions corresponding to intermediate budgets exceeding max budget. - invalid_idxs = [idx for idx, element in enumerate(intermediate_budgets) if element > problem.factors["budget"]] - for invalid_idx in sorted(invalid_idxs, reverse=True): - del recommended_solns[invalid_idx] - del intermediate_budgets[invalid_idx] - # If no solution is recommended at the final budget, - # re-recommend the latest recommended solution. - # (Necessary for clean plotting of progress curves.) - if intermediate_budgets[-1] < problem.factors["budget"]: - recommended_solns.append(recommended_solns[-1]) - intermediate_budgets.append(problem.factors["budget"]) - return recommended_solns, intermediate_budgets - - -def read_experiment_results(file_name_path): - """Read in ``experiment_base.ProblemSolver`` object from .pickle file. - - Parameters - ---------- - file_name_path : str - Path of .pickle file for reading ``experiment_base.ProblemSolver`` object. - - Returns - ------- - experiment : ``experiment_base.ProblemSolver`` - Problem-solver pair that has been run or has been post-processed. 
- """ - with open(file_name_path, "rb") as file: - experiment = pickle.load(file) - return experiment - - -def post_normalize(experiments, n_postreps_init_opt, crn_across_init_opt=True, proxy_init_val=None, proxy_opt_val=None, proxy_opt_x=None): - """Construct objective curves and (normalized) progress curves - for a collection of experiments on a given problem. - - Parameters - ---------- - experiments : list [``experiment_base.ProblemSolver``] - Problem-solver pairs of different solvers on a common problem. - n_postreps_init_opt : int - Number of postreplications to take at initial x0 and optimal x*. - crn_across_init_opt : bool, default=True - True if CRN used for post-replications at solutions x0 and x*, otherwise False. - proxy_init_val : float, optional - Known objective function value of initial solution. - proxy_opt_val : float, optional - Proxy for or bound on optimal objective function value. - proxy_opt_x : tuple, optional - Proxy for optimal solution. - """ - # Check that all experiments have the same problem and same - # post-experimental setup. - ref_experiment = experiments[0] - for experiment in experiments: - # Check if problems are the same. - if experiment.problem != ref_experiment.problem: - print("At least two experiments have different problem instances.") - # Check if experiments have common number of macroreps. - if experiment.n_macroreps != ref_experiment.n_macroreps: - print("At least two experiments have different numbers of macro-replications.") - # Check if experiment has been post-replicated and with common number of postreps. 
- if getattr(experiment, "n_postreps", None) is None: - print(f"The experiment of {experiment.solver.name} on {experiment.problem.name} has not been post-replicated.") - elif getattr(experiment, "n_postreps", None) != getattr(ref_experiment, "n_postreps", None): - print("At least two experiments have different numbers of post-replications.") - print("Estimation of optimal solution x* may be based on different numbers of post-replications.") - # Take post-replications at common x0. - # Create, initialize, and attach RNGs for model. - # Stream 0: reserved for post-replications. - baseline_rngs = [MRG32k3a(s_ss_sss_index=[0, rng_index, 0]) for rng_index in range(experiment.problem.model.n_rngs)] - x0 = ref_experiment.problem.factors["initial_solution"] - if proxy_init_val is not None: - x0_postreps = [proxy_init_val] * n_postreps_init_opt - else: - initial_soln = Solution(x0, ref_experiment.problem) - initial_soln.attach_rngs(rng_list=baseline_rngs, copy=False) - ref_experiment.problem.simulate(solution=initial_soln, m=n_postreps_init_opt) - x0_postreps = list(initial_soln.objectives[:n_postreps_init_opt][:, 0]) # 0 <- assuming only one objective - if crn_across_init_opt: - # Reset each rng to start of its current substream. - for rng in baseline_rngs: - rng.reset_substream() - # Determine (proxy for) optimal solution and/or (proxy for) its - # objective function value. If deterministic (proxy for) f(x*), - # create duplicate post-replicates to facilitate later bootstrapping. - # If proxy for f(x*) is specified... - if proxy_opt_val is not None: - xstar = None - xstar_postreps = [proxy_opt_val] * n_postreps_init_opt - # ...else if proxy for x* is specified... - elif proxy_opt_x is not None: - xstar = proxy_opt_x - # Take post-replications at xstar. 
- opt_soln = Solution(xstar, ref_experiment.problem) - opt_soln.attach_rngs(rng_list=baseline_rngs, copy=False) - ref_experiment.problem.simulate(solution=opt_soln, m=n_postreps_init_opt) - xstar_postreps = list(opt_soln.objectives[:n_postreps_init_opt][:, 0]) # 0 <- assuming only one objective - # ...else if f(x*) is known... - elif ref_experiment.problem.optimal_value is not None: - xstar = None - xstar_postreps = [ref_experiment.problem.optimal_value] * n_postreps_init_opt - # ...else if x* is known... - elif ref_experiment.problem.optimal_solution is not None: - xstar = ref_experiment.problem.optimal_solution - # Take post-replications at xstar. - opt_soln = Solution(xstar, ref_experiment.problem) - opt_soln.attach_rngs(rng_list=baseline_rngs, copy=False) - ref_experiment.problem.simulate(solution=opt_soln, m=n_postreps_init_opt) - xstar_postreps = list(opt_soln.objectives[:n_postreps_init_opt][:, 0]) # 0 <- assuming only one objective - # ...else determine x* empirically as estimated best solution - # found by any solver on any macroreplication. - else: - # TO DO: Simplify this block of code. 
- best_est_objectives = np.zeros(len(experiments)) - for experiment_idx in range(len(experiments)): - experiment = experiments[experiment_idx] - exp_best_est_objectives = np.zeros(experiment.n_macroreps) - for mrep in range(experiment.n_macroreps): - exp_best_est_objectives[mrep] = np.max(experiment.problem.minmax[0] * np.array(experiment.all_est_objectives[mrep])) - best_est_objectives[experiment_idx] = np.max(exp_best_est_objectives) - best_experiment_idx = np.argmax(best_est_objectives) - best_experiment = experiments[best_experiment_idx] - best_exp_best_est_objectives = np.zeros(experiment.n_macroreps) - for mrep in range(best_experiment.n_macroreps): - best_exp_best_est_objectives[mrep] = np.max(best_experiment.problem.minmax[0] * np.array(best_experiment.all_est_objectives[mrep])) - best_mrep = np.argmax(best_exp_best_est_objectives) - best_budget_idx = np.argmax(experiment.problem.minmax[0] * np.array(best_experiment.all_est_objectives[best_mrep])) - xstar = best_experiment.all_recommended_xs[best_mrep][best_budget_idx] - # Take post-replications at x*. - opt_soln = Solution(xstar, ref_experiment.problem) - opt_soln.attach_rngs(rng_list=baseline_rngs, copy=False) - ref_experiment.problem.simulate(solution=opt_soln, m=n_postreps_init_opt) - xstar_postreps = list(opt_soln.objectives[:n_postreps_init_opt][:, 0]) # 0 <- assuming only one objective - # Compute signed initial optimality gap = f(x0) - f(x*). - initial_obj_val = np.mean(x0_postreps) - opt_obj_val = np.mean(xstar_postreps) - initial_opt_gap = initial_obj_val - opt_obj_val - # Store x0 and x* info and compute progress curves for each ProblemSolver. - for experiment in experiments: - # DOUBLE-CHECK FOR SHALLOW COPY ISSUES. 
- experiment.n_postreps_init_opt = n_postreps_init_opt - experiment.crn_across_init_opt = crn_across_init_opt - experiment.x0 = x0 - experiment.x0_postreps = x0_postreps - experiment.xstar = xstar - experiment.xstar_postreps = xstar_postreps - # Construct objective and progress curves. - experiment.objective_curves = [] - experiment.progress_curves = [] - for mrep in range(experiment.n_macroreps): - est_objectives = [] - # Substitute estimates at x0 and x* (based on N postreplicates) - # with new estimates (based on L postreplicates). - for budget in range(len(experiment.all_intermediate_budgets[mrep])): - if experiment.all_recommended_xs[mrep][budget] == x0: - est_objectives.append(np.mean(x0_postreps)) - elif experiment.all_recommended_xs[mrep][budget] == xstar: - est_objectives.append(np.mean(xstar_postreps)) - else: - est_objectives.append(experiment.all_est_objectives[mrep][budget]) - experiment.objective_curves.append(Curve(x_vals=experiment.all_intermediate_budgets[mrep], y_vals=est_objectives)) - # Normalize by initial optimality gap. - norm_est_objectives = [(est_objective - opt_obj_val) / initial_opt_gap for est_objective in est_objectives] - frac_intermediate_budgets = [budget / experiment.problem.factors["budget"] for budget in experiment.all_intermediate_budgets[mrep]] - experiment.progress_curves.append(Curve(x_vals=frac_intermediate_budgets, y_vals=norm_est_objectives)) - # Save ProblemSolver object to .pickle file. - experiment.record_experiment_results() - - -def bootstrap_sample_all(experiments, bootstrap_rng, normalize=True): - """Generate bootstrap samples of estimated progress curves (normalized - and unnormalized) from a set of experiments. - - Parameters - ---------- - experiments : list [list [``experiment_base.ProblemSolver``]] - Problem-solver pairs of different solvers and/or problems. - bootstrap_rng : ``mrg32k3a.mrg32k3a.MRG32k3a`` - Random number generator to use for bootstrapping. 
- normalize : bool, default=True - True if progress curves are to be normalized w.r.t. optimality gaps, - otherwise False. - - Returns - ------- - bootstrap_curves : list [list [list [``experiment_base.Curve``]]] - Bootstrapped estimated objective curves or estimated progress curves - of all solutions from all macroreplications. - """ - n_solvers = len(experiments) - n_problems = len(experiments[0]) - bootstrap_curves = [[[] for _ in range(n_problems)] for _ in range(n_solvers)] - # Obtain a bootstrap sample from each experiment. - for solver_idx in range(n_solvers): - for problem_idx in range(n_problems): - experiment = experiments[solver_idx][problem_idx] - bootstrap_curves[solver_idx][problem_idx] = experiment.bootstrap_sample(bootstrap_rng, normalize) - # Reset substream for next solver-problem pair. - bootstrap_rng.reset_substream() - # Advance substream of random number generator to prepare for next bootstrap sample. - bootstrap_rng.advance_substream() - return bootstrap_curves - - -def bootstrap_procedure(experiments, n_bootstraps, conf_level, plot_type, beta=None, solve_tol=None, estimator=None, normalize=True): - """Obtain bootstrap sample and compute confidence intervals. - - Parameters - ---------- - experiments : list [list [``experiment_base.ProblemSolver``]] - Problem-solver pairs of different solvers and/or problems. - n_bootstraps : int - Number of times to generate a bootstrap sample of estimated progress curves. - conf_level : float - Confidence level for confidence intervals, i.e., 1-gamma; in (0, 1). 
- plot_type : str - String indicating which type of plot to produce: - "mean" : estimated mean progress curve; - - "quantile" : estimated beta quantile progress curve; - - "area_mean" : mean of area under progress curve; - - "area_std_dev" : standard deviation of area under progress curve; - - "solve_time_quantile" : beta quantile of solve time; - - "solve_time_cdf" : cdf of solve time; - - "cdf_solvability" : cdf solvability profile; - - "quantile_solvability" : quantile solvability profile; - - "diff_cdf_solvability" : difference of cdf solvability profiles; - - "diff_quantile_solvability" : difference of quantile solvability profiles. - beta : float, optional - Quantile to plot, e.g., beta quantile; in (0, 1). - solve_tol : float, optional - Relative optimality gap definining when a problem is solved; in (0, 1]. - estimator : float or ``experiment_base.Curve``, optional - Main estimator, e.g., mean convergence curve from an experiment. - normalize : bool, default=True - True if progress curves are to be normalized w.r.t. optimality gaps, - otherwise False. - - Returns - ------- - bs_CI_lower_bounds, bs_CI_upper_bounds = float or ``experiment_base.Curve`` - Lower and upper bound(s) of bootstrap CI(s), as floats or curves. - """ - # Create random number generator for bootstrap sampling. - # Stream 1 dedicated for bootstrapping. - bootstrap_rng = MRG32k3a(s_ss_sss_index=[1, 0, 0]) - # Obtain n_bootstrap replications. - bootstrap_replications = [] - for bs_index in range(n_bootstraps): - # Generate bootstrap sample of estimated objective/progress curves. - bootstrap_curves = bootstrap_sample_all(experiments, bootstrap_rng=bootstrap_rng, normalize=normalize) - # Apply the functional of the bootstrap sample. - bootstrap_replications.append(functional_of_curves(bootstrap_curves, plot_type, beta=beta, solve_tol=solve_tol)) - # Distinguish cases where functional returns a scalar vs a curve. 
- if plot_type in {"area_mean", "area_std_dev", "solve_time_quantile"}: - # Functional returns a scalar. - bs_CI_lower_bounds, bs_CI_upper_bounds = compute_bootstrap_CI(bootstrap_replications, - conf_level=conf_level, - bias_correction=True, - overall_estimator=estimator - ) - elif plot_type in {"mean", "quantile", "solve_time_cdf", "cdf_solvability", "quantile_solvability", "diff_cdf_solvability", "diff_quantile_solvability"}: - # Functional returns a curve. - unique_budgets = list(np.unique([budget for curve in bootstrap_replications for budget in curve.x_vals])) - bs_CI_lbs = [] - bs_CI_ubs = [] - for budget in unique_budgets: - bootstrap_subreplications = [curve.lookup(x=budget) for curve in bootstrap_replications] - sub_estimator = estimator.lookup(x=budget) - bs_CI_lower_bound, bs_CI_upper_bound = compute_bootstrap_CI(bootstrap_subreplications, - conf_level=conf_level, - bias_correction=True, - overall_estimator=sub_estimator - ) - bs_CI_lbs.append(bs_CI_lower_bound) - bs_CI_ubs.append(bs_CI_upper_bound) - bs_CI_lower_bounds = Curve(x_vals=unique_budgets, y_vals=bs_CI_lbs) - bs_CI_upper_bounds = Curve(x_vals=unique_budgets, y_vals=bs_CI_ubs) - return bs_CI_lower_bounds, bs_CI_upper_bounds - - -def functional_of_curves(bootstrap_curves, plot_type, beta=0.5, solve_tol=0.1): - """Compute a functional of the bootstrapped objective/progress curves. - - Parameters - ---------- - bootstrap_curves : list [list [list [``experiment_base.Curve``]]] - Bootstrapped estimated objective curves or estimated progress curves - of all solutions from all macroreplications. 
- plot_type : str - String indicating which type of plot to produce: - "mean" : estimated mean progress curve; - - "quantile" : estimated beta quantile progress curve; - - "area_mean" : mean of area under progress curve; - - "area_std_dev" : standard deviation of area under progress curve; - - "solve_time_quantile" : beta quantile of solve time; - - "solve_time_cdf" : cdf of solve time; - - "cdf_solvability" : cdf solvability profile; - - "quantile_solvability" : quantile solvability profile; - - "diff_cdf_solvability" : difference of cdf solvability profiles; - - "diff_quantile_solvability" : difference of quantile solvability profiles; - beta : float, default=0.5 - Quantile to plot, e.g., beta quantile; in (0, 1). - solve_tol : float, default=0.1 - Relative optimality gap definining when a problem is solved; in (0, 1]. - - Returns - ------- - functional : list - Functional of bootstrapped curves, e.g, mean progress curves, - mean area under progress curve, quantile of crossing time, etc. - """ - if plot_type == "mean": - # Single experiment --> returns a curve. - functional = mean_of_curves(bootstrap_curves[0][0]) - elif plot_type == "quantile": - # Single experiment --> returns a curve. - functional = quantile_of_curves(bootstrap_curves[0][0], beta=beta) - elif plot_type == "area_mean": - # Single experiment --> returns a scalar. - functional = np.mean([curve.compute_area_under_curve() for curve in bootstrap_curves[0][0]]) - elif plot_type == "area_std_dev": - # Single experiment --> returns a scalar. - functional = np.std([curve.compute_area_under_curve() for curve in bootstrap_curves[0][0]], ddof=1) - elif plot_type == "solve_time_quantile": - # Single experiment --> returns a scalar - functional = np.quantile([curve.compute_crossing_time(threshold=solve_tol) for curve in bootstrap_curves[0][0]], q=beta) - elif plot_type == "solve_time_cdf": - # Single experiment --> returns a curve. 
def compute_bootstrap_CI(observations, conf_level, bias_correction=True, overall_estimator=None):
    """Construct a bootstrap confidence interval for an estimator.

    Parameters
    ----------
    observations : list
        Estimators from all bootstrap instances.
    conf_level : float
        Confidence level for confidence intervals, i.e., 1-gamma; in (0, 1).
    bias_correction : bool, default=True
        True if bias-corrected bootstrap CIs (via percentile method) are to be used,
        otherwise False.
    overall_estimator : float, optional
        Estimator to compute bootstrap confidence interval of;
        required for bias corrected CI.

    Returns
    -------
    bs_CI_lower_bound : float
        Lower bound of bootstrap CI.
    bs_CI_upper_bound : float
        Upper bound of bootstrap CI.

    Raises
    ------
    ValueError
        If ``bias_correction`` is True but ``overall_estimator`` is not provided.
    """
    # Compute bootstrapping confidence interval via percentile method.
    # See Efron (1981) "Nonparametric Standard Errors and Confidence Intervals."
    if bias_correction:
        if overall_estimator is None:
            # Fail fast: the original code only printed a warning and then
            # crashed with a TypeError on the comparison below.
            raise ValueError("Estimator required to compute bias-corrected CIs.")
        # For bias-corrected CIs, see equation (4.4) on page 146.
        z0 = norm.ppf(np.mean([obs < overall_estimator for obs in observations]))
        zconflvl = norm.ppf(conf_level)
        q_lower = norm.cdf(2 * z0 - zconflvl)
        q_upper = norm.cdf(2 * z0 + zconflvl)
    else:
        # For uncorrected CIs, see equation (4.3) on page 146.
        q_lower = (1 - conf_level) / 2
        q_upper = 1 - (1 - conf_level) / 2
    bs_CI_lower_bound = np.quantile(observations, q=q_lower)
    bs_CI_upper_bound = np.quantile(observations, q=q_upper)
    return bs_CI_lower_bound, bs_CI_upper_bound
def report_max_halfwidth(curve_pairs, normalize, conf_level, difference=False,):
    """Compute and print caption for max halfwidth of one or more bootstrap CI curves.

    Parameters
    ----------
    curve_pairs : list [list [``experiment_base.Curve``]]
        List of paired bootstrap CI curves (lower bound first, upper bound second).
    normalize : bool
        True if progress curves are to be normalized w.r.t. optimality gaps,
        otherwise False.
    conf_level : float
        Confidence level for confidence intervals, i.e., 1-gamma; in (0, 1).
    difference : bool
        True if the plot is for difference profiles, otherwise False.
    """
    # Aggregate the extreme bounds and the widest CI across all pairs.
    min_lower_bound = min(min(lower.y_vals) for lower, _ in curve_pairs)
    max_upper_bound = max(max(upper.y_vals) for _, upper in curve_pairs)
    max_halfwidth = max(0.5 * max_difference_of_curves(upper, lower) for lower, upper in curve_pairs)
    # Pick caption coordinates: fixed offsets for normalized axes, scaled ones otherwise.
    if normalize:
        xloc = 0.05
        yloc = -1.35 if difference else -0.35
    else:
        # xloc = 0.05 * budget of the problem
        xloc = 0.05 * curve_pairs[0][0].x_vals[-1]
        yloc = min_lower_bound - 0.25 * (max_upper_bound - min_lower_bound)
    txt = f"The max halfwidth of the bootstrap {round(conf_level * 100)}% CIs is {round(max_halfwidth, 2)}."
    plt.text(x=xloc, y=yloc, s=txt)
- """ - ref_experiment = experiments[0] - for experiment in experiments: - if experiment.problem != ref_experiment.problem: - print("At least two experiments have different problem instances.") - if experiment.x0 != ref_experiment.x0: - print("At least two experiments have different starting solutions.") - if experiment.xstar != ref_experiment.xstar: - print("At least two experiments have different optimal solutions.") - - -def plot_progress_curves(experiments, plot_type, beta=0.50, normalize=True, all_in_one=True, n_bootstraps=100, conf_level=0.95, plot_CIs=True, print_max_hw=True): - """Plot individual or aggregate progress curves for one or more solvers - on a single problem. - - Parameters - ---------- - experiments : list [``experiment_base.ProblemSolver``] - Problem-solver pairs of different solvers on a common problem. - plot_type : str - String indicating which type of plot to produce: - "all" : all estimated progress curves; - - "mean" : estimated mean progress curve; - - "quantile" : estimated beta quantile progress curve. - beta : float, default=0.50 - Quantile to plot, e.g., beta quantile; in (0, 1). - normalize : bool, default=True - True if progress curves are to be normalized w.r.t. optimality gaps, - otherwise False. - all_in_one : bool, default=True - True if curves are to be plotted together, otherwise False. - n_bootstraps : int, default=100 - Number of bootstrap samples. - conf_level : float - Confidence level for confidence intervals, i.e., 1-gamma; in (0, 1). - plot_CIs : bool, default=True - True if bootstrapping confidence intervals are to be plotted, otherwise False. - print_max_hw : bool, default=True - True if caption with max half-width is to be printed, otherwise False. - - Returns - ------- - file_list : list [str] - List compiling path names for plots produced. - """ - # Check if problems are the same with the same x0 and x*. - check_common_problem_and_reference(experiments) - file_list = [] - # Set up plot. 
def plot_progress_curves(experiments, plot_type, beta=0.50, normalize=True, all_in_one=True, n_bootstraps=100, conf_level=0.95, plot_CIs=True, print_max_hw=True):
    """Plot individual or aggregate progress curves for one or more solvers
    on a single problem.

    Parameters
    ----------
    experiments : list [``experiment_base.ProblemSolver``]
        Problem-solver pairs of different solvers on a common problem.
    plot_type : str
        String indicating which type of plot to produce:
            "all" : all estimated progress curves;
            "mean" : estimated mean progress curve;
            "quantile" : estimated beta quantile progress curve.
    beta : float, default=0.50
        Quantile to plot, e.g., beta quantile; in (0, 1).
    normalize : bool, default=True
        True if progress curves are to be normalized w.r.t. optimality gaps,
        otherwise False.
    all_in_one : bool, default=True
        True if curves are to be plotted together, otherwise False.
    n_bootstraps : int, default=100
        Number of bootstrap samples.
    conf_level : float
        Confidence level for confidence intervals, i.e., 1-gamma; in (0, 1).
    plot_CIs : bool, default=True
        True if bootstrapping confidence intervals are to be plotted, otherwise False.
    print_max_hw : bool, default=True
        True if caption with max half-width is to be printed, otherwise False.

    Returns
    -------
    file_list : list [str]
        List compiling path names for plots produced.
    """
    # Check if problems are the same with the same x0 and x*.
    check_common_problem_and_reference(experiments)
    file_list = []
    # Set up plot.
    n_experiments = len(experiments)
    if all_in_one:
        # One figure containing every solver's curve(s), color-cycled by index.
        ref_experiment = experiments[0]
        setup_plot(plot_type=plot_type,
                   solver_name="SOLVER SET",
                   problem_name=ref_experiment.problem.name,
                   normalize=normalize,
                   budget=ref_experiment.problem.factors["budget"],
                   beta=beta
                   )
        solver_curve_handles = []
        if print_max_hw:
            curve_pairs = []
        for exp_idx in range(n_experiments):
            experiment = experiments[exp_idx]
            color_str = "C" + str(exp_idx)
            if plot_type == "all":
                # Plot all estimated progress curves.
                if normalize:
                    handle = experiment.progress_curves[0].plot(color_str=color_str)
                    for curve in experiment.progress_curves[1:]:
                        curve.plot(color_str=color_str)
                else:
                    handle = experiment.objective_curves[0].plot(color_str=color_str)
                    for curve in experiment.objective_curves[1:]:
                        curve.plot(color_str=color_str)
            elif plot_type == "mean":
                # Plot estimated mean progress curve.
                if normalize:
                    estimator = mean_of_curves(experiment.progress_curves)
                else:
                    estimator = mean_of_curves(experiment.objective_curves)
                handle = estimator.plot(color_str=color_str)
            elif plot_type == "quantile":
                # Plot estimated beta-quantile progress curve.
                if normalize:
                    estimator = quantile_of_curves(experiment.progress_curves, beta)
                else:
                    estimator = quantile_of_curves(experiment.objective_curves, beta)
                handle = estimator.plot(color_str=color_str)
            else:
                # NOTE(review): an invalid plot_type leaves `handle`/`estimator`
                # unbound and the append below would raise NameError — confirm intended.
                print("Not a valid plot type.")
            solver_curve_handles.append(handle)
            if (plot_CIs or print_max_hw) and plot_type != "all":
                # Note: "experiments" needs to be a list of list of ProblemSolver objects.
                bs_CI_lb_curve, bs_CI_ub_curve = bootstrap_procedure(experiments=[[experiment]],
                                                                     n_bootstraps=n_bootstraps,
                                                                     conf_level=conf_level,
                                                                     plot_type=plot_type,
                                                                     beta=beta,
                                                                     estimator=estimator,
                                                                     normalize=normalize
                                                                     )
                if plot_CIs:
                    plot_bootstrap_CIs(bs_CI_lb_curve, bs_CI_ub_curve, color_str=color_str)
                if print_max_hw:
                    curve_pairs.append([bs_CI_lb_curve, bs_CI_ub_curve])
        plt.legend(handles=solver_curve_handles, labels=[experiment.solver.name for experiment in experiments], loc="upper right")
        if print_max_hw and plot_type != "all":
            report_max_halfwidth(curve_pairs=curve_pairs, normalize=normalize, conf_level=conf_level)
        file_list.append(save_plot(solver_name="SOLVER SET",
                                   problem_name=ref_experiment.problem.name,
                                   plot_type=plot_type,
                                   normalize=normalize,
                                   extra=beta
                                   ))
    else:  # Plot separately.
        # One figure (and one saved file) per experiment.
        for experiment in experiments:
            setup_plot(plot_type=plot_type,
                       solver_name=experiment.solver.name,
                       problem_name=experiment.problem.name,
                       normalize=normalize,
                       budget=experiment.problem.factors["budget"],
                       beta=beta
                       )
            if plot_type == "all":
                # Plot all estimated progress curves.
                if normalize:
                    for curve in experiment.progress_curves:
                        curve.plot()
                else:
                    for curve in experiment.objective_curves:
                        curve.plot()
            elif plot_type == "mean":
                # Plot estimated mean progress curve.
                if normalize:
                    estimator = mean_of_curves(experiment.progress_curves)
                else:
                    estimator = mean_of_curves(experiment.objective_curves)
                estimator.plot()
            elif plot_type == "quantile":
                # Plot estimated beta-quantile progress curve.
                if normalize:
                    estimator = quantile_of_curves(experiment.progress_curves, beta)
                else:
                    estimator = quantile_of_curves(experiment.objective_curves, beta)
                estimator.plot()
            else:
                print("Not a valid plot type.")
            if (plot_CIs or print_max_hw) and plot_type != "all":
                # Note: "experiments" needs to be a list of list of ProblemSolvers.
                bs_CI_lb_curve, bs_CI_ub_curve = bootstrap_procedure(experiments=[[experiment]],
                                                                     n_bootstraps=n_bootstraps,
                                                                     conf_level=conf_level,
                                                                     plot_type=plot_type,
                                                                     beta=beta,
                                                                     estimator=estimator,
                                                                     normalize=normalize
                                                                     )
                if plot_CIs:
                    plot_bootstrap_CIs(bs_CI_lb_curve, bs_CI_ub_curve)
                if print_max_hw:
                    report_max_halfwidth(curve_pairs=[[bs_CI_lb_curve, bs_CI_ub_curve]], normalize=normalize, conf_level=conf_level)
            file_list.append(save_plot(solver_name=experiment.solver.name,
                                       problem_name=experiment.problem.name,
                                       plot_type=plot_type,
                                       normalize=normalize,
                                       extra=beta
                                       ))
    return file_list
def plot_solvability_cdfs(experiments, solve_tol=0.1, all_in_one=True, n_bootstraps=100, conf_level=0.95, plot_CIs=True, print_max_hw=True):
    """Plot the solvability cdf for one or more solvers on a single problem.

    Parameters
    ----------
    experiments : list [``experiment_base.ProblemSolver``]
        Problem-solver pairs of different solvers on a common problem.
    solve_tol : float, default=0.1
        Relative optimality gap defining when a problem is solved; in (0, 1].
    all_in_one : bool, default=True
        True if curves are to be plotted together, otherwise False.
    n_bootstraps : int, default=100
        Number of bootstrap samples.
    conf_level : float
        Confidence level for confidence intervals, i.e., 1-gamma; in (0, 1).
    plot_CIs : bool, default=True
        True if bootstrapping confidence intervals are to be plotted, otherwise False.
    print_max_hw : bool, default=True
        True if caption with max half-width is to be printed, otherwise False.

    Returns
    -------
    file_list : list [str]
        List compiling path names for plots produced.
    """
    # Check if problems are the same with the same x0 and x*.
    check_common_problem_and_reference(experiments)
    file_list = []
    # Set up plot.
    n_experiments = len(experiments)
    if all_in_one:
        # One figure with all solvers' cdfs, color-cycled by experiment index.
        ref_experiment = experiments[0]
        setup_plot(plot_type="solve_time_cdf",
                   solver_name="SOLVER SET",
                   problem_name=ref_experiment.problem.name,
                   solve_tol=solve_tol
                   )
        solver_curve_handles = []
        if print_max_hw:
            curve_pairs = []
        for exp_idx in range(n_experiments):
            experiment = experiments[exp_idx]
            color_str = "C" + str(exp_idx)
            # Plot cdf of solve times.
            estimator = cdf_of_curves_crossing_times(experiment.progress_curves, threshold=solve_tol)
            handle = estimator.plot(color_str=color_str)
            solver_curve_handles.append(handle)
            if plot_CIs or print_max_hw:
                # Note: "experiments" needs to be a list of list of ProblemSolver objects.
                bs_CI_lb_curve, bs_CI_ub_curve = bootstrap_procedure(experiments=[[experiment]],
                                                                     n_bootstraps=n_bootstraps,
                                                                     conf_level=conf_level,
                                                                     plot_type="solve_time_cdf",
                                                                     solve_tol=solve_tol,
                                                                     estimator=estimator,
                                                                     normalize=True
                                                                     )
                if plot_CIs:
                    plot_bootstrap_CIs(bs_CI_lb_curve, bs_CI_ub_curve, color_str=color_str)
                if print_max_hw:
                    curve_pairs.append([bs_CI_lb_curve, bs_CI_ub_curve])
        plt.legend(handles=solver_curve_handles, labels=[experiment.solver.name for experiment in experiments], loc="upper left")
        if print_max_hw:
            report_max_halfwidth(curve_pairs=curve_pairs, normalize=True, conf_level=conf_level)
        file_list.append(save_plot(solver_name="SOLVER SET",
                                   problem_name=ref_experiment.problem.name,
                                   plot_type="solve_time_cdf",
                                   normalize=True,
                                   extra=solve_tol
                                   ))
    else:  # Plot separately.
        # One figure (and one saved file) per experiment.
        for experiment in experiments:
            setup_plot(plot_type="solve_time_cdf",
                       solver_name=experiment.solver.name,
                       problem_name=experiment.problem.name,
                       solve_tol=solve_tol
                       )
            estimator = cdf_of_curves_crossing_times(experiment.progress_curves, threshold=solve_tol)
            estimator.plot()
            if plot_CIs or print_max_hw:
                # Note: "experiments" needs to be a list of list of Problem-Solver objects.
                bs_CI_lb_curve, bs_CI_ub_curve = bootstrap_procedure(experiments=[[experiment]],
                                                                     n_bootstraps=n_bootstraps,
                                                                     conf_level=conf_level,
                                                                     plot_type="solve_time_cdf",
                                                                     solve_tol=solve_tol,
                                                                     estimator=estimator,
                                                                     normalize=True
                                                                     )
                if plot_CIs:
                    plot_bootstrap_CIs(bs_CI_lb_curve, bs_CI_ub_curve)
                if print_max_hw:
                    report_max_halfwidth(curve_pairs=[[bs_CI_lb_curve, bs_CI_ub_curve]], normalize=True, conf_level=conf_level)
            file_list.append(save_plot(solver_name=experiment.solver.name,
                                       problem_name=experiment.problem.name,
                                       plot_type="solve_time_cdf",
                                       normalize=True,
                                       extra=solve_tol
                                       ))
    return file_list
def plot_area_scatterplots(experiments, all_in_one=True, n_bootstraps=100, conf_level=0.95, plot_CIs=True, print_max_hw=True):
    """Plot a scatter plot of mean and standard deviation of area under progress curves.
    Either one plot for each solver or one plot for all solvers.

    Notes
    -----
    TO DO: Add the capability to compute and print the max halfwidth of
    the bootstrapped CI intervals.

    Parameters
    ----------
    experiments : list [list [``experiment_base.ProblemSolver``]]
        Problem-solver pairs used to produce plots.
    all_in_one : bool, default=True
        True if curves are to be plotted together, otherwise False.
    n_bootstraps : int, default=100
        Number of bootstrap samples.
    conf_level : float
        Confidence level for confidence intervals, i.e., 1-gamma; in (0, 1).
    plot_CIs : bool, default=True
        True if bootstrapping confidence intervals are to be plotted, otherwise False.
    print_max_hw : bool, default=True
        True if caption with max half-width is to be printed, otherwise False.

    Returns
    -------
    file_list : list [str]
        List compiling path names for plots produced.
    """
    file_list = []
    # Set up plot.
    n_solvers = len(experiments)
    n_problems = len(experiments[0])
    if all_in_one:
        # FIX: "V" is not a valid matplotlib marker (down-triangle is "v");
        # use "^" (up-triangle) so an 8th solver does not raise ValueError.
        marker_list = ["o", "v", "s", "*", "P", "X", "D", "^", ">", "<"]
        setup_plot(plot_type="area",
                   solver_name="SOLVER SET",
                   problem_name="PROBLEM SET"
                   )
        solver_names = [solver_experiments[0].solver.name for solver_experiments in experiments]
        solver_curve_handles = []
        # TO DO: Build up capability to print max half-width.
        if print_max_hw:
            curve_pairs = []
        for solver_idx in range(n_solvers):
            for problem_idx in range(n_problems):
                experiment = experiments[solver_idx][problem_idx]
                color_str = "C" + str(solver_idx)
                marker_str = marker_list[solver_idx % len(marker_list)]  # Cycle through list of marker types.
                # Plot mean and standard deviation of area under progress curve.
                areas = [curve.compute_area_under_curve() for curve in experiment.progress_curves]
                mean_estimator = np.mean(areas)
                std_dev_estimator = np.std(areas, ddof=1)
                if plot_CIs:
                    # Note: "experiments" needs to be a list of list of ProblemSolver objects.
                    mean_bs_CI_lb, mean_bs_CI_ub = bootstrap_procedure(experiments=[[experiment]],
                                                                       n_bootstraps=n_bootstraps,
                                                                       conf_level=conf_level,
                                                                       plot_type="area_mean",
                                                                       estimator=mean_estimator,
                                                                       normalize=True
                                                                       )
                    std_dev_bs_CI_lb, std_dev_bs_CI_ub = bootstrap_procedure(experiments=[[experiment]],
                                                                             n_bootstraps=n_bootstraps,
                                                                             conf_level=conf_level,
                                                                             plot_type="area_std_dev",
                                                                             estimator=std_dev_estimator,
                                                                             normalize=True
                                                                             )
                    # if print_max_hw:
                    #     curve_pairs.append([bs_CI_lb_curve, bs_CI_ub_curve])
                    x_err = [[mean_estimator - mean_bs_CI_lb], [mean_bs_CI_ub - mean_estimator]]
                    y_err = [[std_dev_estimator - std_dev_bs_CI_lb], [std_dev_bs_CI_ub - std_dev_estimator]]
                    handle = plt.errorbar(x=mean_estimator,
                                          y=std_dev_estimator,
                                          xerr=x_err,
                                          yerr=y_err,
                                          color=color_str,
                                          marker=marker_str,
                                          elinewidth=1
                                          )
                else:
                    handle = plt.scatter(x=mean_estimator, y=std_dev_estimator, color=color_str, marker=marker_str)
            solver_curve_handles.append(handle)
        plt.legend(handles=solver_curve_handles, labels=solver_names, loc="upper right")
        file_list.append(save_plot(solver_name="SOLVER SET",
                                   problem_name="PROBLEM SET",
                                   plot_type="area",
                                   normalize=True
                                   ))
    else:
        # One figure (and one saved file) per solver.
        for solver_idx in range(n_solvers):
            ref_experiment = experiments[solver_idx][0]
            setup_plot(plot_type="area",
                       solver_name=ref_experiment.solver.name,
                       problem_name="PROBLEM SET"
                       )
            if print_max_hw:
                curve_pairs = []
            for problem_idx in range(n_problems):
                experiment = experiments[solver_idx][problem_idx]
                # Plot mean and standard deviation of area under progress curve.
                areas = [curve.compute_area_under_curve() for curve in experiment.progress_curves]
                mean_estimator = np.mean(areas)
                std_dev_estimator = np.std(areas, ddof=1)
                if plot_CIs:
                    # Note: "experiments" needs to be a list of list of ProblemSolver objects.
                    mean_bs_CI_lb, mean_bs_CI_ub = bootstrap_procedure(experiments=[[experiment]],
                                                                       n_bootstraps=n_bootstraps,
                                                                       conf_level=conf_level,
                                                                       plot_type="area_mean",
                                                                       estimator=mean_estimator,
                                                                       normalize=True
                                                                       )
                    std_dev_bs_CI_lb, std_dev_bs_CI_ub = bootstrap_procedure(experiments=[[experiment]],
                                                                             n_bootstraps=n_bootstraps,
                                                                             conf_level=conf_level,
                                                                             plot_type="area_std_dev",
                                                                             estimator=std_dev_estimator,
                                                                             normalize=True
                                                                             )
                    # if print_max_hw:
                    #     curve_pairs.append([bs_CI_lb_curve, bs_CI_ub_curve])
                    x_err = [[mean_estimator - mean_bs_CI_lb], [mean_bs_CI_ub - mean_estimator]]
                    y_err = [[std_dev_estimator - std_dev_bs_CI_lb], [std_dev_bs_CI_ub - std_dev_estimator]]
                    handle = plt.errorbar(x=mean_estimator,
                                          y=std_dev_estimator,
                                          xerr=x_err,
                                          yerr=y_err,
                                          marker="o",
                                          color="C0",
                                          elinewidth=1
                                          )
                else:
                    handle = plt.scatter(x=mean_estimator, y=std_dev_estimator, color="C0", marker="o")
            file_list.append(save_plot(solver_name=experiment.solver.name,
                                       problem_name="PROBLEM SET",
                                       plot_type="area",
                                       normalize=True
                                       ))
    return file_list
def plot_solvability_profiles(experiments, plot_type, all_in_one=True, n_bootstraps=100, conf_level=0.95, plot_CIs=True, print_max_hw=True, solve_tol=0.1, beta=0.5, ref_solver=None):
    """Plot the (difference of) solvability profiles for each solver on a set of problems.

    Parameters
    ----------
    experiments : list [list [``experiment_base.ProblemSolver``]]
        Problem-solver pairs used to produce plots.
    plot_type : str
        String indicating which type of plot to produce:
            "cdf_solvability" : cdf-solvability profile;
            "quantile_solvability" : quantile-solvability profile;
            "diff_cdf_solvability" : difference of cdf-solvability profiles;
            "diff_quantile_solvability" : difference of quantile-solvability profiles.
    all_in_one : bool, default=True
        True if curves are to be plotted together, otherwise False.
    n_bootstraps : int, default=100
        Number of bootstrap samples.
    conf_level : float
        Confidence level for confidence intervals, i.e., 1-gamma; in (0, 1).
    plot_CIs : bool, default=True
        True if bootstrapping confidence intervals are to be plotted, otherwise False.
    print_max_hw : bool, default=True
        True if caption with max half-width is to be printed, otherwise False.
    solve_tol : float, default=0.1
        Relative optimality gap defining when a problem is solved; in (0, 1].
    beta : float, default=0.5
        Quantile to compute, e.g., beta quantile; in (0, 1).
    ref_solver : str, optional
        Name of solver used as benchmark for difference profiles.

    Returns
    -------
    file_list : list [str]
        List compiling path names for plots produced.
    """
    file_list = []
    # Set up plot.
    n_solvers = len(experiments)
    n_problems = len(experiments[0])
    if all_in_one:
        if plot_type == "cdf_solvability":
            setup_plot(plot_type=plot_type,
                       solver_name="SOLVER SET",
                       problem_name="PROBLEM SET",
                       solve_tol=solve_tol
                       )
        elif plot_type == "quantile_solvability":
            setup_plot(plot_type=plot_type,
                       solver_name="SOLVER SET",
                       problem_name="PROBLEM SET",
                       beta=beta,
                       solve_tol=solve_tol
                       )
        elif plot_type == "diff_cdf_solvability":
            setup_plot(plot_type=plot_type,
                       solver_name="SOLVER SET",
                       problem_name="PROBLEM SET",
                       solve_tol=solve_tol
                       )
        elif plot_type == "diff_quantile_solvability":
            setup_plot(plot_type=plot_type,
                       solver_name="SOLVER SET",
                       problem_name="PROBLEM SET",
                       beta=beta,
                       solve_tol=solve_tol
                       )
        if print_max_hw:
            curve_pairs = []
        solver_names = [solver_experiments[0].solver.name for solver_experiments in experiments]
        solver_curves = []
        solver_curve_handles = []
        for solver_idx in range(n_solvers):
            solver_sub_curves = []
            color_str = "C" + str(solver_idx)
            # For each problem compute the cdf or quantile of solve times.
            for problem_idx in range(n_problems):
                experiment = experiments[solver_idx][problem_idx]
                if plot_type in {"cdf_solvability", "diff_cdf_solvability"}:
                    sub_curve = cdf_of_curves_crossing_times(curves=experiment.progress_curves, threshold=solve_tol)
                if plot_type in {"quantile_solvability", "diff_quantile_solvability"}:
                    sub_curve = quantile_cross_jump(curves=experiment.progress_curves, threshold=solve_tol, beta=beta)
                solver_sub_curves.append(sub_curve)
            # Plot solvability profile for the solver.
            # Exploit the fact that each solvability profile is an average of more basic curves.
            solver_curve = mean_of_curves(solver_sub_curves)
            # CAUTION: Using mean above requires an equal number of macro-replications per problem.
            solver_curves.append(solver_curve)
            if plot_type in {"cdf_solvability", "quantile_solvability"}:
                handle = solver_curve.plot(color_str=color_str)
                solver_curve_handles.append(handle)
                if plot_CIs or print_max_hw:
                    # Note: "experiments" needs to be a list of list of ProblemSolver objects.
                    bs_CI_lb_curve, bs_CI_ub_curve = bootstrap_procedure(experiments=[experiments[solver_idx]],
                                                                         n_bootstraps=n_bootstraps,
                                                                         conf_level=conf_level,
                                                                         plot_type=plot_type,
                                                                         solve_tol=solve_tol,
                                                                         beta=beta,
                                                                         estimator=solver_curve,
                                                                         normalize=True
                                                                         )
                    if plot_CIs:
                        plot_bootstrap_CIs(bs_CI_lb_curve, bs_CI_ub_curve, color_str=color_str)
                    if print_max_hw:
                        curve_pairs.append([bs_CI_lb_curve, bs_CI_ub_curve])
        if plot_type == "cdf_solvability":
            plt.legend(handles=solver_curve_handles, labels=solver_names, loc="upper left")
            if print_max_hw:
                report_max_halfwidth(curve_pairs=curve_pairs, normalize=True, conf_level=conf_level)
            file_list.append(save_plot(solver_name="SOLVER SET",
                                       problem_name="PROBLEM SET",
                                       plot_type=plot_type,
                                       normalize=True,
                                       extra=solve_tol
                                       ))
        elif plot_type == "quantile_solvability":
            plt.legend(handles=solver_curve_handles, labels=solver_names, loc="upper left")
            if print_max_hw:
                report_max_halfwidth(curve_pairs=curve_pairs, normalize=True, conf_level=conf_level)
            file_list.append(save_plot(solver_name="SOLVER SET",
                                       problem_name="PROBLEM SET",
                                       plot_type=plot_type,
                                       normalize=True,
                                       extra=[solve_tol, beta]
                                       ))
        elif plot_type in {"diff_cdf_solvability", "diff_quantile_solvability"}:
            non_ref_solvers = [solver_name for solver_name in solver_names if solver_name != ref_solver]
            ref_solver_idx = solver_names.index(ref_solver)
            for solver_idx in range(n_solvers):
                # FIX: compare indices with != rather than `is not`; identity
                # comparison of ints relies on CPython's small-int cache.
                if solver_idx != ref_solver_idx:
                    diff_solver_curve = difference_of_curves(solver_curves[solver_idx], solver_curves[ref_solver_idx])
                    color_str = "C" + str(solver_idx)
                    handle = diff_solver_curve.plot(color_str=color_str)
                    solver_curve_handles.append(handle)
                    if plot_CIs or print_max_hw:
                        # Note: "experiments" needs to be a list of list of ProblemSolver objects.
                        bs_CI_lb_curve, bs_CI_ub_curve = bootstrap_procedure(experiments=[experiments[solver_idx], experiments[ref_solver_idx]],
                                                                             n_bootstraps=n_bootstraps,
                                                                             conf_level=conf_level,
                                                                             plot_type=plot_type,
                                                                             solve_tol=solve_tol,
                                                                             beta=beta,
                                                                             estimator=diff_solver_curve,
                                                                             normalize=True
                                                                             )
                        if plot_CIs:
                            plot_bootstrap_CIs(bs_CI_lb_curve, bs_CI_ub_curve, color_str=color_str)
                        if print_max_hw:
                            curve_pairs.append([bs_CI_lb_curve, bs_CI_ub_curve])
            offset_labels = [f"{non_ref_solver} - {ref_solver}" for non_ref_solver in non_ref_solvers]
            plt.legend(handles=solver_curve_handles, labels=offset_labels, loc="upper left")
            if print_max_hw:
                report_max_halfwidth(curve_pairs=curve_pairs, normalize=True, conf_level=conf_level, difference=True)
            if plot_type == "diff_cdf_solvability":
                file_list.append(save_plot(solver_name="SOLVER SET",
                                           problem_name="PROBLEM SET",
                                           plot_type=plot_type,
                                           normalize=True,
                                           extra=solve_tol
                                           ))
            elif plot_type == "diff_quantile_solvability":
                file_list.append(save_plot(solver_name="SOLVER SET",
                                           problem_name="PROBLEM SET",
                                           plot_type=plot_type,
                                           normalize=True,
                                           extra=[solve_tol, beta]
                                           ))
    else:
        solver_names = [solver_experiments[0].solver.name for solver_experiments in experiments]
        solver_curves = []
        for solver_idx in range(n_solvers):
            solver_sub_curves = []
            # For each problem compute the cdf or quantile of solve times.
            for problem_idx in range(n_problems):
                experiment = experiments[solver_idx][problem_idx]
                if plot_type in {"cdf_solvability", "diff_cdf_solvability"}:
                    sub_curve = cdf_of_curves_crossing_times(curves=experiment.progress_curves, threshold=solve_tol)
                if plot_type in {"quantile_solvability", "diff_quantile_solvability"}:
                    sub_curve = quantile_cross_jump(curves=experiment.progress_curves, threshold=solve_tol, beta=beta)
                solver_sub_curves.append(sub_curve)
            # Plot solvability profile for the solver.
            # Exploit the fact that each solvability profile is an average of more basic curves.
            solver_curve = mean_of_curves(solver_sub_curves)
            solver_curves.append(solver_curve)
            if plot_type in {"cdf_solvability", "quantile_solvability"}:
                # Set up plot.
                if plot_type == "cdf_solvability":
                    file_list.append(setup_plot(plot_type=plot_type,
                                                solver_name=experiments[solver_idx][0].solver.name,
                                                problem_name="PROBLEM SET",
                                                solve_tol=solve_tol
                                                ))
                elif plot_type == "quantile_solvability":
                    file_list.append(setup_plot(plot_type=plot_type,
                                                solver_name=experiments[solver_idx][0].solver.name,
                                                problem_name="PROBLEM SET",
                                                beta=beta,
                                                solve_tol=solve_tol
                                                ))
                handle = solver_curve.plot()
                if plot_CIs or print_max_hw:
                    # Note: "experiments" needs to be a list of list of ProblemSolver objects.
                    bs_CI_lb_curve, bs_CI_ub_curve = bootstrap_procedure(experiments=[experiments[solver_idx]],
                                                                         n_bootstraps=n_bootstraps,
                                                                         conf_level=conf_level,
                                                                         plot_type=plot_type,
                                                                         solve_tol=solve_tol,
                                                                         beta=beta,
                                                                         estimator=solver_curve,
                                                                         normalize=True
                                                                         )
                    if plot_CIs:
                        plot_bootstrap_CIs(bs_CI_lb_curve, bs_CI_ub_curve)
                    if print_max_hw:
                        report_max_halfwidth(curve_pairs=[[bs_CI_lb_curve, bs_CI_ub_curve]], normalize=True, conf_level=conf_level)
                if plot_type == "cdf_solvability":
                    file_list.append(save_plot(solver_name=experiments[solver_idx][0].solver.name,
                                               problem_name="PROBLEM SET",
                                               plot_type=plot_type,
                                               normalize=True,
                                               extra=solve_tol
                                               ))
                elif plot_type == "quantile_solvability":
                    file_list.append(save_plot(solver_name=experiments[solver_idx][0].solver.name,
                                               problem_name="PROBLEM SET",
                                               plot_type=plot_type,
                                               normalize=True,
                                               extra=[solve_tol, beta]
                                               ))
        if plot_type in {"diff_cdf_solvability", "diff_quantile_solvability"}:
            non_ref_solvers = [solver_name for solver_name in solver_names if solver_name != ref_solver]
            ref_solver_idx = solver_names.index(ref_solver)
            for solver_idx in range(n_solvers):
                # FIX: != instead of `is not` (see note above in this function).
                if solver_idx != ref_solver_idx:
                    if plot_type == "diff_cdf_solvability":
                        file_list.append(setup_plot(plot_type=plot_type,
                                                    solver_name=experiments[solver_idx][0].solver.name,
                                                    problem_name="PROBLEM SET",
                                                    solve_tol=solve_tol
                                                    ))
                    elif plot_type == "diff_quantile_solvability":
                        file_list.append(setup_plot(plot_type=plot_type,
                                                    solver_name=experiments[solver_idx][0].solver.name,
                                                    problem_name="PROBLEM SET",
                                                    beta=beta,
                                                    solve_tol=solve_tol
                                                    ))
                    diff_solver_curve = difference_of_curves(solver_curves[solver_idx], solver_curves[ref_solver_idx])
                    handle = diff_solver_curve.plot()
                    if plot_CIs or print_max_hw:
                        # Note: "experiments" needs to be a list of list of ProblemSolver objects.
                        bs_CI_lb_curve, bs_CI_ub_curve = bootstrap_procedure(experiments=[experiments[solver_idx], experiments[ref_solver_idx]],
                                                                             n_bootstraps=n_bootstraps,
                                                                             conf_level=conf_level,
                                                                             plot_type=plot_type,
                                                                             solve_tol=solve_tol,
                                                                             beta=beta,
                                                                             estimator=diff_solver_curve,
                                                                             normalize=True
                                                                             )
                        if plot_CIs:
                            plot_bootstrap_CIs(bs_CI_lb_curve, bs_CI_ub_curve)
                        if print_max_hw:
                            report_max_halfwidth(curve_pairs=[[bs_CI_lb_curve, bs_CI_ub_curve]], normalize=True, conf_level=conf_level, difference=True)
                    if plot_type == "diff_cdf_solvability":
                        file_list.append(save_plot(solver_name=experiments[solver_idx][0].solver.name,
                                                   problem_name="PROBLEM SET",
                                                   plot_type=plot_type,
                                                   normalize=True,
                                                   extra=solve_tol
                                                   ))
                    elif plot_type == "diff_quantile_solvability":
                        file_list.append(save_plot(solver_name=experiments[solver_idx][0].solver.name,
                                                   problem_name="PROBLEM SET",
                                                   plot_type=plot_type,
                                                   normalize=True,
                                                   extra=[solve_tol, beta]
                                                   ))
    return file_list
def plot_terminal_progress(experiments, plot_type="violin", normalize=True, all_in_one=True):
    """Plot individual or aggregate terminal progress for one or more solvers
    on a single problem.

    Parameters
    ----------
    experiments : list [``experiment_base.ProblemSolver``]
        ProblemSolver pairs of different solvers on a common problem.
    plot_type : str, default="violin"
        String indicating which type of plot to produce:
            "box" : comparative box plots;
            "violin" : comparative violin plots.
    normalize : bool, default=True
        True if progress curves are to be normalized w.r.t. optimality gaps,
        otherwise False.
    all_in_one : bool, default=True
        True if curves are to be plotted together, otherwise False.

    Returns
    -------
    file_list : list [str]
        List compiling path names for plots produced.
    """
    # Check if problems are the same with the same x0 and x*.
    check_common_problem_and_reference(experiments)
    file_list = []
    # Set up plot.
    n_experiments = len(experiments)
    if all_in_one:
        ref_experiment = experiments[0]
        setup_plot(plot_type=plot_type,
                   solver_name="SOLVER SET",
                   problem_name=ref_experiment.problem.name,
                   normalize=normalize,
                   budget=ref_experiment.problem.factors["budget"]
                   )
        # solver_curve_handles = []
        # Terminal value = last y-value of each macro-replication's curve.
        if normalize:
            terminal_data = [[experiment.progress_curves[mrep].y_vals[-1] for mrep in range(experiment.n_macroreps)] for experiment in experiments]
        else:
            terminal_data = [[experiment.objective_curves[mrep].y_vals[-1] for mrep in range(experiment.n_macroreps)] for experiment in experiments]
        if plot_type == "box":
            plt.boxplot(terminal_data)
            plt.xticks(range(1, n_experiments + 1), labels=[experiment.solver.name for experiment in experiments])
        if plot_type == "violin":
            # Flatten per-solver lists into long-format columns for seaborn.
            solver_names = [experiments[exp_idx].solver.name for exp_idx in range(n_experiments) for td in terminal_data[exp_idx]]
            terminal_values = [td for exp_idx in range(n_experiments) for td in terminal_data[exp_idx]]
            terminal_data_dict = {"Solvers": solver_names, "Terminal": terminal_values}
            terminal_data_df = pd.DataFrame(terminal_data_dict)
            # sns.violinplot(x="Solvers", y="Terminal", data=terminal_data_df, inner="stick", scale="width", showmeans=True, bw = 0.2, cut=2)
            # NOTE(review): `showmeans` is a matplotlib violinplot/boxplot kwarg,
            # not a documented seaborn.violinplot parameter — confirm it is
            # accepted by the seaborn version pinned for this project.
            sns.violinplot(x="Solvers", y="Terminal", data=terminal_data_df, inner="stick", scale="width",
                           showmeans=True, cut=0.1)
        if normalize:
            plt.ylabel("Terminal Progress")
        else:
            plt.ylabel("Terminal Objective")
        file_list.append(save_plot(solver_name="SOLVER SET",
                                   problem_name=ref_experiment.problem.name,
                                   plot_type=plot_type,
                                   normalize=normalize
                                   ))
    else:  # Plot separately.
        for experiment in experiments:
            setup_plot(plot_type=plot_type,
                       solver_name=experiment.solver.name,
                       problem_name=experiment.problem.name,
                       normalize=normalize,
                       budget=experiment.problem.factors["budget"]
                       )
            if normalize:
                terminal_data = [experiment.progress_curves[mrep].y_vals[-1] for mrep in range(experiment.n_macroreps)]
            else:
                terminal_data = [experiment.objective_curves[mrep].y_vals[-1] for mrep in range(experiment.n_macroreps)]
            if plot_type == "box":
                plt.boxplot(terminal_data)
                plt.xticks([1], labels=[experiment.solver.name])
            if plot_type == "violin":
                solver_name_rep = [experiment.solver.name for td in terminal_data]
                terminal_data_dict = {"Solver": solver_name_rep, "Terminal": terminal_data}
                terminal_data_df = pd.DataFrame(terminal_data_dict)
                sns.violinplot(x="Solver", y="Terminal", data=terminal_data_df, inner="stick")
            if normalize:
                plt.ylabel("Terminal Progress")
            else:
                plt.ylabel("Terminal Objective")
            file_list.append(save_plot(solver_name=experiment.solver.name,
                                       problem_name=experiment.problem.name,
                                       plot_type=plot_type,
                                       normalize=normalize
                                       ))
    return file_list
- """ - file_list = [] - # Set up plot. - n_solvers = len(experiments) - n_problems = len(experiments[0]) - if all_in_one: - marker_list = ["o", "v", "s", "*", "P", "X", "D", "V", ">", "<"] - setup_plot(plot_type="terminal_scatter", - solver_name="SOLVER SET", - problem_name="PROBLEM SET" - ) - solver_names = [solver_experiments[0].solver.name for solver_experiments in experiments] - solver_curve_handles = [] - for solver_idx in range(n_solvers): - for problem_idx in range(n_problems): - experiment = experiments[solver_idx][problem_idx] - color_str = "C" + str(solver_idx) - marker_str = marker_list[solver_idx % len(marker_list)] # Cycle through list of marker types. - # Plot mean and standard deviation of terminal progress. - terminals = [curve.y_vals[-1] for curve in experiment.progress_curves] - mean_estimator = np.mean(terminals) - std_dev_estimator = np.std(terminals, ddof=1) - handle = plt.scatter(x=mean_estimator, y=std_dev_estimator, color=color_str, marker=marker_str) - solver_curve_handles.append(handle) - plt.legend(handles=solver_curve_handles, labels=solver_names, loc="upper right") - file_list.append(save_plot(solver_name="SOLVER SET", - problem_name="PROBLEM SET", - plot_type="terminal_scatter", - normalize=True - )) - else: - for solver_idx in range(n_solvers): - ref_experiment = experiments[solver_idx][0] - setup_plot(plot_type="terminal_scatter", - solver_name=ref_experiment.solver.name, - problem_name="PROBLEM SET" - ) - for problem_idx in range(n_problems): - experiment = experiments[solver_idx][problem_idx] - # Plot mean and standard deviation of terminal progress. 
- terminals = [curve.y_vals[-1] for curve in experiment.progress_curves] - mean_estimator = np.mean(terminals) - std_dev_estimator = np.std(terminals, ddof=1) - handle = plt.scatter(x=mean_estimator, y=std_dev_estimator, color="C0", marker="o") - file_list.append(save_plot(solver_name=experiment.solver.name, - problem_name="PROBLEM SET", - plot_type="terminal_scatter", - normalize=True - )) - return file_list - - -def setup_plot(plot_type, solver_name="SOLVER SET", problem_name="PROBLEM SET", normalize=True, budget=None, beta=None, solve_tol=None): - """Create new figure. Add labels to plot and reformat axes. - - Parameters - ---------- - plot_type : str - String indicating which type of plot to produce: - "all" : all estimated progress curves; - - "mean" : estimated mean progress curve; - - "quantile" : estimated beta quantile progress curve; - - "solve_time_cdf" : cdf of solve time; - - "cdf_solvability" : cdf solvability profile; - - "quantile_solvability" : quantile solvability profile; - - "diff_cdf_solvability" : difference of cdf solvability profiles; - - "diff_quantile_solvability" : difference of quantile solvability profiles; - - "area" : area scatterplot; - - "box" : box plot of terminal progress; - - "violin" : violin plot of terminal progress; - - "terminal_scatter" : scatterplot of mean and std dev of terminal progress. - solver_name : str, default="SOLVER_SET" - Name of solver. - problem_name : str, default="PROBLEM_SET" - Name of problem. - normalize : bool, default=True - True if progress curves are to be normalized w.r.t. optimality gaps, - otherwise False. - budget : int, optional - Budget of problem, measured in function evaluations. - beta : float, optional - Quantile to compute, e.g., beta quantile; in (0, 1). - solve_tol : float, optional - Relative optimality gap definining when a problem is solved; in (0, 1]. - """ - plt.figure() - # Set up axes and axis labels. 
- if normalize: - plt.ylabel("Fraction of Initial Optimality Gap", size=14) - if plot_type != "box" and plot_type != "violin": - plt.xlabel("Fraction of Budget", size=14) - plt.xlim((0, 1)) - plt.ylim((-0.1, 1.1)) - plt.tick_params(axis="both", which="major", labelsize=12) - else: - plt.ylabel("Objective Function Value", size=14) - if plot_type != "box" and plot_type != "violin": - plt.xlabel("Budget", size=14) - plt.xlim((0, budget)) - plt.tick_params(axis="both", which="major", labelsize=12) - # Specify title (plus alternative y-axis label and alternative axes). - if plot_type == "all": - if normalize: - title = f"{solver_name} on {problem_name}\nProgress Curves" - else: - title = f"{solver_name} on {problem_name}\nObjective Curves" - elif plot_type == "mean": - if normalize: - title = f"{solver_name} on {problem_name}\nMean Progress Curve" - else: - title = f"{solver_name} on {problem_name}\nMean Objective Curve" - elif plot_type == "quantile": - if normalize: - title = f"{solver_name} on {problem_name}\n{round(beta, 2)}-Quantile Progress Curve" - else: - title = f"{solver_name} on {problem_name}\n{round(beta, 2)}-Quantile Objective Curve" - elif plot_type == "solve_time_cdf": - plt.ylabel("Fraction of Macroreplications Solved", size=14) - title = f"{solver_name} on {problem_name}\nCDF of {round(solve_tol, 2)}-Solve Times" - elif plot_type == "cdf_solvability": - plt.ylabel("Problem Averaged Solve Fraction", size=14) - title = f"CDF-Solvability Profile for {solver_name}\nProfile of CDFs of {round(solve_tol, 2)}-Solve Times" - elif plot_type == "quantile_solvability": - plt.ylabel("Fraction of Problems Solved", size=14) - title = f"Quantile Solvability Profile for {solver_name}\nProfile of {round(beta, 2)}-Quantiles of {round(solve_tol, 2)}-Solve Times" - elif plot_type == "diff_cdf_solvability": - plt.ylabel("Difference in Problem Averaged Solve Fraction", size=14) - title = f"Difference of CDF-Solvability Profile for {solver_name}\nDifference of Profiles of 
CDFs of {round(solve_tol, 2)}-Solve Times" - plt.plot([0, 1], [0, 0], color="black", linestyle="--") - plt.ylim((-1, 1)) - elif plot_type == "diff_quantile_solvability": - plt.ylabel("Difference in Fraction of Problems Solved", size=14) - title = f"Difference of Quantile Solvability Profile for {solver_name}\nDifference of Profiles of {round(beta, 2)}-Quantiles of {round(solve_tol, 2)}-Solve Times" - plt.plot([0, 1], [0, 0], color="black", linestyle="--") - plt.ylim((-1, 1)) - elif plot_type == "area": - plt.xlabel("Mean Area", size=14) - plt.ylabel("Std Dev of Area") - # plt.xlim((0, 1)) - # plt.ylim((0, 0.5)) - title = f"{solver_name}\nAreas Under Progress Curves" - elif plot_type == "box" or plot_type == "violin": - plt.xlabel("Solvers") - if normalize: - plt.ylabel("Terminal Progress") - title = f"{solver_name} on {problem_name}" - else: - plt.ylabel("Terminal Objective") - title = f"{solver_name} on {problem_name}" - elif plot_type == "terminal_scatter": - plt.xlabel("Mean Terminal Progress", size=14) - plt.ylabel("Std Dev of Terminal Progress") - # plt.xlim((0, 1)) - # plt.ylim((0, 0.5)) - title = f"{solver_name}\nTerminal Progress" - plt.title(title, size=14) - - -def save_plot(solver_name, problem_name, plot_type, normalize, extra=None): - """Create new figure. Add labels to plot and reformat axes. - - Parameters - ---------- - solver_name : str - Name of solver. - problem_name : str - Name of problem. 
- plot_type : str - String indicating which type of plot to produce: - "all" : all estimated progress curves; - - "mean" : estimated mean progress curve; - - "quantile" : estimated beta quantile progress curve; - - "solve_time_cdf" : cdf of solve time; - - "cdf_solvability" : cdf solvability profile; - - "quantile_solvability" : quantile solvability profile; - - "diff_cdf_solvability" : difference of cdf solvability profiles; - - "diff_quantile_solvability" : difference of quantile solvability profiles; - - "area" : area scatterplot; - - "terminal_scatter" : scatterplot of mean and std dev of terminal progress. - normalize : bool - True if progress curves are to be normalized w.r.t. optimality gaps, - otherwise False. - extra : float or list [float], optional - Extra number(s) specifying quantile (e.g., beta) and/or solve tolerance. - - Returns - ------- - path_name : str - Path name pointing to location where plot will be saved. - """ - # Form string name for plot filename. - if plot_type == "all": - plot_name = "all_prog_curves" - elif plot_type == "mean": - plot_name = "mean_prog_curve" - elif plot_type == "quantile": - plot_name = f"{extra}_quantile_prog_curve" - elif plot_type == "solve_time_cdf": - plot_name = f"cdf_{extra}_solve_times" - elif plot_type == "cdf_solvability": - plot_name = f"profile_cdf_{extra}_solve_times" - elif plot_type == "quantile_solvability": - plot_name = f"profile_{extra[1]}_quantile_{extra[0]}_solve_times" - elif plot_type == "diff_cdf_solvability": - plot_name = f"diff_profile_cdf_{extra}_solve_times" - elif plot_type == "diff_quantile_solvability": - plot_name = f"diff_profile_{extra[1]}_quantile_{extra[0]}_solve_times" - elif plot_type == "area": - plot_name = "area_scatterplot" - elif plot_type == "box": - plot_name = "terminal_box" - elif plot_type == "violin": - plot_name = "terminal_violin" - elif plot_type == "terminal_scatter": - plot_name = "terminal_scatter" - if not normalize: - plot_name = plot_name + "_unnorm" - 
path_name = f"experiments/plots/{solver_name}_on_{problem_name}_{plot_name}.png" - # Reformat path_name to be suitable as a string literal. - path_name = path_name.replace("\\", "") - path_name = path_name.replace("$", "") - path_name = path_name.replace(" ", "_") - # Create directories if they do no exist. - if not os.path.exists("./experiments/plots"): - os.makedirs("./experiments", exist_ok=True) - os.makedirs("./experiments/plots") - plt.savefig(path_name, bbox_inches="tight") - # Return path_name for use in GUI. - return path_name - - -class ProblemsSolvers(object): - """Base class for running one or more solver on one or more problem. - - Attributes - ---------- - solver_names : list [str] - List of solver names. - n_solvers : int - Number of solvers. - problem_names : list [str] - List of problem names. - n_problems : int - Number of problems. - solvers : list [``base.Solver``] - List of solvers. - problems : list [``base.Problem``] - List of problems. - all_solver_fixed_factors : dict [dict] - Fixed solver factors for each solver: - outer key is solver name; - inner key is factor name. - all_problem_fixed_factors : dict [dict] - Fixed problem factors for each problem: - outer key is problem name; - inner key is factor name. - all_model_fixed_factors : dict of dict - Fixed model factors for each problem: - outer key is problem name; - inner key is factor name. - experiments : list [list [``experiment_base.ProblemSolver``]] - All problem-solver pairs. - file_name_path : str - Path of .pickle file for saving ``experiment_base.ProblemsSolvers`` object. - - Parameters - ---------- - solver_names : list [str], optional - List of solver names. - problem_names : list [str], optional - List of problem names. - solver_renames : list [str], optional - User-specified names for solvers. - problem_renames : list [str], optional - User-specified names for problems. 
- fixed_factors_filename : str, optional - Name of .py file containing dictionaries of fixed factors - for solvers/problems/models. - solvers : list [``base.Solver``], optional - List of solvers. - problems : list [``base.Problem``], optional - List of problems. - experiments : list [list [``experiment_base.ProblemSolver``]], optional - All problem-solver pairs. - file_name_path : str - Path of .pickle file for saving ``experiment_base.ProblemsSolvers`` object. - """ - def __init__(self, solver_names=None, problem_names=None, solver_renames=None, problem_renames=None, fixed_factors_filename=None, solvers=None, problems=None, experiments=None, file_name_path=None): - """There are three ways to create a ProblemsSolvers object: - 1. Provide the names of the solvers and problems to look up in directory.py. - 2. Provide the lists of unique solver and problem objects to pair. - 3. Provide a list of list of ProblemSolver objects. - - Notes - ----- - TO DO: If loading some ProblemSolver objects from file, - check that their factors match those in the overall ProblemsSolvers. 
- """ - if experiments is not None: # Method #3 - self.experiments = experiments - self.solvers = [experiments[idx][0].solver for idx in range(len(experiments))] - self.problems = [experiment.problem for experiment in experiments[0]] - self.solver_names = [solver.name for solver in self.solvers] - self.problem_names = [problem.name for problem in self.problems] - self.n_solvers = len(self.solvers) - self.n_problems = len(self.problems) - elif solvers is not None and problems is not None: # Method #2 - self.experiments = [[ProblemSolver(solver=solver, problem=problem) for problem in problems] for solver in solvers] - self.solvers = solvers - self.problems = problems - self.solver_names = [solver.name for solver in self.solvers] - self.problem_names = [problem.name for problem in self.problems] - self.n_solvers = len(self.solvers) - self.n_problems = len(self.problems) - elif solvers is None and problems is not None: # Method by providing solver and problem names - self.experiments = [[ProblemSolver(solver_name=solver_name, problem=problem) for problem in problems] for solver_name in solver_names] - self.solvers = [solver_directory[solver_name](name=solver_name) for solver_name in solver_names] - self.solver_names = solver_names - self.problems = problems - self.problem_names = [problem.name for problem in self.problems] - self.n_solvers = len(self.solvers) - self.n_problems = len(self.problems) - else: # Method #1 - if solver_renames is None: - self.solver_names = solver_names - else: - self.solver_names = solver_renames - if problem_renames is None: - self.problem_names = problem_names - else: - self.problem_names = problem_renames - self.n_solvers = len(solver_names) - self.n_problems = len(problem_names) - # Read in fixed solver/problem/model factors from .py file in the experiments folder. 
- # File should contain three dictionaries of dictionaries called - # - all_solver_fixed_factors - # - all_problem_fixed_factors - # - all_model_fixed_factors - if fixed_factors_filename is None: - self.all_solver_fixed_factors = {solver_name: {} for solver_name in self.solver_names} - self.all_problem_fixed_factors = {problem_name: {} for problem_name in self.problem_names} - self.all_model_fixed_factors = {problem_name: {} for problem_name in self.problem_names} - else: - fixed_factors_filename = "experiments.inputs." + fixed_factors_filename - all_factors = importlib.import_module(fixed_factors_filename) - self.all_solver_fixed_factors = getattr(all_factors, "all_solver_fixed_factors") - self.all_problem_fixed_factors = getattr(all_factors, "all_problem_fixed_factors") - self.all_model_fixed_factors = getattr(all_factors, "all_model_fixed_factors") - # Create all problem-solver pairs (i.e., instances of ProblemSolver class) - self.experiments = [] - for solver_idx in range(self.n_solvers): - solver_experiments = [] - for problem_idx in range(self.n_problems): - try: - # If a file exists, read in ProblemSolver object. - with open(f"./experiments/outputs/{self.solver_names[solver_idx]}_on_{self.problem_names[problem_idx]}.pickle", "rb") as file: - next_experiment = pickle.load(file) - # TODO: Check if the solver/problem/model factors in the file match - # those for the ProblemsSolvers. - except Exception: - # If no file exists, create new ProblemSolver object. - print(f"No experiment file exists for {self.solver_names[solver_idx]} on {self.problem_names[problem_idx]}. 
Creating new experiment.") - next_experiment = ProblemSolver(solver_name=solver_names[solver_idx], - problem_name=problem_names[problem_idx], - solver_rename=self.solver_names[solver_idx], - problem_rename=self.problem_names[problem_idx], - solver_fixed_factors=self.all_solver_fixed_factors[self.solver_names[solver_idx]], - problem_fixed_factors=self.all_problem_fixed_factors[self.problem_names[problem_idx]], - model_fixed_factors=self.all_model_fixed_factors[self.problem_names[problem_idx]] - ) - solver_experiments.append(next_experiment) - self.experiments.append(solver_experiments) - self.solvers = [self.experiments[idx][0].solver for idx in range(len(self.experiments))] - self.problems = [experiment.problem for experiment in self.experiments[0]] - # Initialize file path. - if file_name_path is None: - solver_names_string = "_".join(self.solver_names) - problem_names_string = "_".join(self.problem_names) - self.file_name_path = f"./experiments/outputs/group_{solver_names_string}_on_{problem_names_string}.pickle" - else: - self.file_name_path = file_name_path - - def check_compatibility(self): - """Check whether all experiments' solvers and problems are compatible. - - Returns - ------- - error_str : str - Error message in the event any problem and solver are incompatible. - """ - error_str = "" - for solver_idx in range(self.n_solvers): - for problem_idx in range(self.n_problems): - new_error_str = self.experiments[solver_idx][problem_idx].check_compatibility() - if new_error_str != "": - error_str += f"For solver {self.solver_names[solver_idx]} and problem {self.problem_names[problem_idx]}... {new_error_str}" - return error_str - - def run(self, n_macroreps): - """Run `n_macroreps` of each solver on each problem. - - Parameters - ---------- - n_macroreps : int - Number of macroreplications of the solver to run on the problem. 
- """ - for solver_idx in range(self.n_solvers): - for problem_idx in range(self.n_problems): - experiment = self.experiments[solver_idx][problem_idx] - # If the problem-solver pair has not been run in this way before, - # run it now and save result to .pickle file. - if (getattr(experiment, "n_macroreps", None) != n_macroreps): - print(f"Running {n_macroreps} macro-replications of {experiment.solver.name} on {experiment.problem.name}.") - experiment.clear_run() - experiment.run(n_macroreps) - # Save ProblemsSolvers object to .pickle file. - self.record_group_experiment_results() - - def post_replicate(self, n_postreps, crn_across_budget=True, crn_across_macroreps=False): - """For each problem-solver pair, run postreplications at solutions - recommended by the solver on each macroreplication. - - Parameters - ---------- - n_postreps : int - Number of postreplications to take at each recommended solution. - crn_across_budget : bool, default=True - True if CRN used for post-replications at solutions recommended at different times, - otherwise False. - crn_across_macroreps : bool, default=False - True if CRN used for post-replications at solutions recommended on different - macroreplications, otherwise False. - """ - for solver_index in range(self.n_solvers): - for problem_index in range(self.n_problems): - experiment = self.experiments[solver_index][problem_index] - # If the problem-solver pair has not been post-replicated in this way before, - # post-process it now. - if (getattr(experiment, "n_postreps", None) != n_postreps - or getattr(experiment, "crn_across_budget", None) != crn_across_budget - or getattr(experiment, "crn_across_macroreps", None) != crn_across_macroreps): - print(f"Post-processing {experiment.solver.name} on {experiment.problem.name}.") - experiment.clear_postreplicate() - experiment.post_replicate(n_postreps, crn_across_budget, crn_across_macroreps) - # Save ProblemsSolvers object to .pickle file. 
- self.record_group_experiment_results() - - def post_normalize(self, n_postreps_init_opt, crn_across_init_opt=True): - """Construct objective curves and (normalized) progress curves - for all collections of experiments on all given problem. - - Parameters - ---------- - experiments : list [``experiment_base.ProblemSolver``] - Problem-solver pairs of different solvers on a common problem. - n_postreps_init_opt : int - Number of postreplications to take at initial x0 and optimal x*. - crn_across_init_opt : bool, default=True - True if CRN used for post-replications at solutions x0 and x*, - otherwise False. - """ - for problem_idx in range(self.n_problems): - experiments_same_problem = [self.experiments[solver_idx][problem_idx] for solver_idx in range(self.n_solvers)] - post_normalize(experiments=experiments_same_problem, - n_postreps_init_opt=n_postreps_init_opt, - crn_across_init_opt=crn_across_init_opt) - # Save ProblemsSolvers object to .pickle file. - self.record_group_experiment_results() - - def record_group_experiment_results(self): - """Save ``experiment_base.ProblemsSolvers`` object to .pickle file. - """ - with open(self.file_name_path, "wb") as file: - pickle.dump(self, file, pickle.HIGHEST_PROTOCOL) - - -def read_group_experiment_results(file_name_path): - """Read in ``experiment_base.ProblemsSolvers`` object from .pickle file. - - Parameters - ---------- - file_name_path : str - Path of .pickle file for reading ``experiment_base.ProblemsSolvers`` object. - - Returns - ------- - groupexperiment : ``experiment_base.ProblemsSolvers`` - Problem-solver group that has been run or has been post-processed. - """ - with open(file_name_path, "rb") as file: - groupexperiment = pickle.load(file) - return groupexperiment - - -def find_unique_solvers_problems(experiments): - """Identify the unique problems and solvers in a collection - of experiments. 
- - Parameters - ---------- - experiments : list [``experiment_base.ProblemSolver``] - ProblemSolver pairs of different solvers on different problems. - - Returns - ------- - unique_solvers : list [``base.Solver``] - Unique solvers. - unique_problems : list [``base.Problem``] - Unique problems. - """ - # Set comprehensions do not work because Solver and Problem objects are not - # hashable. - unique_solvers = [] - unique_problems = [] - for experiment in experiments: - if experiment.solver not in unique_solvers: - unique_solvers.append(experiment.solver) - if experiment.problem not in unique_problems: - unique_problems.append(experiment.problem) - return unique_solvers, unique_problems - - -def find_missing_experiments(experiments): - """Identify problem-solver pairs that are not part of a list - of experiments. - - Parameters - ---------- - experiments : list [``experiment_base.ProblemSolver``] - Problem-solver pairs of different solvers on different problems. - - Returns - ------- - unique_solvers : list [``base.Solver``] - List of solvers present in the list of experiments - unique_problems : list [``base.Problem``] - List of problems present in the list of experiments. - missing : list [tuple [``base.Solver``, ``base.Problem``]] - List of names of missing problem-solver pairs. - """ - pairs = [(experiment.solver, experiment.problem) for experiment in experiments] - unique_solvers, unique_problems = find_unique_solvers_problems(experiments) - missing = [] - for solver in unique_solvers: - for problem in unique_problems: - if (solver, problem) not in pairs: - missing.append((solver, problem)) - return unique_solvers, unique_problems, missing - - -def make_full_metaexperiment(existing_experiments, unique_solvers, unique_problems, missing_experiments): - """Create experiment objects for missing problem-solver pairs - and run them. 
- - Parameters - ---------- - existing_experiments : list [``experiment_base.ProblemSolver``] - Problem-solver pairs of different solvers on different problems. - unique_solvers : list [``base.Solver objects``] - List of solvers present in the list of experiments. - unique_problems : list [``base.Problem``] - List of problems present in the list of experiments. - missing_experiments : list [tuple [``base.Solver``, ``base.Problem``]] - List of missing problem-solver pairs. - - Returns - ------- - metaexperiment : ``experiment_base.ProblemsSolvers`` - New ProblemsSolvers object. - """ - # Ordering of solvers and problems in unique_solvers and unique_problems - # is used to construct experiments. - full_experiments = [[[] for _ in range(len(unique_problems))] for _ in range(len(unique_solvers))] - for experiment in existing_experiments: - solver_idx = unique_solvers.index(experiment.solver) - problem_idx = unique_problems.index(experiment.problem) - full_experiments[solver_idx][problem_idx] = experiment - for pair in missing_experiments: - solver_idx = unique_solvers.index(pair[0]) - problem_idx = unique_problems.index(pair[1]) - full_experiments[solver_idx][problem_idx] = ProblemSolver(solver=pair[0], problem=pair[1]) - metaexperiment = ProblemsSolvers(experiments=full_experiments) - return metaexperiment From 382561d40918dac6fcfb54e7c1f873bdca0f46e9 Mon Sep 17 00:00:00 2001 From: liulitong-Jessie <46491025+liulitong-Jessie@users.noreply.github.com> Date: Tue, 15 Aug 2023 18:14:22 -0400 Subject: [PATCH 06/21] Add files via upload These base.py and experiment_base.py are needed if one wants to generate, run and solve random problem instances. 
--- simopt/base.py | 75 ++++++++++++++++++++++++++++--- simopt/experiment_base.py | 95 ++++++++------------------------------- 2 files changed, 88 insertions(+), 82 deletions(-) diff --git a/simopt/base.py b/simopt/base.py index d2cfa12a2..e2e8b47d0 100644 --- a/simopt/base.py +++ b/simopt/base.py @@ -3,11 +3,19 @@ Summary ------- Provide base classes for solvers, problems, and models. +This is the modified version to generate and run random model/random problem instance. """ import numpy as np from copy import deepcopy -from mrg32k3a.mrg32k3a import MRG32k3a +import sys +import os.path as o +# from mrg32k3a.mrg32k3a import MRG32k3a +from rng.mrg32k3a import MRG32k3a #when do the multinomial, change to the local + +from simopt.auto_diff_util import bi_dict, replicate_wrapper + +sys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), ".."))) class Solver(object): @@ -378,7 +386,7 @@ def check_factor_datatype(self, factor_name): is_right_type = isinstance(self.factors[factor_name], self.specifications[factor_name]["datatype"]) return is_right_type - def attach_rngs(self, rng_list): + def attach_rngs(self, random_rng, copy=True): """Attach a list of random-number generators to the problem. Parameters @@ -387,7 +395,25 @@ def attach_rngs(self, rng_list): List of random-number generators used to generate a random initial solution or a random problem instance. """ - self.rng_list = rng_list + if copy: + self.random_rng = [deepcopy(rng) for rng in random_rng] + else: + self.random_rng = random_rng + + def rebase(self, n_reps): + """Rebase the progenitor rngs to start at a later subsubstream index. + + Parameters + ---------- + n_reps : int + Substream index to skip to. 
+ """ + new_rngs = [] + for rng in self.random_rng: + stream_index = rng.s_ss_sss_index[0] + substream_index = rng.s_ss_sss_index[1] + new_rngs.append(MRG32k3a(s_ss_sss_index=[stream_index, substream_index, n_reps])) + self.random_rng = new_rngs def vector_to_factor_dict(self, vector): """ @@ -622,8 +648,6 @@ def simulate(self, solution, m=1): # to those of deterministic components of objectives. solution.objectives[solution.n_reps] = [sum(pairs) for pairs in zip(self.response_dict_to_objectives(responses), solution.det_objectives)] if self.gradient_available: - # print(self.response_dict_to_objectives_gradients(vector_gradients)) - # print(solution.det_objectives_gradients) solution.objectives_gradients[solution.n_reps] = [[sum(pairs) for pairs in zip(stoch_obj, det_obj)] for stoch_obj, det_obj in zip(self.response_dict_to_objectives_gradients(vector_gradients), solution.det_objectives_gradients)] # solution.objectives_gradients[solution.n_reps] = [[sum(pairs) for pairs in zip(stoch_obj, det_obj)] for stoch_obj, det_obj in zip(self.response_dict_to_objectives(vector_gradients), solution.det_objectives_gradients)] if self.n_stochastic_constraints > 0: @@ -755,6 +779,21 @@ def check_factor_datatype(self, factor_name): """ is_right_type = isinstance(self.factors[factor_name], self.specifications[factor_name]["datatype"]) return is_right_type + + def attach_rng(self, random_rng, copy=True): + """Attach a list of random-number generators to the problem. + + Parameters + ---------- + rng_list : list [``mrg32k3a.mrg32k3a.MRG32k3a``] + List of random-number generators used to generate a random initial solution + or a random problem instance. + """ + # self.random_rng = random_rng + if copy: + self.random_rng = [deepcopy(rng) for rng in random_rng] + else: + self.random_rng = random_rng def replicate(self, rng_list): """Simulate a single replication for the current model factors. @@ -772,6 +811,27 @@ def replicate(self, rng_list): Gradient estimate for each response. 
""" raise NotImplementedError + + +class Auto_Model(Model): + """ + Subclass of Model. + """ + def __init__(self, fixed_factors): + # set factors of the simulation model + # fill in missing factors with default values + super(Auto_Model, self).__init__(fixed_factors) + self.differentiable_factor_names = [] + for key in self.specifications: + if self.specifications[key]["datatype"] == float: + self.differentiable_factor_names.append(key) + self.bi_dict = bi_dict(self.response_names) + + def innner_replicate(self, rng_list): + raise NotImplementedError + + def replicate(self, rng_list, **kwargs): + return replicate_wrapper(self, rng_list, **kwargs) class Solution(object): @@ -826,7 +886,10 @@ class Solution(object): def __init__(self, x, problem): super().__init__() self.x = x - self.dim = len(x) + if isinstance(x, int) or isinstance(x, float): + self.dim = 1 + else: + self.dim = len(x) self.decision_factors = problem.vector_to_factor_dict(x) self.n_reps = 0 self.det_objectives, self.det_objectives_gradients = problem.deterministic_objectives_and_gradients(self.x) diff --git a/simopt/experiment_base.py b/simopt/experiment_base.py index bdabf48c1..a325ec3fb 100644 --- a/simopt/experiment_base.py +++ b/simopt/experiment_base.py @@ -4,6 +4,7 @@ ------- Provide base classes for problem-solver pairs and helper functions for reading/writing data and plotting. +This is the modified version to generate and solve random problem instances by solvers. 
""" import numpy as np @@ -15,8 +16,8 @@ import importlib import time import os -from mrg32k3a.mrg32k3a import MRG32k3a - +# from mrg32k3a.mrg32k3a import MRG32k3a +from rng.mrg32k3a import MRG32k3a #when do the multinomial, change to the local from .base import Solution from .directory import solver_directory, problem_directory @@ -464,7 +465,7 @@ def run(self, n_macroreps): for mrep in range(self.n_macroreps): print(f"Running macroreplication {mrep + 1} of {self.n_macroreps} of Solver {self.solver.name} on Problem {self.problem.name}.") # Create, initialize, and attach RNGs used for simulating solutions. - progenitor_rngs = [MRG32k3a(s_ss_sss_index=[mrep + 3, ss, 0]) for ss in range(self.problem.model.n_rngs)] + progenitor_rngs = [MRG32k3a(s_ss_sss_index=[mrep + 2, ss, 0]) for ss in range(self.problem.model.n_rngs)] self.solver.solution_progenitor_rngs = progenitor_rngs # print([rng.s_ss_sss_index for rng in progenitor_rngs]) # Run the solver on the problem. @@ -524,7 +525,6 @@ def post_replicate(self, n_postreps, crn_across_budget=True, crn_across_macrorep self.all_post_replicates = [[[] for _ in range(len(self.all_intermediate_budgets[mrep]))] for mrep in range(self.n_macroreps)] # Simulate intermediate recommended solutions. for mrep in range(self.n_macroreps): - print(f"Postreplicating macroreplication {mrep + 1} of {self.n_macroreps} of Solver {self.solver.name} on Problem {self.problem.name}.") for budget_index in range(len(self.all_intermediate_budgets[mrep])): x = self.all_recommended_xs[mrep][budget_index] fresh_soln = Solution(x, self.problem) @@ -760,7 +760,7 @@ def log_experiment_results(self, print_solutions=True): new_path = self.file_name_path.replace("outputs", "logs") # Adjust file_path_name to correct folder. new_path2 = new_path.replace(".pickle", "") # Remove .pickle from .txt file name. - # Create directories if they do not exist. + # Create directories if they do no exist. 
if "./experiments/logs" in new_path2 and not os.path.exists("./experiments/logs"): os.makedirs("./experiments", exist_ok=True) os.makedirs("./experiments/logs") @@ -799,10 +799,7 @@ def log_experiment_results(self, print_solutions=True): # and how many replications were taken of them (n_postreps_init_opt). if self.check_postnormalize(): file.write(f"The initial solution is {tuple([round(x, 4) for x in self.x0])}. Its estimated objective is {round(np.mean(self.x0_postreps), 4)}.\n") - if self.xstar is None: - file.write(f"No proxy optimal solution was used. A proxy optimal objective function value of {self.problem.optimal_value[0]} was provided.\n") - else: - file.write(f"The proxy optimal solution is {tuple([round(x, 4) for x in self.xstar])}. Its estimated objective is {round(np.mean(self.xstar_postreps), 4)}.\n") + file.write(f"The proxy optimal solution is {tuple([round(x, 4) for x in self.xstar])}. Its estimated objective is {round(np.mean(self.xstar_postreps), 4)}.\n") file.write(f"{self.n_postreps_init_opt} postreplications were taken at x0 and x_star.\n\n") # Display recommended solution at each budget value for each macroreplication. file.write('Macroreplication Results:\n') @@ -899,7 +896,6 @@ def post_normalize(experiments, n_postreps_init_opt, crn_across_init_opt=True, p elif getattr(experiment, "n_postreps", None) != getattr(ref_experiment, "n_postreps", None): print("At least two experiments have different numbers of post-replications.") print("Estimation of optimal solution x* may be based on different numbers of post-replications.") - print(f"Postnormalizing on Problem {ref_experiment.problem.name}.") # Take post-replications at common x0. # Create, initialize, and attach RNGs for model. # Stream 0: reserved for post-replications. @@ -920,17 +916,11 @@ def post_normalize(experiments, n_postreps_init_opt, crn_across_init_opt=True, p # objective function value. 
If deterministic (proxy for) f(x*), # create duplicate post-replicates to facilitate later bootstrapping. # If proxy for f(x*) is specified... - print("Finding f(x*)...") if proxy_opt_val is not None: - if proxy_opt_x is None: - xstar = None - else: - xstar = proxy_opt_x # Assuming the provided x is optimal in this case. - print("\t...using provided proxy f(x*).") + xstar = None xstar_postreps = [proxy_opt_val] * n_postreps_init_opt # ...else if proxy for x* is specified... elif proxy_opt_x is not None: - print("\t...using provided proxy x*.") xstar = proxy_opt_x # Take post-replications at xstar. opt_soln = Solution(xstar, ref_experiment.problem) @@ -939,14 +929,10 @@ def post_normalize(experiments, n_postreps_init_opt, crn_across_init_opt=True, p xstar_postreps = list(opt_soln.objectives[:n_postreps_init_opt][:, 0]) # 0 <- assuming only one objective # ...else if f(x*) is known... elif ref_experiment.problem.optimal_value is not None: - print("\t...using coded f(x*).") xstar = None - # NOTE: optimal_value is a tuple. - # Currently hard-coded for single objective case, i.e., optimal_value[0]. - xstar_postreps = [ref_experiment.problem.optimal_value[0]] * n_postreps_init_opt + xstar_postreps = [ref_experiment.problem.optimal_value] * n_postreps_init_opt # ...else if x* is known... elif ref_experiment.problem.optimal_solution is not None: - print("\t...using coded x*.") xstar = ref_experiment.problem.optimal_solution # Take post-replications at xstar. opt_soln = Solution(xstar, ref_experiment.problem) @@ -956,7 +942,6 @@ def post_normalize(experiments, n_postreps_init_opt, crn_across_init_opt=True, p # ...else determine x* empirically as estimated best solution # found by any solver on any macroreplication. else: - print("\t...using best postreplicated solution as proxy for x*.") # TO DO: Simplify this block of code. 
best_est_objectives = np.zeros(len(experiments)) for experiment_idx in range(len(experiments)): @@ -1079,7 +1064,7 @@ def bootstrap_procedure(experiments, n_bootstraps, conf_level, plot_type, beta=N "quantile_solvability" : quantile solvability profile; "diff_cdf_solvability" : difference of cdf solvability profiles; - + "diff_quantile_solvability" : difference of quantile solvability profiles. beta : float, optional Quantile to plot, e.g., beta quantile; in (0, 1). @@ -1155,7 +1140,7 @@ def functional_of_curves(bootstrap_curves, plot_type, beta=0.5, solve_tol=0.1): "solve_time_quantile" : beta quantile of solve time; "solve_time_cdf" : cdf of solve time; - + "cdf_solvability" : cdf solvability profile; "quantile_solvability" : quantile solvability profile; @@ -2036,7 +2021,7 @@ def plot_terminal_progress(experiments, plot_type="violin", normalize=True, all_ ProblemSolver pairs of different solvers on a common problem. plot_type : str, default="violin" String indicating which type of plot to produce: - + "box" : comparative box plots; "violin" : comparative violin plots. 
@@ -2469,6 +2454,14 @@ def __init__(self, solver_names=None, problem_names=None, solver_renames=None, p self.problem_names = [problem.name for problem in self.problems] self.n_solvers = len(self.solvers) self.n_problems = len(self.problems) + elif solvers is None and problems is not None: # Method by providing solver and problem names + self.experiments = [[ProblemSolver(solver_name=solver_name, problem=problem) for problem in problems] for solver_name in solver_names] + self.solvers = [solver_directory[solver_name](name=solver_name) for solver_name in solver_names] + self.solver_names = solver_names + self.problems = problems + self.problem_names = [problem.name for problem in self.problems] + self.n_solvers = len(self.solvers) + self.n_problems = len(self.problems) else: # Method #1 if solver_renames is None: self.solver_names = solver_names @@ -2622,56 +2615,6 @@ def record_group_experiment_results(self): with open(self.file_name_path, "wb") as file: pickle.dump(self, file, pickle.HIGHEST_PROTOCOL) - def log_group_experiment_results(self): - """Create readable .txt file describing the solvers and problems that make up the ProblemSolvers object. - """ - # Create a new text file in experiments/logs folder with correct name. - new_path = self.file_name_path.replace("outputs", "logs") # Adjust file_path_name to correct folder. - new_path = new_path.replace(".pickle", "") # Remove .pickle from .txt file name. - - # Create directories if they do no exist. - if "./experiments/logs" in new_path and not os.path.exists("./experiments/logs"): - os.makedirs("./experiments", exist_ok=True) - os.makedirs("./experiments/logs") - # Create text file. - with open(new_path + "_group_experiment_results.txt", "w") as file: - # Title text file with experiment information. - file.write(self.file_name_path) - file.write('\n') - # Write the name of each problem. 
- file.write("----------------------------------------------------------------------------------------------") - file.write("\nProblems:\n\n") - for i in range(self.n_problems): - file.write(f"{self.problem_names[i]}\n\t") - # Write model factors for each problem. - file.write("Model Factors:\n") - for key, value in self.problems[i].model.factors.items(): - # Excluding model factors corresponding to decision variables. - if key not in self.problems[i].model_decision_factors: - file.write(f"\t\t{key}: {value}\n") - # Write problem factors for each problem. - file.write("\n\tProblem Factors:\n") - for key, value in self.problems[i].factors.items(): - file.write(f"\t\t{key}: {value}\n") - file.write("\n") - file.write("----------------------------------------------------------------------------------------------") - # Write the name of each Solver. - file.write("\nSolvers:\n\n") - # Write solver factors for each solver. - for j in range(self.n_solvers): - file.write(f"{self.solver_names[j]}\n\t") - file.write("Solver Factors:\n") - for key, value in self.solvers[i].factors.items(): - file.write(f"\t\t{key}: {value}\n") - file.write("\n") - file.write("----------------------------------------------------------------------------------------------") - # Write the name of pickle files for each Problem-Solver pair. - file.write("\nThe .pickle files for the associated Problem-Solver pairs are:\n") - for p in self.problem_names: - for s in self.solver_names: - file.write(f"\t{s}_on_{p}.pickle\n") - file.close() - def read_group_experiment_results(file_name_path): """Read in ``experiment_base.ProblemsSolvers`` object from .pickle file. 
From d4d9032d59e6efcbb497af4c8bd80379b8ccbd7d Mon Sep 17 00:00:00 2001 From: liulitong-Jessie <46491025+liulitong-Jessie@users.noreply.github.com> Date: Tue, 15 Aug 2023 18:16:45 -0400 Subject: [PATCH 07/21] Delete demo_radom_model.py --- demo_radom_model.py | 68 --------------------------------------------- 1 file changed, 68 deletions(-) delete mode 100644 demo_radom_model.py diff --git a/demo_radom_model.py b/demo_radom_model.py deleted file mode 100644 index 2c955a31f..000000000 --- a/demo_radom_model.py +++ /dev/null @@ -1,68 +0,0 @@ -""" -This script is intended to help with debugging a random model. -It imports a model, initializes a model object with given factors, -sets up pseudorandom number generators, and runs one or more replications. -""" - -""" -Instead of modifying the problem and model class, we modify the demo_model and demo_problems. -""" - -import sys -import os.path as o -sys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), ".."))) - -import numpy as np -# Import random number generator. -# from mrg32k3a.mrg32k3a import MRG32k3a -from rng.mrg32k3a import MRG32k3a - -# Import model. -from simopt.models.san_2 import SAN - -fixed_factors = {} -mymodel = SAN(fixed_factors = fixed_factors, random=True) - -# from models. import -# Replace with name of .py file containing model class. -# Replace with name of model class. - -# Fix factors of model. Specify a dictionary of factors. - -# fixed_factors = {} # Resort to all default values. -# Look at Model class definition to get names of factors. - -# Initialize an instance of the specified model class. - -# mymodel = (fixed_factors) -# Replace with name of model class. - -# Working example for MM1 model. -# ----------------------------------------------- -# from simopt.models.mm1queue import MM1Queue -# fixed_factors = {"lambda": 3.0, "mu": 8.0} -# mymodel = MM1Queue(fixed_factors) -# ----------------------------------------------- - -# The rest of this script requires no changes. 
- -# Check that all factors describe a simulatable model. -# Check fixed factors individually. - -for key, value in mymodel.factors.items(): - print(f"The factor {key} is set as {value}. Is this simulatable? {bool(mymodel.check_simulatable_factor(key))}.") -# Check all factors collectively. -print(f"Is the specified model simulatable? {bool(mymodel.check_simulatable_factors())}.") - -# Create a list of RNG objects for the simulation model to use when -# running replications. -rng_list = [MRG32k3a(s_ss_sss_index=[0, ss, 0]) for ss in range(mymodel.n_rngs)] -rng_list2 = [MRG32k3a(s_ss_sss_index=[2, 4 + ss, 0]) for ss in range(mymodel.n_random)] - -mymodel.attach_rng(rng_list2) -responses, gradients = mymodel.replicate(rng_list) -print("\nFor a single replication:") -print("\nResponses:") -for key, value in responses.items(): - print(f"\t {key} is {value}.") - From 887a14b93686a8494d95b6f5d50ef8f4b286f9a7 Mon Sep 17 00:00:00 2001 From: liulitong-Jessie <46491025+liulitong-Jessie@users.noreply.github.com> Date: Tue, 15 Aug 2023 18:16:58 -0400 Subject: [PATCH 08/21] Delete demo_random_problem.py --- demo_random_problem.py | 103 ----------------------------------------- 1 file changed, 103 deletions(-) delete mode 100644 demo_random_problem.py diff --git a/demo_random_problem.py b/demo_random_problem.py deleted file mode 100644 index 8dfdab754..000000000 --- a/demo_random_problem.py +++ /dev/null @@ -1,103 +0,0 @@ -""" -This script is intended to help with debugging a random problem. -It imports a random problem, initializes a problem object with given factors, -sets up pseudorandom number generators, and runs multiple replications -at a given solution. -""" - -import sys -import os.path as o -sys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), ".."))) - -# Import random number generator. -# from mrg32k3a.mrg32k3a import MRG32k3a -from rng.mrg32k3a import MRG32k3a - -# Import the Solution class. 
-from simopt.base import Solution - -# Import problem. -# from models. import -# Replace with name of .py file containing problem class. -# Replace with name of problem class. - -# Fix factors of problem. Specify a dictionary of factors. - -# fixed_factors = {} # Resort to all default values. -# Look at Problem class definition to get names of factors. - -# Initialize an instance of the specified problem class. - -# myproblem = (fixed_factors=fixed_factors) -# Replace with name of problem class. - -# Initialize a solution x corresponding to the problem. - -# Look at the Problem class definition to identify the decision variables. -# x will be a tuple consisting of the decision variables. - -# The following line does not need to be changed. -# mysolution = Solution(x, myproblem) - -# ----------------------------------------------- - -from simopt.models.san_2 import SANLongestPath # Change this import command correspondingly - -def rebase(random_rng, n): - new_rngs = [] - for rng in random_rng: - stream_index = rng.s_ss_sss_index[0] - substream_index = rng.s_ss_sss_index[1] - subsubstream_index = rng.s_ss_sss_index[2] - new_rngs.append(MRG32k3a(s_ss_sss_index=[stream_index, substream_index + n, subsubstream_index])) - random_rng = new_rngs - return random_rng - -n_inst = 5 # The number of random instances you want to generate - -model_fixed_factors = {"num_nodes": 9, "num_arcs": 14} # Change to empty {} if want to use the default value -myproblem = SANLongestPath(model_fixed_factors=model_fixed_factors, random=True) # Change to the imported problem - -rng_list = [MRG32k3a(s_ss_sss_index=[0, ss, 0]) for ss in range(myproblem.model.n_rngs)] -random_rng = [MRG32k3a(s_ss_sss_index=[2, 4, ss]) for ss in range(myproblem.model.n_random, myproblem.model.n_random + myproblem.n_rngs)] -rng_list2 = [MRG32k3a(s_ss_sss_index=[2, 4, ss]) for ss in range(myproblem.model.n_random)] - -# Generate n_inst random problem instances -for i in range(n_inst): - random_rng = 
rebase(random_rng, 1) - rng_list2 = rebase(rng_list2, 1) - myproblem = SANLongestPath(model_fixed_factors=model_fixed_factors, random=True, random_rng=rng_list2) # Change to the imported problem - myproblem.attach_rngs(random_rng) - x = (8,) * myproblem.dim # Change the initial value according to the dimension - mysolution = Solution(x, myproblem) - mysolution.attach_rngs(rng_list, copy=False) - - # Simulate a fixed number of replications (n_reps) at the solution x. - n_reps = 10 - - myproblem.simulate(mysolution, m=n_reps) - - # Print results to console. - print(mysolution.objectives_mean[0]) - print(type(mysolution)) - print(f"Ran {n_reps} replications of the {myproblem.name} problem at solution x = {x}.\n") - # print(f"The mean objective estimate was {round(mysolution.objectives_mean[0], 4)} with standard error {round(mysolution.objectives_stderr[0], 4)}.") - print("The individual observations of the objective were:") - for idx in range(n_reps): - print(f"\t {round(mysolution.objectives[idx][0], 4)}") - if myproblem.gradient_available: - print("\nThe individual observations of the gradients of the objective were:") - for idx in range(n_reps): - print(f"\t {[round(g, 4) for g in mysolution.objectives_gradients[idx][0]]}") - else: - print("\nThis problem has no known gradients.") - if myproblem.n_stochastic_constraints > 0: - print(f"\nThis problem has {myproblem.n_stochastic_constraints} stochastic constraints of the form E[LHS] <= 0.") - for stc_idx in range(myproblem.n_stochastic_constraints): - print(f"\tFor stochastic constraint #{stc_idx + 1}, the mean of the LHS was {round(mysolution.stoch_constraints_mean[stc_idx], 4)} with standard error {round(mysolution.stoch_constraints_stderr[stc_idx], 4)}.") - print("\tThe observations of the LHSs were:") - for idx in range(n_reps): - print(f"\t\t {round(mysolution.stoch_constraints[idx][stc_idx], 4)}") - else: - print("\nThis problem has no stochastic constraints.") - From f23e305a59df125c36b75519283db42331fbf00f 
Mon Sep 17 00:00:00 2001 From: liulitong-Jessie <46491025+liulitong-Jessie@users.noreply.github.com> Date: Tue, 15 Aug 2023 18:17:08 -0400 Subject: [PATCH 09/21] Delete demo_random_problem_solver.py --- demo_random_problem_solver.py | 100 ---------------------------------- 1 file changed, 100 deletions(-) delete mode 100644 demo_random_problem_solver.py diff --git a/demo_random_problem_solver.py b/demo_random_problem_solver.py deleted file mode 100644 index dc37c07f2..000000000 --- a/demo_random_problem_solver.py +++ /dev/null @@ -1,100 +0,0 @@ -""" -This script is intended to help with debugging random problems and solvers. -It create a problem-solver pairing by importing problems and runs multiple -macroreplications of the solver on the problem. -""" - -import sys -import os.path as o -import numpy as np -import os -sys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), ".."))) - -# Import the ProblemSolver class and other useful functions -from simopt.experiment_base import ProblemSolver, read_experiment_results, post_normalize, plot_progress_curves, plot_solvability_cdfs -from rng.mrg32k3a import MRG32k3a -from simopt.models.san_2 import SANLongestPath, SANLongestPathConstr - -# !! When testing a new solver/problem, first go to directory.py. -# See directory.py for more details. -# Specify the names of the solver to test. 
- -# ----------------------------------------------- -solver_name = "RNDSRCH" # Random search solver -# ----------------------------------------------- - - -def rebase(random_rng, n): - new_rngs = [] - for rng in random_rng: - stream_index = rng.s_ss_sss_index[0] - substream_index = rng.s_ss_sss_index[1] - subsubstream_index = rng.s_ss_sss_index[2] - new_rngs.append(MRG32k3a(s_ss_sss_index=[stream_index, substream_index + n, subsubstream_index])) - random_rng = new_rngs - return random_rng - -def strtobool(t): - t = t.lower() - if t == "t": - return True - else: - return False - -n_inst = int(input('Please enter the number of instance you want to generate: ')) -rand = input('Please decide whether you want to generate random instances or determinent instances (T/F): ') -rand = strtobool(rand) - -model_fixed_factors = {} # Override model factors - -myproblem = SANLongestPathConstr(random=True, model_fixed_factors=model_fixed_factors) - -random_rng = [MRG32k3a(s_ss_sss_index=[2, 4, ss]) for ss in range(myproblem.model.n_random, myproblem.model.n_random + myproblem.n_rngs)] -rng_list2 = [MRG32k3a(s_ss_sss_index=[2, 4, ss]) for ss in range(myproblem.model.n_random)] - -# Generate 5 random problem instances -for i in range(n_inst): - random_rng = rebase(random_rng, 1) - rng_list2 = rebase(rng_list2, 1) - myproblem = SANLongestPathConstr(random=rand, random_rng=rng_list2, model_fixed_factors=model_fixed_factors) - myproblem.attach_rngs(random_rng) - problem_name = myproblem.model.name + str(i) - print('-------------------------------------------------------') - print(f"Testing solver {solver_name} on problem {problem_name}.") - - # Specify file path name for storing experiment outputs in .pickle file. - file_name_path = "experiments/outputs/" + solver_name + "_on_" + problem_name + ".pickle" - print(f"Results will be stored as {file_name_path}.") - - # Initialize an instance of the experiment class. 
- myexperiment = ProblemSolver(solver_name=solver_name, problem=myproblem) - - # Run a fixed number of macroreplications of the solver on the problem. - myexperiment.run(n_macroreps=100) - - # If the solver runs have already been performed, uncomment the - # following pair of lines (and uncommmen the myexperiment.run(...) - # line above) to read in results from a .pickle file. - # myexperiment = read_experiment_results(file_name_path) - - print("Post-processing results.") - # Run a fixed number of postreplications at all recommended solutions. - myexperiment.post_replicate(n_postreps=1) #200, 10 - # Find an optimal solution x* for normalization. - post_normalize([myexperiment], n_postreps_init_opt=1) #200, 5 - - # Log results. - myexperiment.log_experiment_results() - - print("Optimal solution: ",np.array(myexperiment.xstar)) - print("Optimal Value: ", myexperiment.all_est_objectives[0]) - - print("Plotting results.") - # Produce basic plots of the solver on the problem. - plot_progress_curves(experiments=[myexperiment], plot_type="all", normalize=False) - plot_progress_curves(experiments=[myexperiment], plot_type="mean", normalize=False) - plot_progress_curves(experiments=[myexperiment], plot_type="quantile", beta=0.90, normalize=False) - plot_solvability_cdfs(experiments=[myexperiment], solve_tol=0.1) - - # Plots will be saved in the folder experiments/plots. - print("Finished. 
Plots can be found in experiments/plots folder.") \ No newline at end of file From b78068df7a4a7edcb79fd8c5ed03ee0dcca29c85 Mon Sep 17 00:00:00 2001 From: liulitong-Jessie <46491025+liulitong-Jessie@users.noreply.github.com> Date: Tue, 15 Aug 2023 18:17:29 -0400 Subject: [PATCH 10/21] Delete demo_user.py --- demo_user.py | 167 --------------------------------------------------- 1 file changed, 167 deletions(-) delete mode 100644 demo_user.py diff --git a/demo_user.py b/demo_user.py deleted file mode 100644 index 1872aa0a0..000000000 --- a/demo_user.py +++ /dev/null @@ -1,167 +0,0 @@ -""" -This script is the user interface for generating multiple random problem instances and -solve them by specified solvers. -It create problem-solver groups and runs multiple -macroreplications of each problem-solver pair. To run the file, user need -to import the solver and probelm they want to build random instances at the beginning, -and also provide an input file, which include the information needed to -build random instances (the name of problem, number of random instances to -generate, and some overriding factors). -""" - -import sys -import os.path as o -import os -import re -sys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), ".."))) - -# Import the ProblemsSolvers class and other useful functions -from simopt.experiment_base import ProblemsSolvers, plot_solvability_profiles -from rng.mrg32k3a import MRG32k3a -from simopt.base import Solution -from simopt.models.smf import SMF_Max -from simopt.models.rmitd import RMITDMaxRevenue -from simopt.models.san_2 import SANLongestPath, SANLongestPathConstr -from simopt.models.mm1queue import MM1MinMeanSojournTime - - -# !! When testing a new solver/problem, first import problems from the random code file, -# Then create a test_input.txt file in your computer. 
-# There you should add the import statement and an entry in the file -# You need to specify name of solvers and problems you want to test in the file by 'solver_name' -# And specify the problem related informations by problem = [...] -# All lines start with '#' will be counted as commend and will not be implemented -# See the following example for more details. - -# Ex: -# To create two random instance of SAN and three random instances of SMF: -# In the demo_user.py, modify: -# from simopt.models.smf import SMF_Max -# from simopt.models.san_2 import SANLongestPath -# In the input information file (test_input.txt), include the following lines: -# solver_names = ["RNDSRCH", "ASTRODF", "NELDMD"] -# problem1 = [SANLongestPath, 2, {'num_nodes':8, 'num_arcs':12}] -# problem2 = [SMF_Max, 3, {'num_nodes':7, 'num_arcs':16}] - -# Grab information from the input file -def get_info(path): - L = [] - with open(path) as file: - lines = [line.rstrip() for line in file] - for line in lines: - if not line.startswith("#") and line: - L.append(line) - lines = L - command_lines = [] - problem_sets = [] - for line in lines: - if 'import' in line: - command_lines.append(line) - elif 'solver_names' in line: - solver_names = line - else: - problem_sets.append(line) - - for i in command_lines: - exec(i) - - problems = [] - solver_names = eval(re.findall(r'\[.*?\]', solver_names)[0]) - for l in problem_sets: - o = re.findall(r'\[.*?\]', l)[0] - problems.append(eval(o)) - - problem_sets = [p[0] for p in problems] - L_num = [p[1] for p in problems] - L_para = [p[2] for p in problems] - - return solver_names, problem_sets, L_num, L_para - -# Read input file and process information -path = input('Please input the path of the input file: ') -if "'" in path: # If the input path already has quotation marks - path = path.replace("'", "") - -solver_names, problem_set, L_num, L_para = get_info(path) -rands = [True for i in range(len(problem_set))] - -# Check whether the input file is valid -if 
len(L_num) != len(problem_set) or len(L_para) != len(problem_set): - print('Invalid input. The input number of random instances does not match with the number of problems you want.') - print('Please check your input file') - -def rebase(random_rng, n): - new_rngs = [] - for rng in random_rng: - stream_index = rng.s_ss_sss_index[0] - substream_index = rng.s_ss_sss_index[1] - subsubstream_index = rng.s_ss_sss_index[2] - new_rngs.append(MRG32k3a(s_ss_sss_index=[stream_index, substream_index + n, subsubstream_index])) - random_rng = new_rngs - return random_rng - -myproblems = problem_set - -# Check whether the problem is random -for i in range(len(problem_set)): - if L_num[i] == 0: - L_num[i] = 1 - rands[i] = False - else: - rands[i] = True - -problems = [] -problem_names = [] - -def generate_problem(i, myproblems, rands, problems, L_num, L_para): - print('For problem ', myproblems[i]().name, ':') - model_fixed_factors = L_para[i] - - name = myproblems[i] - myproblem = name(model_fixed_factors=model_fixed_factors, random=rands[i]) - random_rng = [MRG32k3a(s_ss_sss_index=[2, 4 + L_num[i], ss]) for ss in range(myproblem.n_rngs)] - rng_list2 = [MRG32k3a(s_ss_sss_index=[2, 4, ss]) for ss in range(myproblem.model.n_random)] - - if rands[i] == False: # Determinant case - problems.append(myproblem) - myproblem.name = str(myproblem.model.name) + str(0) - problem_names.append(myproblem.name) - print('') - - else: - for j in range(L_num[i]): - random_rng = rebase(random_rng, 1) # Advance the substream for different instances - rng_list2 = rebase(rng_list2, 1) - name = myproblems[i] - myproblem = name(model_fixed_factors=model_fixed_factors, random=rands[i], random_rng=rng_list2) - myproblem.attach_rngs(random_rng) - # myproblem.name = str(myproblem.model.name) + str(j) - myproblem.name = str(myproblem.name) + '-' + str(j) - problems.append(myproblem) - problem_names.append(myproblem.name) - print('') - - return problems, problem_names - -# Generate problems -for i in 
range(len(L_num)): - problems, problem_names = generate_problem(i, myproblems, rands, problems, L_num, L_para) - -# Initialize an instance of the experiment class. -mymetaexperiment = ProblemsSolvers(solver_names=solver_names, problems = problems) - -# Run a fixed number of macroreplications of each solver on each problem. -mymetaexperiment.run(n_macroreps=3) - -print("Post-processing results.") -# Run a fixed number of postreplications at all recommended solutions. -mymetaexperiment.post_replicate(n_postreps=20) -# Find an optimal solution x* for normalization. -mymetaexperiment.post_normalize(n_postreps_init_opt=20) - -print("Plotting results.") -# Produce basic plots of the solvers on the problems. -plot_solvability_profiles(experiments=mymetaexperiment.experiments, plot_type="cdf_solvability") - -# Plots will be saved in the folder experiments/plots. -print("Finished. Plots can be found in experiments/plots folder.") \ No newline at end of file From 46d67f4bc3a22c6cad7c214ff98499a4daaa2d9e Mon Sep 17 00:00:00 2001 From: liulitong-Jessie <46491025+liulitong-Jessie@users.noreply.github.com> Date: Tue, 15 Aug 2023 18:19:00 -0400 Subject: [PATCH 11/21] Add files via upload demo files for random problem instances --- demo/demo_radom_model.py | 1 + demo/demo_random_problem.py | 1 + demo/demo_random_problem_solver.py | 1 + demo/demo_user.py | 1 + 4 files changed, 4 insertions(+) create mode 100644 demo/demo_radom_model.py create mode 100644 demo/demo_random_problem.py create mode 100644 demo/demo_random_problem_solver.py create mode 100644 demo/demo_user.py diff --git a/demo/demo_radom_model.py b/demo/demo_radom_model.py new file mode 100644 index 000000000..fc55255e5 --- /dev/null +++ b/demo/demo_radom_model.py @@ -0,0 +1 @@ 
+{"payload":{"allShortcutsEnabled":true,"fileTree":{"":{"items":[{"name":"data_farming_experiments","path":"data_farming_experiments","contentType":"directory"},{"name":"demo","path":"demo","contentType":"directory"},{"name":"dist","path":"dist","contentType":"directory"},{"name":"docs","path":"docs","contentType":"directory"},{"name":"experiments","path":"experiments","contentType":"directory"},{"name":"notebooks","path":"notebooks","contentType":"directory"},{"name":"simopt","path":"simopt","contentType":"directory"},{"name":"simoptlib.egg-info","path":"simoptlib.egg-info","contentType":"directory"},{"name":"workshop","path":"workshop","contentType":"directory"},{"name":".gitignore","path":".gitignore","contentType":"file"},{"name":"LICENSE","path":"LICENSE","contentType":"file"},{"name":"README.md","path":"README.md","contentType":"file"},{"name":"demo_radom_model.py","path":"demo_radom_model.py","contentType":"file"},{"name":"demo_random_problem.py","path":"demo_random_problem.py","contentType":"file"},{"name":"demo_random_problem_solver.py","path":"demo_random_problem_solver.py","contentType":"file"},{"name":"demo_user.py","path":"demo_user.py","contentType":"file"},{"name":"pyproject.toml","path":"pyproject.toml","contentType":"file"}],"totalCount":17}},"fileTreeProcessingTime":2.965958,"foldersToFetch":[],"reducedMotionEnabled":"system","repo":{"id":194012165,"defaultBranch":"master","name":"simopt","ownerLogin":"simopt-admin","currentUserCanPush":true,"isFork":false,"isEmpty":false,"createdAt":"2019-06-26T22:55:30.000-04:00","ownerAvatar":"https://avatars.githubusercontent.com/u/52267122?v=4","public":true,"private":false,"isOrgOwned":false},"symbolsExpanded":false,"treeExpanded":true,"refInfo":{"name":"python_dev_litong","listCacheKey":"v0:1692137230.0","canEdit":true,"refType":"branch","currentOid":"382561d40918dac6fcfb54e7c1f873bdca0f46e9"},"path":"demo_radom_model.py","currentUser":{"id":46491025,"login":"liulitong-Jessie","userEmail":"118010185@link.cuh
k.edu.cn"},"blob":{"rawLines":["\"\"\"","This script is intended to help with debugging a random model.","It imports a model, initializes a model object with given factors,","sets up pseudorandom number generators, and runs one or more replications.","\"\"\"","","\"\"\"","Instead of modifying the problem and model class, we modify the demo_model and demo_problems.","\"\"\"","","import sys","import os.path as o","sys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), \"..\")))","","import numpy as np","# Import random number generator.","# from mrg32k3a.mrg32k3a import MRG32k3a","from rng.mrg32k3a import MRG32k3a","","# Import model. ","from simopt.models.san_2 import SAN","","fixed_factors = {}","mymodel = SAN(fixed_factors = fixed_factors, random=True)","","# from models. import ","# Replace with name of .py file containing model class.","# Replace with name of model class.","","# Fix factors of model. Specify a dictionary of factors.","","# fixed_factors = {} # Resort to all default values.","# Look at Model class definition to get names of factors.","","# Initialize an instance of the specified model class.","","# mymodel = (fixed_factors)","# Replace with name of model class.","","# Working example for MM1 model.","# -----------------------------------------------","# from simopt.models.mm1queue import MM1Queue","# fixed_factors = {\"lambda\": 3.0, \"mu\": 8.0}","# mymodel = MM1Queue(fixed_factors)","# -----------------------------------------------","","# The rest of this script requires no changes.","","# Check that all factors describe a simulatable model.","# Check fixed factors individually.","","for key, value in mymodel.factors.items():"," print(f\"The factor {key} is set as {value}. Is this simulatable? {bool(mymodel.check_simulatable_factor(key))}.\")","# Check all factors collectively.","print(f\"Is the specified model simulatable? 
{bool(mymodel.check_simulatable_factors())}.\")","","# Create a list of RNG objects for the simulation model to use when","# running replications.","rng_list = [MRG32k3a(s_ss_sss_index=[0, ss, 0]) for ss in range(mymodel.n_rngs)]","rng_list2 = [MRG32k3a(s_ss_sss_index=[2, 4 + ss, 0]) for ss in range(mymodel.n_random)]","","mymodel.attach_rng(rng_list2)","responses, gradients = mymodel.replicate(rng_list)","print(\"\\nFor a single replication:\")","print(\"\\nResponses:\")","for key, value in responses.items():"," print(f\"\\t {key} is {value}.\")"],"stylingDirectives":[[{"start":0,"end":3,"cssClass":"pl-s"}],[{"start":0,"end":62,"cssClass":"pl-s"}],[{"start":0,"end":66,"cssClass":"pl-s"}],[{"start":0,"end":74,"cssClass":"pl-s"}],[{"start":0,"end":3,"cssClass":"pl-s"}],[],[{"start":0,"end":3,"cssClass":"pl-s"}],[{"start":0,"end":93,"cssClass":"pl-s"}],[{"start":0,"end":3,"cssClass":"pl-s"}],[],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":10,"cssClass":"pl-s1"}],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":9,"cssClass":"pl-s1"},{"start":10,"end":14,"cssClass":"pl-s1"},{"start":15,"end":17,"cssClass":"pl-k"},{"start":18,"end":19,"cssClass":"pl-s1"}],[{"start":0,"end":3,"cssClass":"pl-s1"},{"start":4,"end":8,"cssClass":"pl-s1"},{"start":9,"end":15,"cssClass":"pl-en"},{"start":16,"end":17,"cssClass":"pl-s1"},{"start":18,"end":25,"cssClass":"pl-en"},{"start":26,"end":27,"cssClass":"pl-s1"},{"start":28,"end":32,"cssClass":"pl-en"},{"start":33,"end":34,"cssClass":"pl-s1"},{"start":35,"end":42,"cssClass":"pl-en"},{"start":43,"end":46,"cssClass":"pl-s1"},{"start":47,"end":54,"cssClass":"pl-s1"},{"start":55,"end":63,"cssClass":"pl-s1"},{"start":65,"end":73,"cssClass":"pl-s1"},{"start":76,"end":80,"cssClass":"pl-s"}],[],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":12,"cssClass":"pl-s1"},{"start":13,"end":15,"cssClass":"pl-k"},{"start":16,"end":18,"cssClass":"pl-s1"}],[{"start":0,"end":33,"cssClass":"pl-c"}],[{"start":0,"end":40,"cssClass":"pl-c"
}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":8,"cssClass":"pl-s1"},{"start":9,"end":17,"cssClass":"pl-s1"},{"start":18,"end":24,"cssClass":"pl-k"},{"start":25,"end":33,"cssClass":"pl-v"}],[],[{"start":0,"end":19,"cssClass":"pl-c"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":18,"cssClass":"pl-s1"},{"start":19,"end":24,"cssClass":"pl-s1"},{"start":25,"end":31,"cssClass":"pl-k"},{"start":32,"end":35,"cssClass":"pl-v"}],[],[{"start":0,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-c1"}],[{"start":0,"end":7,"cssClass":"pl-s1"},{"start":8,"end":9,"cssClass":"pl-c1"},{"start":10,"end":13,"cssClass":"pl-v"},{"start":14,"end":27,"cssClass":"pl-s1"},{"start":28,"end":29,"cssClass":"pl-c1"},{"start":30,"end":43,"cssClass":"pl-s1"},{"start":45,"end":51,"cssClass":"pl-s1"},{"start":51,"end":52,"cssClass":"pl-c1"},{"start":52,"end":56,"cssClass":"pl-c1"}],[],[{"start":0,"end":50,"cssClass":"pl-c"}],[{"start":0,"end":66,"cssClass":"pl-c"}],[{"start":0,"end":54,"cssClass":"pl-c"}],[],[{"start":0,"end":56,"cssClass":"pl-c"}],[],[{"start":0,"end":53,"cssClass":"pl-c"}],[{"start":0,"end":57,"cssClass":"pl-c"}],[],[{"start":0,"end":54,"cssClass":"pl-c"}],[],[{"start":0,"end":45,"cssClass":"pl-c"}],[{"start":0,"end":54,"cssClass":"pl-c"}],[],[{"start":0,"end":32,"cssClass":"pl-c"}],[{"start":0,"end":49,"cssClass":"pl-c"}],[{"start":0,"end":45,"cssClass":"pl-c"}],[{"start":0,"end":44,"cssClass":"pl-c"}],[{"start":0,"end":35,"cssClass":"pl-c"}],[{"start":0,"end":49,"cssClass":"pl-c"}],[],[{"start":0,"end":46,"cssClass":"pl-c"}],[],[{"start":0,"end":54,"cssClass":"pl-c"}],[{"start":0,"end":35,"cssClass":"pl-c"}],[],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":7,"cssClass":"pl-s1"},{"start":9,"end":14,"cssClass":"pl-s1"},{"start":15,"end":17,"cssClass":"pl-c1"},{"start":18,"end":25,"cssClass":"pl-s1"},{"start":26,"end":33,"cssClass":"pl-s1"},{"start":34,"end":39,"cssClass":"pl-en"}],[{"start":4,
"end":9,"cssClass":"pl-en"},{"start":10,"end":116,"cssClass":"pl-s"},{"start":23,"end":28,"cssClass":"pl-s1"},{"start":23,"end":24,"cssClass":"pl-kos"},{"start":24,"end":27,"cssClass":"pl-s1"},{"start":27,"end":28,"cssClass":"pl-kos"},{"start":39,"end":46,"cssClass":"pl-s1"},{"start":39,"end":40,"cssClass":"pl-kos"},{"start":40,"end":45,"cssClass":"pl-s1"},{"start":45,"end":46,"cssClass":"pl-kos"},{"start":69,"end":114,"cssClass":"pl-s1"},{"start":69,"end":70,"cssClass":"pl-kos"},{"start":70,"end":74,"cssClass":"pl-en"},{"start":75,"end":82,"cssClass":"pl-s1"},{"start":83,"end":107,"cssClass":"pl-en"},{"start":108,"end":111,"cssClass":"pl-s1"},{"start":113,"end":114,"cssClass":"pl-kos"}],[{"start":0,"end":33,"cssClass":"pl-c"}],[{"start":0,"end":5,"cssClass":"pl-en"},{"start":6,"end":89,"cssClass":"pl-s"},{"start":44,"end":87,"cssClass":"pl-s1"},{"start":44,"end":45,"cssClass":"pl-kos"},{"start":45,"end":49,"cssClass":"pl-en"},{"start":50,"end":57,"cssClass":"pl-s1"},{"start":58,"end":83,"cssClass":"pl-en"},{"start":86,"end":87,"cssClass":"pl-kos"}],[],[{"start":0,"end":67,"cssClass":"pl-c"}],[{"start":0,"end":23,"cssClass":"pl-c"}],[{"start":0,"end":8,"cssClass":"pl-s1"},{"start":9,"end":10,"cssClass":"pl-c1"},{"start":12,"end":20,"cssClass":"pl-v"},{"start":21,"end":35,"cssClass":"pl-s1"},{"start":35,"end":36,"cssClass":"pl-c1"},{"start":37,"end":38,"cssClass":"pl-c1"},{"start":40,"end":42,"cssClass":"pl-s1"},{"start":44,"end":45,"cssClass":"pl-c1"},{"start":48,"end":51,"cssClass":"pl-k"},{"start":52,"end":54,"cssClass":"pl-s1"},{"start":55,"end":57,"cssClass":"pl-c1"},{"start":58,"end":63,"cssClass":"pl-en"},{"start":64,"end":71,"cssClass":"pl-s1"},{"start":72,"end":78,"cssClass":"pl-s1"}],[{"start":0,"end":9,"cssClass":"pl-s1"},{"start":10,"end":11,"cssClass":"pl-c1"},{"start":13,"end":21,"cssClass":"pl-v"},{"start":22,"end":36,"cssClass":"pl-s1"},{"start":36,"end":37,"cssClass":"pl-c1"},{"start":38,"end":39,"cssClass":"pl-c1"},{"start":41,"end":42,"cssClass":"p
l-c1"},{"start":43,"end":44,"cssClass":"pl-c1"},{"start":45,"end":47,"cssClass":"pl-s1"},{"start":49,"end":50,"cssClass":"pl-c1"},{"start":53,"end":56,"cssClass":"pl-k"},{"start":57,"end":59,"cssClass":"pl-s1"},{"start":60,"end":62,"cssClass":"pl-c1"},{"start":63,"end":68,"cssClass":"pl-en"},{"start":69,"end":76,"cssClass":"pl-s1"},{"start":77,"end":85,"cssClass":"pl-s1"}],[],[{"start":0,"end":7,"cssClass":"pl-s1"},{"start":8,"end":18,"cssClass":"pl-en"},{"start":19,"end":28,"cssClass":"pl-s1"}],[{"start":0,"end":9,"cssClass":"pl-s1"},{"start":11,"end":20,"cssClass":"pl-s1"},{"start":21,"end":22,"cssClass":"pl-c1"},{"start":23,"end":30,"cssClass":"pl-s1"},{"start":31,"end":40,"cssClass":"pl-en"},{"start":41,"end":49,"cssClass":"pl-s1"}],[{"start":0,"end":5,"cssClass":"pl-en"},{"start":6,"end":35,"cssClass":"pl-s"},{"start":7,"end":9,"cssClass":"pl-cce"}],[{"start":0,"end":5,"cssClass":"pl-en"},{"start":6,"end":20,"cssClass":"pl-s"},{"start":7,"end":9,"cssClass":"pl-cce"}],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":7,"cssClass":"pl-s1"},{"start":9,"end":14,"cssClass":"pl-s1"},{"start":15,"end":17,"cssClass":"pl-c1"},{"start":18,"end":27,"cssClass":"pl-s1"},{"start":28,"end":33,"cssClass":"pl-en"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":33,"cssClass":"pl-s"},{"start":12,"end":14,"cssClass":"pl-cce"},{"start":15,"end":20,"cssClass":"pl-s1"},{"start":15,"end":16,"cssClass":"pl-kos"},{"start":16,"end":19,"cssClass":"pl-s1"},{"start":19,"end":20,"cssClass":"pl-kos"},{"start":24,"end":31,"cssClass":"pl-s1"},{"start":24,"end":25,"cssClass":"pl-kos"},{"start":25,"end":30,"cssClass":"pl-s1"},{"start":30,"end":31,"cssClass":"pl-kos"}],[]],"csv":null,"csvError":null,"dependabotInfo":{"showConfigurationBanner":null,"configFilePath":null,"networkDependabotPath":"/simopt-admin/simopt/network/updates","dismissConfigurationNoticePath":"/settings/dismiss-notice/dependabot_configuration_notice","configurationNoticeDismissed":false,"repoAlertsPath":"/sim
opt-admin/simopt/security/dependabot","repoSecurityAndAnalysisPath":"/simopt-admin/simopt/settings/security_analysis","repoOwnerIsOrg":false,"currentUserCanAdminRepo":false},"displayName":"demo_radom_model.py","displayUrl":"https://github.com/simopt-admin/simopt/blob/python_dev_litong/demo_radom_model.py?raw=true","headerInfo":{"blobSize":"2.31 KB","deleteInfo":{"deleteTooltip":"Delete this file"},"editInfo":{"editTooltip":"Edit this file"},"ghDesktopPath":"x-github-client://openRepo/https://github.com/simopt-admin/simopt?branch=python_dev_litong&filepath=demo_radom_model.py","gitLfsPath":null,"onBranch":true,"shortPath":"2c955a3","siteNavLoginPath":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsimopt-admin%2Fsimopt%2Fblob%2Fpython_dev_litong%2Fdemo_radom_model.py","isCSV":false,"isRichtext":false,"toc":null,"lineInfo":{"truncatedLoc":"68","truncatedSloc":"51"},"mode":"file"},"image":false,"isCodeownersFile":null,"isPlain":false,"isValidLegacyIssueTemplate":false,"issueTemplateHelpUrl":"https://docs.github.com/articles/about-issue-and-pull-request-templates","issueTemplate":null,"discussionTemplate":null,"language":"Python","languageID":303,"large":false,"loggedIn":true,"newDiscussionPath":"/simopt-admin/simopt/discussions/new","newIssuePath":"/simopt-admin/simopt/issues/new","planSupportInfo":{"repoIsFork":null,"repoOwnedByCurrentUser":null,"requestFullPath":"/simopt-admin/simopt/blob/python_dev_litong/demo_radom_model.py","showFreeOrgGatedFeatureMessage":null,"showPlanSupportBanner":null,"upgradeDataAttributes":null,"upgradePath":null},"publishBannersInfo":{"dismissActionNoticePath":"/settings/dismiss-notice/publish_action_from_dockerfile","dismissStackNoticePath":"/settings/dismiss-notice/publish_stack_from_file","releasePath":"/simopt-admin/simopt/releases/new?marketplace=true","showPublishActionBanner":false,"showPublishStackBanner":false},"renderImageOrRaw":false,"richText":null,"renderedFileInfo":null,"shortPath":null,"tabSize":8,"topBannersInfo":{"overridingG
lobalFundingFile":false,"globalPreferredFundingPath":null,"repoOwner":"simopt-admin","repoName":"simopt","showInvalidCitationWarning":false,"citationHelpUrl":"https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/creating-a-repository-on-github/about-citation-files","showDependabotConfigurationBanner":null,"actionsOnboardingTip":null},"truncated":false,"viewable":true,"workflowRedirectUrl":null,"symbols":{"timedOut":false,"notAnalyzed":false,"symbols":[{"name":"fixed_factors","kind":"constant","identStart":619,"identEnd":632,"extentStart":619,"extentEnd":637,"fullyQualifiedName":"fixed_factors","identUtf16":{"start":{"lineNumber":22,"utf16Col":0},"end":{"lineNumber":22,"utf16Col":13}},"extentUtf16":{"start":{"lineNumber":22,"utf16Col":0},"end":{"lineNumber":22,"utf16Col":18}}},{"name":"mymodel","kind":"constant","identStart":638,"identEnd":645,"extentStart":638,"extentEnd":695,"fullyQualifiedName":"mymodel","identUtf16":{"start":{"lineNumber":23,"utf16Col":0},"end":{"lineNumber":23,"utf16Col":7}},"extentUtf16":{"start":{"lineNumber":23,"utf16Col":0},"end":{"lineNumber":23,"utf16Col":57}}},{"name":"rng_list","kind":"constant","identStart":1980,"identEnd":1988,"extentStart":1980,"extentEnd":2060,"fullyQualifiedName":"rng_list","identUtf16":{"start":{"lineNumber":58,"utf16Col":0},"end":{"lineNumber":58,"utf16Col":8}},"extentUtf16":{"start":{"lineNumber":58,"utf16Col":0},"end":{"lineNumber":58,"utf16Col":80}}},{"name":"rng_list2","kind":"constant","identStart":2061,"identEnd":2070,"extentStart":2061,"extentEnd":2148,"fullyQualifiedName":"rng_list2","identUtf16":{"start":{"lineNumber":59,"utf16Col":0},"end":{"lineNumber":59,"utf16Col":9}},"extentUtf16":{"start":{"lineNumber":59,"utf16Col":0},"end":{"lineNumber":59,"utf16Col":87}}}]}},"copilotInfo":{"notices":{"codeViewPopover":{"dismissed":false,"dismissPath":"/settings/dismiss-notice/code_view_copilot_popover"}},"userAccess":{"accessAllowed":false,"hasSubscriptionEnded":false,"orgHasCFBAccess":fa
lse,"userHasCFIAccess":false,"userHasOrgs":false,"userIsOrgAdmin":false,"userIsOrgMember":false,"business":null,"featureRequestInfo":null}},"csrf_tokens":{"/simopt-admin/simopt/branches":{"post":"gVU58KclJBN61sDrSwOOUJiybVpLcV12s3LAhNPf6fQpWv1_SSZaFIlQyu69yIJl3x2kP1olFHzV3v_ewpZEyw"},"/repos/preferences":{"post":"3OBW80b3l3llVA6hLg_ye5J6A1VDX2zjpk7KenloWtpICcN373IG90UWVbebxESBrNN6w737tvradEkTjKGfXg"}}},"title":"simopt/demo_radom_model.py at python_dev_litong · simopt-admin/simopt"} \ No newline at end of file diff --git a/demo/demo_random_problem.py b/demo/demo_random_problem.py new file mode 100644 index 000000000..aadc6f039 --- /dev/null +++ b/demo/demo_random_problem.py @@ -0,0 +1 @@ +{"payload":{"allShortcutsEnabled":true,"fileTree":{"":{"items":[{"name":"data_farming_experiments","path":"data_farming_experiments","contentType":"directory"},{"name":"demo","path":"demo","contentType":"directory"},{"name":"dist","path":"dist","contentType":"directory"},{"name":"docs","path":"docs","contentType":"directory"},{"name":"experiments","path":"experiments","contentType":"directory"},{"name":"notebooks","path":"notebooks","contentType":"directory"},{"name":"simopt","path":"simopt","contentType":"directory"},{"name":"simoptlib.egg-info","path":"simoptlib.egg-info","contentType":"directory"},{"name":"workshop","path":"workshop","contentType":"directory"},{"name":".gitignore","path":".gitignore","contentType":"file"},{"name":"LICENSE","path":"LICENSE","contentType":"file"},{"name":"README.md","path":"README.md","contentType":"file"},{"name":"demo_radom_model.py","path":"demo_radom_model.py","contentType":"file"},{"name":"demo_random_problem.py","path":"demo_random_problem.py","contentType":"file"},{"name":"demo_random_problem_solver.py","path":"demo_random_problem_solver.py","contentType":"file"},{"name":"demo_user.py","path":"demo_user.py","contentType":"file"},{"name":"pyproject.toml","path":"pyproject.toml","contentType":"file"}],"totalCount":17}},"fileTreeProcessingTime"
:5.38909,"foldersToFetch":[],"reducedMotionEnabled":"system","repo":{"id":194012165,"defaultBranch":"master","name":"simopt","ownerLogin":"simopt-admin","currentUserCanPush":true,"isFork":false,"isEmpty":false,"createdAt":"2019-06-26T22:55:30.000-04:00","ownerAvatar":"https://avatars.githubusercontent.com/u/52267122?v=4","public":true,"private":false,"isOrgOwned":false},"symbolsExpanded":false,"treeExpanded":true,"refInfo":{"name":"python_dev_litong","listCacheKey":"v0:1692137230.0","canEdit":true,"refType":"branch","currentOid":"382561d40918dac6fcfb54e7c1f873bdca0f46e9"},"path":"demo_random_problem.py","currentUser":{"id":46491025,"login":"liulitong-Jessie","userEmail":"118010185@link.cuhk.edu.cn"},"blob":{"rawLines":["\"\"\"","This script is intended to help with debugging a random problem.","It imports a random problem, initializes a problem object with given factors,","sets up pseudorandom number generators, and runs multiple replications","at a given solution.","\"\"\"","","import sys","import os.path as o","sys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), \"..\")))","","# Import random number generator.","# from mrg32k3a.mrg32k3a import MRG32k3a","from rng.mrg32k3a import MRG32k3a","","# Import the Solution class.","from simopt.base import Solution","","# Import problem.","# from models. import ","# Replace with name of .py file containing problem class.","# Replace with name of problem class.","","# Fix factors of problem. 
Specify a dictionary of factors.","","# fixed_factors = {} # Resort to all default values.","# Look at Problem class definition to get names of factors.","","# Initialize an instance of the specified problem class.","","# myproblem = (fixed_factors=fixed_factors)","# Replace with name of problem class.","","# Initialize a solution x corresponding to the problem.","","# Look at the Problem class definition to identify the decision variables.","# x will be a tuple consisting of the decision variables.","","# The following line does not need to be changed.","# mysolution = Solution(x, myproblem)","","# -----------------------------------------------","","from simopt.models.san_2 import SANLongestPath # Change this import command correspondingly","","def rebase(random_rng, n):"," new_rngs = []"," for rng in random_rng:"," stream_index = rng.s_ss_sss_index[0]"," substream_index = rng.s_ss_sss_index[1]"," subsubstream_index = rng.s_ss_sss_index[2]"," new_rngs.append(MRG32k3a(s_ss_sss_index=[stream_index, substream_index + n, subsubstream_index]))"," random_rng = new_rngs"," return random_rng","","n_inst = 5 # The number of random instances you want to generate","","model_fixed_factors = {\"num_nodes\": 9, \"num_arcs\": 14} # Change to empty {} if want to use the default value ","myproblem = SANLongestPath(model_fixed_factors=model_fixed_factors, random=True) # Change to the imported problem","","rng_list = [MRG32k3a(s_ss_sss_index=[0, ss, 0]) for ss in range(myproblem.model.n_rngs)]","random_rng = [MRG32k3a(s_ss_sss_index=[2, 4, ss]) for ss in range(myproblem.model.n_random, myproblem.model.n_random + myproblem.n_rngs)]","rng_list2 = [MRG32k3a(s_ss_sss_index=[2, 4, ss]) for ss in range(myproblem.model.n_random)]","","# Generate n_inst random problem instances","for i in range(n_inst):"," random_rng = rebase(random_rng, 1)"," rng_list2 = rebase(rng_list2, 1)"," myproblem = SANLongestPath(model_fixed_factors=model_fixed_factors, random=True, random_rng=rng_list2) # Change 
to the imported problem"," myproblem.attach_rngs(random_rng)"," x = (8,) * myproblem.dim # Change the initial value according to the dimension"," mysolution = Solution(x, myproblem)"," mysolution.attach_rngs(rng_list, copy=False)"," "," # Simulate a fixed number of replications (n_reps) at the solution x."," n_reps = 10",""," myproblem.simulate(mysolution, m=n_reps)",""," # Print results to console."," print(mysolution.objectives_mean[0])"," print(type(mysolution))"," print(f\"Ran {n_reps} replications of the {myproblem.name} problem at solution x = {x}.\\n\")"," # print(f\"The mean objective estimate was {round(mysolution.objectives_mean[0], 4)} with standard error {round(mysolution.objectives_stderr[0], 4)}.\") "," print(\"The individual observations of the objective were:\")"," for idx in range(n_reps):"," print(f\"\\t {round(mysolution.objectives[idx][0], 4)}\")"," if myproblem.gradient_available:"," print(\"\\nThe individual observations of the gradients of the objective were:\")"," for idx in range(n_reps):"," print(f\"\\t {[round(g, 4) for g in mysolution.objectives_gradients[idx][0]]}\")"," else:"," print(\"\\nThis problem has no known gradients.\")"," if myproblem.n_stochastic_constraints > 0:"," print(f\"\\nThis problem has {myproblem.n_stochastic_constraints} stochastic constraints of the form E[LHS] <= 0.\")"," for stc_idx in range(myproblem.n_stochastic_constraints):"," print(f\"\\tFor stochastic constraint #{stc_idx + 1}, the mean of the LHS was {round(mysolution.stoch_constraints_mean[stc_idx], 4)} with standard error {round(mysolution.stoch_constraints_stderr[stc_idx], 4)}.\")"," print(\"\\tThe observations of the LHSs were:\")"," for idx in range(n_reps):"," print(f\"\\t\\t {round(mysolution.stoch_constraints[idx][stc_idx], 4)}\")"," else:"," print(\"\\nThis problem has no stochastic 
constraints.\")"],"stylingDirectives":[[{"start":0,"end":3,"cssClass":"pl-s"}],[{"start":0,"end":64,"cssClass":"pl-s"}],[{"start":0,"end":77,"cssClass":"pl-s"}],[{"start":0,"end":70,"cssClass":"pl-s"}],[{"start":0,"end":20,"cssClass":"pl-s"}],[{"start":0,"end":3,"cssClass":"pl-s"}],[],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":10,"cssClass":"pl-s1"}],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":9,"cssClass":"pl-s1"},{"start":10,"end":14,"cssClass":"pl-s1"},{"start":15,"end":17,"cssClass":"pl-k"},{"start":18,"end":19,"cssClass":"pl-s1"}],[{"start":0,"end":3,"cssClass":"pl-s1"},{"start":4,"end":8,"cssClass":"pl-s1"},{"start":9,"end":15,"cssClass":"pl-en"},{"start":16,"end":17,"cssClass":"pl-s1"},{"start":18,"end":25,"cssClass":"pl-en"},{"start":26,"end":27,"cssClass":"pl-s1"},{"start":28,"end":32,"cssClass":"pl-en"},{"start":33,"end":34,"cssClass":"pl-s1"},{"start":35,"end":42,"cssClass":"pl-en"},{"start":43,"end":46,"cssClass":"pl-s1"},{"start":47,"end":54,"cssClass":"pl-s1"},{"start":55,"end":63,"cssClass":"pl-s1"},{"start":65,"end":73,"cssClass":"pl-s1"},{"start":76,"end":80,"cssClass":"pl-s"}],[],[{"start":0,"end":33,"cssClass":"pl-c"}],[{"start":0,"end":40,"cssClass":"pl-c"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":8,"cssClass":"pl-s1"},{"start":9,"end":17,"cssClass":"pl-s1"},{"start":18,"end":24,"cssClass":"pl-k"},{"start":25,"end":33,"cssClass":"pl-v"}],[],[{"start":0,"end":28,"cssClass":"pl-c"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":16,"cssClass":"pl-s1"},{"start":17,"end":23,"cssClass":"pl-k"},{"start":24,"end":32,"cssClass":"pl-v"}],[],[{"start":0,"end":17,"cssClass":"pl-c"}],[{"start":0,"end":52,"cssClass":"pl-c"}],[{"start":0,"end":68,"cssClass":"pl-c"}],[{"start":0,"end":58,"cssClass":"pl-c"}],[],[{"start":0,"end":58,"cssClass":"pl-c"}],[],[{"start":0,"end":53,"cssClass":"pl-c"}],[{"start":0,"end":59,"cssClass":"pl-c"}],[],[{"start":0,"end":56,"cssClass":"pl
-c"}],[],[{"start":0,"end":63,"cssClass":"pl-c"}],[{"start":0,"end":58,"cssClass":"pl-c"}],[],[{"start":0,"end":55,"cssClass":"pl-c"}],[],[{"start":0,"end":74,"cssClass":"pl-c"}],[{"start":0,"end":57,"cssClass":"pl-c"}],[],[{"start":0,"end":49,"cssClass":"pl-c"}],[{"start":0,"end":37,"cssClass":"pl-c"}],[],[{"start":0,"end":49,"cssClass":"pl-c"}],[],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":18,"cssClass":"pl-s1"},{"start":19,"end":24,"cssClass":"pl-s1"},{"start":25,"end":31,"cssClass":"pl-k"},{"start":32,"end":46,"cssClass":"pl-v"},{"start":48,"end":92,"cssClass":"pl-c"}],[],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":10,"cssClass":"pl-en"},{"start":11,"end":21,"cssClass":"pl-s1"},{"start":23,"end":24,"cssClass":"pl-s1"}],[{"start":4,"end":12,"cssClass":"pl-s1"},{"start":13,"end":14,"cssClass":"pl-c1"}],[{"start":4,"end":7,"cssClass":"pl-k"},{"start":8,"end":11,"cssClass":"pl-s1"},{"start":12,"end":14,"cssClass":"pl-c1"},{"start":15,"end":25,"cssClass":"pl-s1"}],[{"start":8,"end":20,"cssClass":"pl-s1"},{"start":21,"end":22,"cssClass":"pl-c1"},{"start":23,"end":26,"cssClass":"pl-s1"},{"start":27,"end":41,"cssClass":"pl-s1"},{"start":42,"end":43,"cssClass":"pl-c1"}],[{"start":8,"end":23,"cssClass":"pl-s1"},{"start":24,"end":25,"cssClass":"pl-c1"},{"start":26,"end":29,"cssClass":"pl-s1"},{"start":30,"end":44,"cssClass":"pl-s1"},{"start":45,"end":46,"cssClass":"pl-c1"}],[{"start":8,"end":26,"cssClass":"pl-s1"},{"start":27,"end":28,"cssClass":"pl-c1"},{"start":29,"end":32,"cssClass":"pl-s1"},{"start":33,"end":47,"cssClass":"pl-s1"},{"start":48,"end":49,"cssClass":"pl-c1"}],[{"start":8,"end":16,"cssClass":"pl-s1"},{"start":17,"end":23,"cssClass":"pl-en"},{"start":24,"end":32,"cssClass":"pl-v"},{"start":33,"end":47,"cssClass":"pl-s1"},{"start":47,"end":48,"cssClass":"pl-c1"},{"start":49,"end":61,"cssClass":"pl-s1"},{"start":63,"end":78,"cssClass":"pl-s1"},{"start":79,"end":80,"cssClass":"pl-c1"},{"start":81
,"end":82,"cssClass":"pl-s1"},{"start":84,"end":102,"cssClass":"pl-s1"}],[{"start":4,"end":14,"cssClass":"pl-s1"},{"start":15,"end":16,"cssClass":"pl-c1"},{"start":17,"end":25,"cssClass":"pl-s1"}],[{"start":4,"end":10,"cssClass":"pl-k"},{"start":11,"end":21,"cssClass":"pl-s1"}],[],[{"start":0,"end":6,"cssClass":"pl-s1"},{"start":7,"end":8,"cssClass":"pl-c1"},{"start":9,"end":10,"cssClass":"pl-c1"},{"start":12,"end":65,"cssClass":"pl-c"}],[],[{"start":0,"end":19,"cssClass":"pl-s1"},{"start":20,"end":21,"cssClass":"pl-c1"},{"start":23,"end":34,"cssClass":"pl-s"},{"start":36,"end":37,"cssClass":"pl-c1"},{"start":39,"end":49,"cssClass":"pl-s"},{"start":51,"end":53,"cssClass":"pl-c1"},{"start":56,"end":110,"cssClass":"pl-c"}],[{"start":0,"end":9,"cssClass":"pl-s1"},{"start":10,"end":11,"cssClass":"pl-c1"},{"start":12,"end":26,"cssClass":"pl-v"},{"start":27,"end":46,"cssClass":"pl-s1"},{"start":46,"end":47,"cssClass":"pl-c1"},{"start":47,"end":66,"cssClass":"pl-s1"},{"start":68,"end":74,"cssClass":"pl-s1"},{"start":74,"end":75,"cssClass":"pl-c1"},{"start":75,"end":79,"cssClass":"pl-c1"},{"start":82,"end":114,"cssClass":"pl-c"}],[],[{"start":0,"end":8,"cssClass":"pl-s1"},{"start":9,"end":10,"cssClass":"pl-c1"},{"start":12,"end":20,"cssClass":"pl-v"},{"start":21,"end":35,"cssClass":"pl-s1"},{"start":35,"end":36,"cssClass":"pl-c1"},{"start":37,"end":38,"cssClass":"pl-c1"},{"start":40,"end":42,"cssClass":"pl-s1"},{"start":44,"end":45,"cssClass":"pl-c1"},{"start":48,"end":51,"cssClass":"pl-k"},{"start":52,"end":54,"cssClass":"pl-s1"},{"start":55,"end":57,"cssClass":"pl-c1"},{"start":58,"end":63,"cssClass":"pl-en"},{"start":64,"end":73,"cssClass":"pl-s1"},{"start":74,"end":79,"cssClass":"pl-s1"},{"start":80,"end":86,"cssClass":"pl-s1"}],[{"start":0,"end":10,"cssClass":"pl-s1"},{"start":11,"end":12,"cssClass":"pl-c1"},{"start":14,"end":22,"cssClass":"pl-v"},{"start":23,"end":37,"cssClass":"pl-s1"},{"start":37,"end":38,"cssClass":"pl-c1"},{"start":39,"end":40,"cssClass":"pl-c1"},
{"start":42,"end":43,"cssClass":"pl-c1"},{"start":45,"end":47,"cssClass":"pl-s1"},{"start":50,"end":53,"cssClass":"pl-k"},{"start":54,"end":56,"cssClass":"pl-s1"},{"start":57,"end":59,"cssClass":"pl-c1"},{"start":60,"end":65,"cssClass":"pl-en"},{"start":66,"end":75,"cssClass":"pl-s1"},{"start":76,"end":81,"cssClass":"pl-s1"},{"start":82,"end":90,"cssClass":"pl-s1"},{"start":92,"end":101,"cssClass":"pl-s1"},{"start":102,"end":107,"cssClass":"pl-s1"},{"start":108,"end":116,"cssClass":"pl-s1"},{"start":117,"end":118,"cssClass":"pl-c1"},{"start":119,"end":128,"cssClass":"pl-s1"},{"start":129,"end":135,"cssClass":"pl-s1"}],[{"start":0,"end":9,"cssClass":"pl-s1"},{"start":10,"end":11,"cssClass":"pl-c1"},{"start":13,"end":21,"cssClass":"pl-v"},{"start":22,"end":36,"cssClass":"pl-s1"},{"start":36,"end":37,"cssClass":"pl-c1"},{"start":38,"end":39,"cssClass":"pl-c1"},{"start":41,"end":42,"cssClass":"pl-c1"},{"start":44,"end":46,"cssClass":"pl-s1"},{"start":49,"end":52,"cssClass":"pl-k"},{"start":53,"end":55,"cssClass":"pl-s1"},{"start":56,"end":58,"cssClass":"pl-c1"},{"start":59,"end":64,"cssClass":"pl-en"},{"start":65,"end":74,"cssClass":"pl-s1"},{"start":75,"end":80,"cssClass":"pl-s1"},{"start":81,"end":89,"cssClass":"pl-s1"}],[],[{"start":0,"end":42,"cssClass":"pl-c"}],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":5,"cssClass":"pl-s1"},{"start":6,"end":8,"cssClass":"pl-c1"},{"start":9,"end":14,"cssClass":"pl-en"},{"start":15,"end":21,"cssClass":"pl-s1"}],[{"start":4,"end":14,"cssClass":"pl-s1"},{"start":15,"end":16,"cssClass":"pl-c1"},{"start":17,"end":23,"cssClass":"pl-en"},{"start":24,"end":34,"cssClass":"pl-s1"},{"start":36,"end":37,"cssClass":"pl-c1"}],[{"start":4,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-c1"},{"start":16,"end":22,"cssClass":"pl-en"},{"start":23,"end":32,"cssClass":"pl-s1"},{"start":34,"end":35,"cssClass":"pl-c1"}],[{"start":4,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-c1"},{"start":16,"end":30,"cs
sClass":"pl-v"},{"start":31,"end":50,"cssClass":"pl-s1"},{"start":50,"end":51,"cssClass":"pl-c1"},{"start":51,"end":70,"cssClass":"pl-s1"},{"start":72,"end":78,"cssClass":"pl-s1"},{"start":78,"end":79,"cssClass":"pl-c1"},{"start":79,"end":83,"cssClass":"pl-c1"},{"start":85,"end":95,"cssClass":"pl-s1"},{"start":95,"end":96,"cssClass":"pl-c1"},{"start":96,"end":105,"cssClass":"pl-s1"},{"start":108,"end":140,"cssClass":"pl-c"}],[{"start":4,"end":13,"cssClass":"pl-s1"},{"start":14,"end":25,"cssClass":"pl-en"},{"start":26,"end":36,"cssClass":"pl-s1"}],[{"start":4,"end":5,"cssClass":"pl-s1"},{"start":6,"end":7,"cssClass":"pl-c1"},{"start":9,"end":10,"cssClass":"pl-c1"},{"start":13,"end":14,"cssClass":"pl-c1"},{"start":15,"end":24,"cssClass":"pl-s1"},{"start":25,"end":28,"cssClass":"pl-s1"},{"start":30,"end":83,"cssClass":"pl-c"}],[{"start":4,"end":14,"cssClass":"pl-s1"},{"start":15,"end":16,"cssClass":"pl-c1"},{"start":17,"end":25,"cssClass":"pl-v"},{"start":26,"end":27,"cssClass":"pl-s1"},{"start":29,"end":38,"cssClass":"pl-s1"}],[{"start":4,"end":14,"cssClass":"pl-s1"},{"start":15,"end":26,"cssClass":"pl-en"},{"start":27,"end":35,"cssClass":"pl-s1"},{"start":37,"end":41,"cssClass":"pl-s1"},{"start":41,"end":42,"cssClass":"pl-c1"},{"start":42,"end":47,"cssClass":"pl-c1"}],[],[{"start":4,"end":73,"cssClass":"pl-c"}],[{"start":4,"end":10,"cssClass":"pl-s1"},{"start":11,"end":12,"cssClass":"pl-c1"},{"start":13,"end":15,"cssClass":"pl-c1"}],[],[{"start":4,"end":13,"cssClass":"pl-s1"},{"start":14,"end":22,"cssClass":"pl-en"},{"start":23,"end":33,"cssClass":"pl-s1"},{"start":35,"end":36,"cssClass":"pl-s1"},{"start":36,"end":37,"cssClass":"pl-c1"},{"start":37,"end":43,"cssClass":"pl-s1"}],[],[{"start":4,"end":31,"cssClass":"pl-c"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":20,"cssClass":"pl-s1"},{"start":21,"end":36,"cssClass":"pl-s1"},{"start":37,"end":38,"cssClass":"pl-c1"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":14,"cssClass":"pl-en"},{"sta
rt":15,"end":25,"cssClass":"pl-s1"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":93,"cssClass":"pl-s"},{"start":16,"end":24,"cssClass":"pl-s1"},{"start":16,"end":17,"cssClass":"pl-kos"},{"start":17,"end":23,"cssClass":"pl-s1"},{"start":23,"end":24,"cssClass":"pl-kos"},{"start":45,"end":61,"cssClass":"pl-s1"},{"start":45,"end":46,"cssClass":"pl-kos"},{"start":46,"end":55,"cssClass":"pl-s1"},{"start":56,"end":60,"cssClass":"pl-s1"},{"start":60,"end":61,"cssClass":"pl-kos"},{"start":86,"end":89,"cssClass":"pl-s1"},{"start":86,"end":87,"cssClass":"pl-kos"},{"start":87,"end":88,"cssClass":"pl-s1"},{"start":88,"end":89,"cssClass":"pl-kos"},{"start":90,"end":92,"cssClass":"pl-cce"}],[{"start":4,"end":158,"cssClass":"pl-c"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":62,"cssClass":"pl-s"}],[{"start":4,"end":7,"cssClass":"pl-k"},{"start":8,"end":11,"cssClass":"pl-s1"},{"start":12,"end":14,"cssClass":"pl-c1"},{"start":15,"end":20,"cssClass":"pl-en"},{"start":21,"end":27,"cssClass":"pl-s1"}],[{"start":8,"end":13,"cssClass":"pl-en"},{"start":14,"end":61,"cssClass":"pl-s"},{"start":16,"end":18,"cssClass":"pl-cce"},{"start":19,"end":60,"cssClass":"pl-s1"},{"start":19,"end":20,"cssClass":"pl-kos"},{"start":20,"end":25,"cssClass":"pl-en"},{"start":26,"end":36,"cssClass":"pl-s1"},{"start":37,"end":47,"cssClass":"pl-s1"},{"start":48,"end":51,"cssClass":"pl-s1"},{"start":53,"end":54,"cssClass":"pl-c1"},{"start":57,"end":58,"cssClass":"pl-c1"},{"start":59,"end":60,"cssClass":"pl-kos"}],[{"start":4,"end":6,"cssClass":"pl-k"},{"start":7,"end":16,"cssClass":"pl-s1"},{"start":17,"end":35,"cssClass":"pl-s1"}],[{"start":8,"end":13,"cssClass":"pl-en"},{"start":14,"end":85,"cssClass":"pl-s"},{"start":15,"end":17,"cssClass":"pl-cce"}],[{"start":8,"end":11,"cssClass":"pl-k"},{"start":12,"end":15,"cssClass":"pl-s1"},{"start":16,"end":18,"cssClass":"pl-c1"},{"start":19,"end":24,"cssClass":"pl-en"},{"start":25,"end":31,"cssClass":"pl-s1"}],[{"start":12,"end":17,"cssClas
s":"pl-en"},{"start":18,"end":88,"cssClass":"pl-s"},{"start":20,"end":22,"cssClass":"pl-cce"},{"start":23,"end":87,"cssClass":"pl-s1"},{"start":23,"end":24,"cssClass":"pl-kos"},{"start":25,"end":30,"cssClass":"pl-en"},{"start":31,"end":32,"cssClass":"pl-s1"},{"start":34,"end":35,"cssClass":"pl-c1"},{"start":37,"end":40,"cssClass":"pl-k"},{"start":41,"end":42,"cssClass":"pl-s1"},{"start":43,"end":45,"cssClass":"pl-c1"},{"start":46,"end":56,"cssClass":"pl-s1"},{"start":57,"end":77,"cssClass":"pl-s1"},{"start":78,"end":81,"cssClass":"pl-s1"},{"start":83,"end":84,"cssClass":"pl-c1"},{"start":86,"end":87,"cssClass":"pl-kos"}],[{"start":4,"end":8,"cssClass":"pl-k"}],[{"start":8,"end":13,"cssClass":"pl-en"},{"start":14,"end":54,"cssClass":"pl-s"},{"start":15,"end":17,"cssClass":"pl-cce"}],[{"start":4,"end":6,"cssClass":"pl-k"},{"start":7,"end":16,"cssClass":"pl-s1"},{"start":17,"end":41,"cssClass":"pl-s1"},{"start":42,"end":43,"cssClass":"pl-c1"},{"start":44,"end":45,"cssClass":"pl-c1"}],[{"start":8,"end":13,"cssClass":"pl-en"},{"start":14,"end":120,"cssClass":"pl-s"},{"start":16,"end":18,"cssClass":"pl-cce"},{"start":35,"end":71,"cssClass":"pl-s1"},{"start":35,"end":36,"cssClass":"pl-kos"},{"start":36,"end":45,"cssClass":"pl-s1"},{"start":46,"end":70,"cssClass":"pl-s1"},{"start":70,"end":71,"cssClass":"pl-kos"}],[{"start":8,"end":11,"cssClass":"pl-k"},{"start":12,"end":19,"cssClass":"pl-s1"},{"start":20,"end":22,"cssClass":"pl-c1"},{"start":23,"end":28,"cssClass":"pl-en"},{"start":29,"end":38,"cssClass":"pl-s1"},{"start":39,"end":63,"cssClass":"pl-s1"}],[{"start":12,"end":17,"cssClass":"pl-en"},{"start":18,"end":221,"cssClass":"pl-s"},{"start":20,"end":22,"cssClass":"pl-cce"},{"start":49,"end":62,"cssClass":"pl-s1"},{"start":49,"end":50,"cssClass":"pl-kos"},{"start":50,"end":57,"cssClass":"pl-s1"},{"start":58,"end":59,"cssClass":"pl-c1"},{"start":60,"end":61,"cssClass":"pl-c1"},{"start":61,"end":62,"cssClass":"pl-kos"},{"start":88,"end":142,"cssClass":"pl-s1"},{"start":88
,"end":89,"cssClass":"pl-kos"},{"start":89,"end":94,"cssClass":"pl-en"},{"start":95,"end":105,"cssClass":"pl-s1"},{"start":106,"end":128,"cssClass":"pl-s1"},{"start":129,"end":136,"cssClass":"pl-s1"},{"start":139,"end":140,"cssClass":"pl-c1"},{"start":141,"end":142,"cssClass":"pl-kos"},{"start":163,"end":219,"cssClass":"pl-s1"},{"start":163,"end":164,"cssClass":"pl-kos"},{"start":164,"end":169,"cssClass":"pl-en"},{"start":170,"end":180,"cssClass":"pl-s1"},{"start":181,"end":205,"cssClass":"pl-s1"},{"start":206,"end":213,"cssClass":"pl-s1"},{"start":216,"end":217,"cssClass":"pl-c1"},{"start":218,"end":219,"cssClass":"pl-kos"}],[{"start":12,"end":17,"cssClass":"pl-en"},{"start":18,"end":56,"cssClass":"pl-s"},{"start":19,"end":21,"cssClass":"pl-cce"}],[{"start":12,"end":15,"cssClass":"pl-k"},{"start":16,"end":19,"cssClass":"pl-s1"},{"start":20,"end":22,"cssClass":"pl-c1"},{"start":23,"end":28,"cssClass":"pl-en"},{"start":29,"end":35,"cssClass":"pl-s1"}],[{"start":16,"end":21,"cssClass":"pl-en"},{"start":22,"end":84,"cssClass":"pl-s"},{"start":24,"end":26,"cssClass":"pl-cce"},{"start":26,"end":28,"cssClass":"pl-cce"},{"start":29,"end":83,"cssClass":"pl-s1"},{"start":29,"end":30,"cssClass":"pl-kos"},{"start":30,"end":35,"cssClass":"pl-en"},{"start":36,"end":46,"cssClass":"pl-s1"},{"start":47,"end":64,"cssClass":"pl-s1"},{"start":65,"end":68,"cssClass":"pl-s1"},{"start":70,"end":77,"cssClass":"pl-s1"},{"start":80,"end":81,"cssClass":"pl-c1"},{"start":82,"end":83,"cssClass":"pl-kos"}],[{"start":4,"end":8,"cssClass":"pl-k"}],[{"start":8,"end":13,"cssClass":"pl-en"},{"start":14,"end":61,"cssClass":"pl-s"},{"start":15,"end":17,"cssClass":"pl-cce"}],[]],"csv":null,"csvError":null,"dependabotInfo":{"showConfigurationBanner":null,"configFilePath":null,"networkDependabotPath":"/simopt-admin/simopt/network/updates","dismissConfigurationNoticePath":"/settings/dismiss-notice/dependabot_configuration_notice","configurationNoticeDismissed":false,"repoAlertsPath":"/simopt-admin/simopt/
security/dependabot","repoSecurityAndAnalysisPath":"/simopt-admin/simopt/settings/security_analysis","repoOwnerIsOrg":false,"currentUserCanAdminRepo":false},"displayName":"demo_random_problem.py","displayUrl":"https://github.com/simopt-admin/simopt/blob/python_dev_litong/demo_random_problem.py?raw=true","headerInfo":{"blobSize":"4.57 KB","deleteInfo":{"deleteTooltip":"Delete this file"},"editInfo":{"editTooltip":"Edit this file"},"ghDesktopPath":"x-github-client://openRepo/https://github.com/simopt-admin/simopt?branch=python_dev_litong&filepath=demo_random_problem.py","gitLfsPath":null,"onBranch":true,"shortPath":"8dfdab7","siteNavLoginPath":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsimopt-admin%2Fsimopt%2Fblob%2Fpython_dev_litong%2Fdemo_random_problem.py","isCSV":false,"isRichtext":false,"toc":null,"lineInfo":{"truncatedLoc":"103","truncatedSloc":"81"},"mode":"file"},"image":false,"isCodeownersFile":null,"isPlain":false,"isValidLegacyIssueTemplate":false,"issueTemplateHelpUrl":"https://docs.github.com/articles/about-issue-and-pull-request-templates","issueTemplate":null,"discussionTemplate":null,"language":"Python","languageID":303,"large":false,"loggedIn":true,"newDiscussionPath":"/simopt-admin/simopt/discussions/new","newIssuePath":"/simopt-admin/simopt/issues/new","planSupportInfo":{"repoIsFork":null,"repoOwnedByCurrentUser":null,"requestFullPath":"/simopt-admin/simopt/blob/python_dev_litong/demo_random_problem.py","showFreeOrgGatedFeatureMessage":null,"showPlanSupportBanner":null,"upgradeDataAttributes":null,"upgradePath":null},"publishBannersInfo":{"dismissActionNoticePath":"/settings/dismiss-notice/publish_action_from_dockerfile","dismissStackNoticePath":"/settings/dismiss-notice/publish_stack_from_file","releasePath":"/simopt-admin/simopt/releases/new?marketplace=true","showPublishActionBanner":false,"showPublishStackBanner":false},"renderImageOrRaw":false,"richText":null,"renderedFileInfo":null,"shortPath":null,"tabSize":8,"topBannersInfo":{"overridingGl
obalFundingFile":false,"globalPreferredFundingPath":null,"repoOwner":"simopt-admin","repoName":"simopt","showInvalidCitationWarning":false,"citationHelpUrl":"https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/creating-a-repository-on-github/about-citation-files","showDependabotConfigurationBanner":null,"actionsOnboardingTip":null},"truncated":false,"viewable":true,"workflowRedirectUrl":null,"symbols":{"timedOut":false,"notAnalyzed":false,"symbols":[{"name":"rebase","kind":"function","identStart":1519,"identEnd":1525,"extentStart":1515,"extentEnd":1884,"fullyQualifiedName":"rebase","identUtf16":{"start":{"lineNumber":45,"utf16Col":4},"end":{"lineNumber":45,"utf16Col":10}},"extentUtf16":{"start":{"lineNumber":45,"utf16Col":0},"end":{"lineNumber":53,"utf16Col":21}}},{"name":"n_inst","kind":"constant","identStart":1886,"identEnd":1892,"extentStart":1886,"extentEnd":1896,"fullyQualifiedName":"n_inst","identUtf16":{"start":{"lineNumber":55,"utf16Col":0},"end":{"lineNumber":55,"utf16Col":6}},"extentUtf16":{"start":{"lineNumber":55,"utf16Col":0},"end":{"lineNumber":55,"utf16Col":10}}},{"name":"model_fixed_factors","kind":"constant","identStart":1953,"identEnd":1972,"extentStart":1953,"extentEnd":2007,"fullyQualifiedName":"model_fixed_factors","identUtf16":{"start":{"lineNumber":57,"utf16Col":0},"end":{"lineNumber":57,"utf16Col":19}},"extentUtf16":{"start":{"lineNumber":57,"utf16Col":0},"end":{"lineNumber":57,"utf16Col":54}}},{"name":"myproblem","kind":"constant","identStart":2064,"identEnd":2073,"extentStart":2064,"extentEnd":2144,"fullyQualifiedName":"myproblem","identUtf16":{"start":{"lineNumber":58,"utf16Col":0},"end":{"lineNumber":58,"utf16Col":9}},"extentUtf16":{"start":{"lineNumber":58,"utf16Col":0},"end":{"lineNumber":58,"utf16Col":80}}},{"name":"rng_list","kind":"constant","identStart":2180,"identEnd":2188,"extentStart":2180,"extentEnd":2268,"fullyQualifiedName":"rng_list","identUtf16":{"start":{"lineNumber":60,"utf16Col":0},"end":{"lineNu
mber":60,"utf16Col":8}},"extentUtf16":{"start":{"lineNumber":60,"utf16Col":0},"end":{"lineNumber":60,"utf16Col":88}}},{"name":"random_rng","kind":"constant","identStart":2269,"identEnd":2279,"extentStart":2269,"extentEnd":2406,"fullyQualifiedName":"random_rng","identUtf16":{"start":{"lineNumber":61,"utf16Col":0},"end":{"lineNumber":61,"utf16Col":10}},"extentUtf16":{"start":{"lineNumber":61,"utf16Col":0},"end":{"lineNumber":61,"utf16Col":137}}},{"name":"rng_list2","kind":"constant","identStart":2407,"identEnd":2416,"extentStart":2407,"extentEnd":2498,"fullyQualifiedName":"rng_list2","identUtf16":{"start":{"lineNumber":62,"utf16Col":0},"end":{"lineNumber":62,"utf16Col":9}},"extentUtf16":{"start":{"lineNumber":62,"utf16Col":0},"end":{"lineNumber":62,"utf16Col":91}}}]}},"copilotInfo":{"notices":{"codeViewPopover":{"dismissed":false,"dismissPath":"/settings/dismiss-notice/code_view_copilot_popover"}},"userAccess":{"accessAllowed":false,"hasSubscriptionEnded":false,"orgHasCFBAccess":false,"userHasCFIAccess":false,"userHasOrgs":false,"userIsOrgAdmin":false,"userIsOrgMember":false,"business":null,"featureRequestInfo":null}},"csrf_tokens":{"/simopt-admin/simopt/branches":{"post":"nLnJdnqZpSi40o4UgPmHr1FE3pTCl3uVWXeekMnECEA0tg35lJrbL0tUhBF2MouaFusX8dPDMp8_26HK2I2lfw"},"/repos/preferences":{"post":"3WtmyQcDpjPpPbCcgZqvqe0tVJyv8s-Ofs-U8MOk9PRJgvNNroY3vcl_64o0URlT04QtClFWFZcC9ReZNm0xcA"}}},"title":"simopt/demo_random_problem.py at python_dev_litong · simopt-admin/simopt"} \ No newline at end of file diff --git a/demo/demo_random_problem_solver.py b/demo/demo_random_problem_solver.py new file mode 100644 index 000000000..4cdbd770d --- /dev/null +++ b/demo/demo_random_problem_solver.py @@ -0,0 +1 @@ 
+{"payload":{"allShortcutsEnabled":true,"fileTree":{"":{"items":[{"name":"data_farming_experiments","path":"data_farming_experiments","contentType":"directory"},{"name":"demo","path":"demo","contentType":"directory"},{"name":"dist","path":"dist","contentType":"directory"},{"name":"docs","path":"docs","contentType":"directory"},{"name":"experiments","path":"experiments","contentType":"directory"},{"name":"notebooks","path":"notebooks","contentType":"directory"},{"name":"simopt","path":"simopt","contentType":"directory"},{"name":"simoptlib.egg-info","path":"simoptlib.egg-info","contentType":"directory"},{"name":"workshop","path":"workshop","contentType":"directory"},{"name":".gitignore","path":".gitignore","contentType":"file"},{"name":"LICENSE","path":"LICENSE","contentType":"file"},{"name":"README.md","path":"README.md","contentType":"file"},{"name":"demo_radom_model.py","path":"demo_radom_model.py","contentType":"file"},{"name":"demo_random_problem.py","path":"demo_random_problem.py","contentType":"file"},{"name":"demo_random_problem_solver.py","path":"demo_random_problem_solver.py","contentType":"file"},{"name":"demo_user.py","path":"demo_user.py","contentType":"file"},{"name":"pyproject.toml","path":"pyproject.toml","contentType":"file"}],"totalCount":17}},"fileTreeProcessingTime":3.607858,"foldersToFetch":[],"reducedMotionEnabled":"system","repo":{"id":194012165,"defaultBranch":"master","name":"simopt","ownerLogin":"simopt-admin","currentUserCanPush":true,"isFork":false,"isEmpty":false,"createdAt":"2019-06-26T22:55:30.000-04:00","ownerAvatar":"https://avatars.githubusercontent.com/u/52267122?v=4","public":true,"private":false,"isOrgOwned":false},"symbolsExpanded":false,"treeExpanded":true,"refInfo":{"name":"python_dev_litong","listCacheKey":"v0:1692137230.0","canEdit":true,"refType":"branch","currentOid":"382561d40918dac6fcfb54e7c1f873bdca0f46e9"},"path":"demo_random_problem_solver.py","currentUser":{"id":46491025,"login":"liulitong-Jessie","userEmail":"11801018
5@link.cuhk.edu.cn"},"blob":{"rawLines":["\"\"\"","This script is intended to help with debugging random problems and solvers.","It create a problem-solver pairing by importing problems and runs multiple","macroreplications of the solver on the problem.","\"\"\"","","import sys","import os.path as o","import numpy as np","import os","sys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), \"..\")))","","# Import the ProblemSolver class and other useful functions","from simopt.experiment_base import ProblemSolver, read_experiment_results, post_normalize, plot_progress_curves, plot_solvability_cdfs","from rng.mrg32k3a import MRG32k3a","from simopt.models.san_2 import SANLongestPath, SANLongestPathConstr","","# !! When testing a new solver/problem, first go to directory.py.","# See directory.py for more details.","# Specify the names of the solver to test.","","# -----------------------------------------------","solver_name = \"RNDSRCH\" # Random search solver","# -----------------------------------------------","","","def rebase(random_rng, n):"," new_rngs = []"," for rng in random_rng:"," stream_index = rng.s_ss_sss_index[0]"," substream_index = rng.s_ss_sss_index[1]"," subsubstream_index = rng.s_ss_sss_index[2]"," new_rngs.append(MRG32k3a(s_ss_sss_index=[stream_index, substream_index + n, subsubstream_index]))"," random_rng = new_rngs"," return random_rng","","def strtobool(t):"," t = t.lower()"," if t == \"t\":"," return True"," else:"," return False","","n_inst = int(input('Please enter the number of instance you want to generate: '))","rand = input('Please decide whether you want to generate random instances or determinent instances (T/F): ')","rand = strtobool(rand)","","model_fixed_factors = {} # Override model factors","","myproblem = SANLongestPathConstr(random=True, model_fixed_factors=model_fixed_factors)","","random_rng = [MRG32k3a(s_ss_sss_index=[2, 4, ss]) for ss in range(myproblem.model.n_random, myproblem.model.n_random + 
myproblem.n_rngs)]","rng_list2 = [MRG32k3a(s_ss_sss_index=[2, 4, ss]) for ss in range(myproblem.model.n_random)]","","# Generate 5 random problem instances","for i in range(n_inst):"," random_rng = rebase(random_rng, 1)"," rng_list2 = rebase(rng_list2, 1)"," myproblem = SANLongestPathConstr(random=rand, random_rng=rng_list2, model_fixed_factors=model_fixed_factors)"," myproblem.attach_rngs(random_rng)"," problem_name = myproblem.model.name + str(i)"," print('-------------------------------------------------------')"," print(f\"Testing solver {solver_name} on problem {problem_name}.\")",""," # Specify file path name for storing experiment outputs in .pickle file."," file_name_path = \"experiments/outputs/\" + solver_name + \"_on_\" + problem_name + \".pickle\""," print(f\"Results will be stored as {file_name_path}.\")",""," # Initialize an instance of the experiment class."," myexperiment = ProblemSolver(solver_name=solver_name, problem=myproblem)",""," # Run a fixed number of macroreplications of the solver on the problem."," myexperiment.run(n_macroreps=100)",""," # If the solver runs have already been performed, uncomment the"," # following pair of lines (and uncommmen the myexperiment.run(...)"," # line above) to read in results from a .pickle file."," # myexperiment = read_experiment_results(file_name_path)",""," print(\"Post-processing results.\")"," # Run a fixed number of postreplications at all recommended solutions."," myexperiment.post_replicate(n_postreps=1) #200, 10"," # Find an optimal solution x* for normalization."," post_normalize([myexperiment], n_postreps_init_opt=1) #200, 5",""," # Log results."," myexperiment.log_experiment_results()",""," print(\"Optimal solution: \",np.array(myexperiment.xstar))"," print(\"Optimal Value: \", myexperiment.all_est_objectives[0])",""," print(\"Plotting results.\")"," # Produce basic plots of the solver on the problem."," plot_progress_curves(experiments=[myexperiment], plot_type=\"all\", normalize=False)"," 
plot_progress_curves(experiments=[myexperiment], plot_type=\"mean\", normalize=False)"," plot_progress_curves(experiments=[myexperiment], plot_type=\"quantile\", beta=0.90, normalize=False)"," plot_solvability_cdfs(experiments=[myexperiment], solve_tol=0.1)",""," # Plots will be saved in the folder experiments/plots."," print(\"Finished. Plots can be found in experiments/plots folder.\")"],"stylingDirectives":[[{"start":0,"end":3,"cssClass":"pl-s"}],[{"start":0,"end":75,"cssClass":"pl-s"}],[{"start":0,"end":74,"cssClass":"pl-s"}],[{"start":0,"end":47,"cssClass":"pl-s"}],[{"start":0,"end":3,"cssClass":"pl-s"}],[],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":10,"cssClass":"pl-s1"}],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":9,"cssClass":"pl-s1"},{"start":10,"end":14,"cssClass":"pl-s1"},{"start":15,"end":17,"cssClass":"pl-k"},{"start":18,"end":19,"cssClass":"pl-s1"}],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":12,"cssClass":"pl-s1"},{"start":13,"end":15,"cssClass":"pl-k"},{"start":16,"end":18,"cssClass":"pl-s1"}],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":9,"cssClass":"pl-s1"}],[{"start":0,"end":3,"cssClass":"pl-s1"},{"start":4,"end":8,"cssClass":"pl-s1"},{"start":9,"end":15,"cssClass":"pl-en"},{"start":16,"end":17,"cssClass":"pl-s1"},{"start":18,"end":25,"cssClass":"pl-en"},{"start":26,"end":27,"cssClass":"pl-s1"},{"start":28,"end":32,"cssClass":"pl-en"},{"start":33,"end":34,"cssClass":"pl-s1"},{"start":35,"end":42,"cssClass":"pl-en"},{"start":43,"end":46,"cssClass":"pl-s1"},{"start":47,"end":54,"cssClass":"pl-s1"},{"start":55,"end":63,"cssClass":"pl-s1"},{"start":65,"end":73,"cssClass":"pl-s1"},{"start":76,"end":80,"cssClass":"pl-s"}],[],[{"start":0,"end":59,"cssClass":"pl-c"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":27,"cssClass":"pl-s1"},{"start":28,"end":34,"cssClass":"pl-k"},{"start":35,"end":48,"cssClass":"pl-v"},{"start":50,"end":73,"cssClass":"pl-s1"},{"st
art":75,"end":89,"cssClass":"pl-s1"},{"start":91,"end":111,"cssClass":"pl-s1"},{"start":113,"end":134,"cssClass":"pl-s1"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":8,"cssClass":"pl-s1"},{"start":9,"end":17,"cssClass":"pl-s1"},{"start":18,"end":24,"cssClass":"pl-k"},{"start":25,"end":33,"cssClass":"pl-v"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":18,"cssClass":"pl-s1"},{"start":19,"end":24,"cssClass":"pl-s1"},{"start":25,"end":31,"cssClass":"pl-k"},{"start":32,"end":46,"cssClass":"pl-v"},{"start":48,"end":68,"cssClass":"pl-v"}],[],[{"start":0,"end":65,"cssClass":"pl-c"}],[{"start":0,"end":36,"cssClass":"pl-c"}],[{"start":0,"end":42,"cssClass":"pl-c"}],[],[{"start":0,"end":49,"cssClass":"pl-c"}],[{"start":0,"end":11,"cssClass":"pl-s1"},{"start":12,"end":13,"cssClass":"pl-c1"},{"start":14,"end":23,"cssClass":"pl-s"},{"start":25,"end":47,"cssClass":"pl-c"}],[{"start":0,"end":49,"cssClass":"pl-c"}],[],[],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":10,"cssClass":"pl-en"},{"start":11,"end":21,"cssClass":"pl-s1"},{"start":23,"end":24,"cssClass":"pl-s1"}],[{"start":4,"end":12,"cssClass":"pl-s1"},{"start":13,"end":14,"cssClass":"pl-c1"}],[{"start":4,"end":7,"cssClass":"pl-k"},{"start":8,"end":11,"cssClass":"pl-s1"},{"start":12,"end":14,"cssClass":"pl-c1"},{"start":15,"end":25,"cssClass":"pl-s1"}],[{"start":8,"end":20,"cssClass":"pl-s1"},{"start":21,"end":22,"cssClass":"pl-c1"},{"start":23,"end":26,"cssClass":"pl-s1"},{"start":27,"end":41,"cssClass":"pl-s1"},{"start":42,"end":43,"cssClass":"pl-c1"}],[{"start":8,"end":23,"cssClass":"pl-s1"},{"start":24,"end":25,"cssClass":"pl-c1"},{"start":26,"end":29,"cssClass":"pl-s1"},{"start":30,"end":44,"cssClass":"pl-s1"},{"start":45,"end":46,"cssClass":"pl-c1"}],[{"start":8,"end":26,"cssClass":"pl-s1"},{"start":27,"end":28,"cssClass":"pl-c1"},{"start":29,"end":32,"cssClass":"pl-s1"},{"start":33,"end":47,"cssClass":"pl-s1"},{"start":48,"end":49,"cssClass":"
pl-c1"}],[{"start":8,"end":16,"cssClass":"pl-s1"},{"start":17,"end":23,"cssClass":"pl-en"},{"start":24,"end":32,"cssClass":"pl-v"},{"start":33,"end":47,"cssClass":"pl-s1"},{"start":47,"end":48,"cssClass":"pl-c1"},{"start":49,"end":61,"cssClass":"pl-s1"},{"start":63,"end":78,"cssClass":"pl-s1"},{"start":79,"end":80,"cssClass":"pl-c1"},{"start":81,"end":82,"cssClass":"pl-s1"},{"start":84,"end":102,"cssClass":"pl-s1"}],[{"start":4,"end":14,"cssClass":"pl-s1"},{"start":15,"end":16,"cssClass":"pl-c1"},{"start":17,"end":25,"cssClass":"pl-s1"}],[{"start":4,"end":10,"cssClass":"pl-k"},{"start":11,"end":21,"cssClass":"pl-s1"}],[],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":13,"cssClass":"pl-en"},{"start":14,"end":15,"cssClass":"pl-s1"}],[{"start":4,"end":5,"cssClass":"pl-s1"},{"start":6,"end":7,"cssClass":"pl-c1"},{"start":8,"end":9,"cssClass":"pl-s1"},{"start":10,"end":15,"cssClass":"pl-en"}],[{"start":4,"end":6,"cssClass":"pl-k"},{"start":7,"end":8,"cssClass":"pl-s1"},{"start":9,"end":11,"cssClass":"pl-c1"},{"start":12,"end":15,"cssClass":"pl-s"}],[{"start":8,"end":14,"cssClass":"pl-k"},{"start":15,"end":19,"cssClass":"pl-c1"}],[{"start":4,"end":8,"cssClass":"pl-k"}],[{"start":8,"end":14,"cssClass":"pl-k"},{"start":15,"end":20,"cssClass":"pl-c1"}],[],[{"start":0,"end":6,"cssClass":"pl-s1"},{"start":7,"end":8,"cssClass":"pl-c1"},{"start":9,"end":12,"cssClass":"pl-en"},{"start":13,"end":18,"cssClass":"pl-en"},{"start":19,"end":79,"cssClass":"pl-s"}],[{"start":0,"end":4,"cssClass":"pl-s1"},{"start":5,"end":6,"cssClass":"pl-c1"},{"start":7,"end":12,"cssClass":"pl-en"},{"start":13,"end":107,"cssClass":"pl-s"}],[{"start":0,"end":4,"cssClass":"pl-s1"},{"start":5,"end":6,"cssClass":"pl-c1"},{"start":7,"end":16,"cssClass":"pl-en"},{"start":17,"end":21,"cssClass":"pl-s1"}],[],[{"start":0,"end":19,"cssClass":"pl-s1"},{"start":20,"end":21,"cssClass":"pl-c1"},{"start":26,"end":50,"cssClass":"pl-c"}],[],[{"start":0,"end":9,"cssClass":"pl-s1"},{"start":10,"end":11,"cssClass":
"pl-c1"},{"start":12,"end":32,"cssClass":"pl-v"},{"start":33,"end":39,"cssClass":"pl-s1"},{"start":39,"end":40,"cssClass":"pl-c1"},{"start":40,"end":44,"cssClass":"pl-c1"},{"start":46,"end":65,"cssClass":"pl-s1"},{"start":65,"end":66,"cssClass":"pl-c1"},{"start":66,"end":85,"cssClass":"pl-s1"}],[],[{"start":0,"end":10,"cssClass":"pl-s1"},{"start":11,"end":12,"cssClass":"pl-c1"},{"start":14,"end":22,"cssClass":"pl-v"},{"start":23,"end":37,"cssClass":"pl-s1"},{"start":37,"end":38,"cssClass":"pl-c1"},{"start":39,"end":40,"cssClass":"pl-c1"},{"start":42,"end":43,"cssClass":"pl-c1"},{"start":45,"end":47,"cssClass":"pl-s1"},{"start":50,"end":53,"cssClass":"pl-k"},{"start":54,"end":56,"cssClass":"pl-s1"},{"start":57,"end":59,"cssClass":"pl-c1"},{"start":60,"end":65,"cssClass":"pl-en"},{"start":66,"end":75,"cssClass":"pl-s1"},{"start":76,"end":81,"cssClass":"pl-s1"},{"start":82,"end":90,"cssClass":"pl-s1"},{"start":92,"end":101,"cssClass":"pl-s1"},{"start":102,"end":107,"cssClass":"pl-s1"},{"start":108,"end":116,"cssClass":"pl-s1"},{"start":117,"end":118,"cssClass":"pl-c1"},{"start":119,"end":128,"cssClass":"pl-s1"},{"start":129,"end":135,"cssClass":"pl-s1"}],[{"start":0,"end":9,"cssClass":"pl-s1"},{"start":10,"end":11,"cssClass":"pl-c1"},{"start":13,"end":21,"cssClass":"pl-v"},{"start":22,"end":36,"cssClass":"pl-s1"},{"start":36,"end":37,"cssClass":"pl-c1"},{"start":38,"end":39,"cssClass":"pl-c1"},{"start":41,"end":42,"cssClass":"pl-c1"},{"start":44,"end":46,"cssClass":"pl-s1"},{"start":49,"end":52,"cssClass":"pl-k"},{"start":53,"end":55,"cssClass":"pl-s1"},{"start":56,"end":58,"cssClass":"pl-c1"},{"start":59,"end":64,"cssClass":"pl-en"},{"start":65,"end":74,"cssClass":"pl-s1"},{"start":75,"end":80,"cssClass":"pl-s1"},{"start":81,"end":89,"cssClass":"pl-s1"}],[],[{"start":0,"end":37,"cssClass":"pl-c"}],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":5,"cssClass":"pl-s1"},{"start":6,"end":8,"cssClass":"pl-c1"},{"start":9,"end":14,"cssClass":"pl-en"},{"start":15,"end
":21,"cssClass":"pl-s1"}],[{"start":4,"end":14,"cssClass":"pl-s1"},{"start":15,"end":16,"cssClass":"pl-c1"},{"start":17,"end":23,"cssClass":"pl-en"},{"start":24,"end":34,"cssClass":"pl-s1"},{"start":36,"end":37,"cssClass":"pl-c1"}],[{"start":4,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-c1"},{"start":16,"end":22,"cssClass":"pl-en"},{"start":23,"end":32,"cssClass":"pl-s1"},{"start":34,"end":35,"cssClass":"pl-c1"}],[{"start":4,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-c1"},{"start":16,"end":36,"cssClass":"pl-v"},{"start":37,"end":43,"cssClass":"pl-s1"},{"start":43,"end":44,"cssClass":"pl-c1"},{"start":44,"end":48,"cssClass":"pl-s1"},{"start":50,"end":60,"cssClass":"pl-s1"},{"start":60,"end":61,"cssClass":"pl-c1"},{"start":61,"end":70,"cssClass":"pl-s1"},{"start":72,"end":91,"cssClass":"pl-s1"},{"start":91,"end":92,"cssClass":"pl-c1"},{"start":92,"end":111,"cssClass":"pl-s1"}],[{"start":4,"end":13,"cssClass":"pl-s1"},{"start":14,"end":25,"cssClass":"pl-en"},{"start":26,"end":36,"cssClass":"pl-s1"}],[{"start":4,"end":16,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":19,"end":28,"cssClass":"pl-s1"},{"start":29,"end":34,"cssClass":"pl-s1"},{"start":35,"end":39,"cssClass":"pl-s1"},{"start":40,"end":41,"cssClass":"pl-c1"},{"start":42,"end":45,"cssClass":"pl-en"},{"start":46,"end":47,"cssClass":"pl-s1"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":67,"cssClass":"pl-s"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":68,"cssClass":"pl-s"},{"start":27,"end":40,"cssClass":"pl-s1"},{"start":27,"end":28,"cssClass":"pl-kos"},{"start":28,"end":39,"cssClass":"pl-s1"},{"start":39,"end":40,"cssClass":"pl-kos"},{"start":52,"end":66,"cssClass":"pl-s1"},{"start":52,"end":53,"cssClass":"pl-kos"},{"start":53,"end":65,"cssClass":"pl-s1"},{"start":65,"end":66,"cssClass":"pl-kos"}],[],[{"start":4,"end":76,"cssClass":"pl-c"}],[{"start":4,"end":18,"cssClass":"pl-s1"},{"start":19,"end":20,"cssClass":"pl-c
1"},{"start":21,"end":43,"cssClass":"pl-s"},{"start":44,"end":45,"cssClass":"pl-c1"},{"start":46,"end":57,"cssClass":"pl-s1"},{"start":58,"end":59,"cssClass":"pl-c1"},{"start":60,"end":66,"cssClass":"pl-s"},{"start":67,"end":68,"cssClass":"pl-c1"},{"start":69,"end":81,"cssClass":"pl-s1"},{"start":82,"end":83,"cssClass":"pl-c1"},{"start":84,"end":93,"cssClass":"pl-s"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":56,"cssClass":"pl-s"},{"start":38,"end":54,"cssClass":"pl-s1"},{"start":38,"end":39,"cssClass":"pl-kos"},{"start":39,"end":53,"cssClass":"pl-s1"},{"start":53,"end":54,"cssClass":"pl-kos"}],[],[{"start":4,"end":53,"cssClass":"pl-c"}],[{"start":4,"end":16,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":19,"end":32,"cssClass":"pl-v"},{"start":33,"end":44,"cssClass":"pl-s1"},{"start":44,"end":45,"cssClass":"pl-c1"},{"start":45,"end":56,"cssClass":"pl-s1"},{"start":58,"end":65,"cssClass":"pl-s1"},{"start":65,"end":66,"cssClass":"pl-c1"},{"start":66,"end":75,"cssClass":"pl-s1"}],[],[{"start":4,"end":75,"cssClass":"pl-c"}],[{"start":4,"end":16,"cssClass":"pl-s1"},{"start":17,"end":20,"cssClass":"pl-en"},{"start":21,"end":32,"cssClass":"pl-s1"},{"start":32,"end":33,"cssClass":"pl-c1"},{"start":33,"end":36,"cssClass":"pl-c1"}],[],[{"start":4,"end":67,"cssClass":"pl-c"}],[{"start":4,"end":70,"cssClass":"pl-c"}],[{"start":4,"end":57,"cssClass":"pl-c"}],[{"start":4,"end":60,"cssClass":"pl-c"}],[],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":36,"cssClass":"pl-s"}],[{"start":4,"end":74,"cssClass":"pl-c"}],[{"start":4,"end":16,"cssClass":"pl-s1"},{"start":17,"end":31,"cssClass":"pl-en"},{"start":32,"end":42,"cssClass":"pl-s1"},{"start":42,"end":43,"cssClass":"pl-c1"},{"start":43,"end":44,"cssClass":"pl-c1"},{"start":46,"end":54,"cssClass":"pl-c"}],[{"start":4,"end":52,"cssClass":"pl-c"}],[{"start":4,"end":18,"cssClass":"pl-en"},{"start":20,"end":32,"cssClass":"pl-s1"},{"start":35,"end":54,"cssClass":"pl-s1"},{"start":54,"end"
:55,"cssClass":"pl-c1"},{"start":55,"end":56,"cssClass":"pl-c1"},{"start":58,"end":65,"cssClass":"pl-c"}],[],[{"start":4,"end":18,"cssClass":"pl-c"}],[{"start":4,"end":16,"cssClass":"pl-s1"},{"start":17,"end":39,"cssClass":"pl-en"}],[],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":30,"cssClass":"pl-s"},{"start":31,"end":33,"cssClass":"pl-s1"},{"start":34,"end":39,"cssClass":"pl-en"},{"start":40,"end":52,"cssClass":"pl-s1"},{"start":53,"end":58,"cssClass":"pl-s1"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":27,"cssClass":"pl-s"},{"start":29,"end":41,"cssClass":"pl-s1"},{"start":42,"end":60,"cssClass":"pl-s1"},{"start":61,"end":62,"cssClass":"pl-c1"}],[],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":29,"cssClass":"pl-s"}],[{"start":4,"end":55,"cssClass":"pl-c"}],[{"start":4,"end":24,"cssClass":"pl-en"},{"start":25,"end":36,"cssClass":"pl-s1"},{"start":36,"end":37,"cssClass":"pl-c1"},{"start":38,"end":50,"cssClass":"pl-s1"},{"start":53,"end":62,"cssClass":"pl-s1"},{"start":62,"end":63,"cssClass":"pl-c1"},{"start":63,"end":68,"cssClass":"pl-s"},{"start":70,"end":79,"cssClass":"pl-s1"},{"start":79,"end":80,"cssClass":"pl-c1"},{"start":80,"end":85,"cssClass":"pl-c1"}],[{"start":4,"end":24,"cssClass":"pl-en"},{"start":25,"end":36,"cssClass":"pl-s1"},{"start":36,"end":37,"cssClass":"pl-c1"},{"start":38,"end":50,"cssClass":"pl-s1"},{"start":53,"end":62,"cssClass":"pl-s1"},{"start":62,"end":63,"cssClass":"pl-c1"},{"start":63,"end":69,"cssClass":"pl-s"},{"start":71,"end":80,"cssClass":"pl-s1"},{"start":80,"end":81,"cssClass":"pl-c1"},{"start":81,"end":86,"cssClass":"pl-c1"}],[{"start":4,"end":24,"cssClass":"pl-en"},{"start":25,"end":36,"cssClass":"pl-s1"},{"start":36,"end":37,"cssClass":"pl-c1"},{"start":38,"end":50,"cssClass":"pl-s1"},{"start":53,"end":62,"cssClass":"pl-s1"},{"start":62,"end":63,"cssClass":"pl-c1"},{"start":63,"end":73,"cssClass":"pl-s"},{"start":75,"end":79,"cssClass":"pl-s1"},{"start":79,"end":80,"cssClass":"pl-c1"},{"
start":80,"end":84,"cssClass":"pl-c1"},{"start":86,"end":95,"cssClass":"pl-s1"},{"start":95,"end":96,"cssClass":"pl-c1"},{"start":96,"end":101,"cssClass":"pl-c1"}],[{"start":4,"end":25,"cssClass":"pl-en"},{"start":26,"end":37,"cssClass":"pl-s1"},{"start":37,"end":38,"cssClass":"pl-c1"},{"start":39,"end":51,"cssClass":"pl-s1"},{"start":54,"end":63,"cssClass":"pl-s1"},{"start":63,"end":64,"cssClass":"pl-c1"},{"start":64,"end":67,"cssClass":"pl-c1"}],[],[{"start":4,"end":58,"cssClass":"pl-c"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":69,"cssClass":"pl-s"}]],"csv":null,"csvError":null,"dependabotInfo":{"showConfigurationBanner":null,"configFilePath":null,"networkDependabotPath":"/simopt-admin/simopt/network/updates","dismissConfigurationNoticePath":"/settings/dismiss-notice/dependabot_configuration_notice","configurationNoticeDismissed":false,"repoAlertsPath":"/simopt-admin/simopt/security/dependabot","repoSecurityAndAnalysisPath":"/simopt-admin/simopt/settings/security_analysis","repoOwnerIsOrg":false,"currentUserCanAdminRepo":false},"displayName":"demo_random_problem_solver.py","displayUrl":"https://github.com/simopt-admin/simopt/blob/python_dev_litong/demo_random_problem_solver.py?raw=true","headerInfo":{"blobSize":"4.17 KB","deleteInfo":{"deleteTooltip":"Delete this file"},"editInfo":{"editTooltip":"Edit this 
file"},"ghDesktopPath":"x-github-client://openRepo/https://github.com/simopt-admin/simopt?branch=python_dev_litong&filepath=demo_random_problem_solver.py","gitLfsPath":null,"onBranch":true,"shortPath":"dc37c07","siteNavLoginPath":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsimopt-admin%2Fsimopt%2Fblob%2Fpython_dev_litong%2Fdemo_random_problem_solver.py","isCSV":false,"isRichtext":false,"toc":null,"lineInfo":{"truncatedLoc":"100","truncatedSloc":"79"},"mode":"file"},"image":false,"isCodeownersFile":null,"isPlain":false,"isValidLegacyIssueTemplate":false,"issueTemplateHelpUrl":"https://docs.github.com/articles/about-issue-and-pull-request-templates","issueTemplate":null,"discussionTemplate":null,"language":"Python","languageID":303,"large":false,"loggedIn":true,"newDiscussionPath":"/simopt-admin/simopt/discussions/new","newIssuePath":"/simopt-admin/simopt/issues/new","planSupportInfo":{"repoIsFork":null,"repoOwnedByCurrentUser":null,"requestFullPath":"/simopt-admin/simopt/blob/python_dev_litong/demo_random_problem_solver.py","showFreeOrgGatedFeatureMessage":null,"showPlanSupportBanner":null,"upgradeDataAttributes":null,"upgradePath":null},"publishBannersInfo":{"dismissActionNoticePath":"/settings/dismiss-notice/publish_action_from_dockerfile","dismissStackNoticePath":"/settings/dismiss-notice/publish_stack_from_file","releasePath":"/simopt-admin/simopt/releases/new?marketplace=true","showPublishActionBanner":false,"showPublishStackBanner":false},"renderImageOrRaw":false,"richText":null,"renderedFileInfo":null,"shortPath":null,"tabSize":8,"topBannersInfo":{"overridingGlobalFundingFile":false,"globalPreferredFundingPath":null,"repoOwner":"simopt-admin","repoName":"simopt","showInvalidCitationWarning":false,"citationHelpUrl":"https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/creating-a-repository-on-github/about-citation-files","showDependabotConfigurationBanner":null,"actionsOnboardingTip":null},"truncated":false,"viewable":true,"workflowRed
irectUrl":null,"symbols":{"timedOut":false,"notAnalyzed":false,"symbols":[{"name":"solver_name","kind":"constant","identStart":849,"identEnd":860,"extentStart":849,"extentEnd":872,"fullyQualifiedName":"solver_name","identUtf16":{"start":{"lineNumber":22,"utf16Col":0},"end":{"lineNumber":22,"utf16Col":11}},"extentUtf16":{"start":{"lineNumber":22,"utf16Col":0},"end":{"lineNumber":22,"utf16Col":23}}},{"name":"rebase","kind":"function","identStart":953,"identEnd":959,"extentStart":949,"extentEnd":1318,"fullyQualifiedName":"rebase","identUtf16":{"start":{"lineNumber":26,"utf16Col":4},"end":{"lineNumber":26,"utf16Col":10}},"extentUtf16":{"start":{"lineNumber":26,"utf16Col":0},"end":{"lineNumber":34,"utf16Col":21}}},{"name":"strtobool","kind":"function","identStart":1324,"identEnd":1333,"extentStart":1320,"extentEnd":1423,"fullyQualifiedName":"strtobool","identUtf16":{"start":{"lineNumber":36,"utf16Col":4},"end":{"lineNumber":36,"utf16Col":13}},"extentUtf16":{"start":{"lineNumber":36,"utf16Col":0},"end":{"lineNumber":41,"utf16Col":20}}},{"name":"n_inst","kind":"constant","identStart":1425,"identEnd":1431,"extentStart":1425,"extentEnd":1506,"fullyQualifiedName":"n_inst","identUtf16":{"start":{"lineNumber":43,"utf16Col":0},"end":{"lineNumber":43,"utf16Col":6}},"extentUtf16":{"start":{"lineNumber":43,"utf16Col":0},"end":{"lineNumber":43,"utf16Col":81}}},{"name":"rand","kind":"constant","identStart":1507,"identEnd":1511,"extentStart":1507,"extentEnd":1615,"fullyQualifiedName":"rand","identUtf16":{"start":{"lineNumber":44,"utf16Col":0},"end":{"lineNumber":44,"utf16Col":4}},"extentUtf16":{"start":{"lineNumber":44,"utf16Col":0},"end":{"lineNumber":44,"utf16Col":108}}},{"name":"rand","kind":"constant","identStart":1616,"identEnd":1620,"extentStart":1616,"extentEnd":1638,"fullyQualifiedName":"rand","identUtf16":{"start":{"lineNumber":45,"utf16Col":0},"end":{"lineNumber":45,"utf16Col":4}},"extentUtf16":{"start":{"lineNumber":45,"utf16Col":0},"end":{"lineNumber":45,"utf16Col":22}}},{
"name":"model_fixed_factors","kind":"constant","identStart":1640,"identEnd":1659,"extentStart":1640,"extentEnd":1664,"fullyQualifiedName":"model_fixed_factors","identUtf16":{"start":{"lineNumber":47,"utf16Col":0},"end":{"lineNumber":47,"utf16Col":19}},"extentUtf16":{"start":{"lineNumber":47,"utf16Col":0},"end":{"lineNumber":47,"utf16Col":24}}},{"name":"myproblem","kind":"constant","identStart":1692,"identEnd":1701,"extentStart":1692,"extentEnd":1778,"fullyQualifiedName":"myproblem","identUtf16":{"start":{"lineNumber":49,"utf16Col":0},"end":{"lineNumber":49,"utf16Col":9}},"extentUtf16":{"start":{"lineNumber":49,"utf16Col":0},"end":{"lineNumber":49,"utf16Col":86}}},{"name":"random_rng","kind":"constant","identStart":1780,"identEnd":1790,"extentStart":1780,"extentEnd":1917,"fullyQualifiedName":"random_rng","identUtf16":{"start":{"lineNumber":51,"utf16Col":0},"end":{"lineNumber":51,"utf16Col":10}},"extentUtf16":{"start":{"lineNumber":51,"utf16Col":0},"end":{"lineNumber":51,"utf16Col":137}}},{"name":"rng_list2","kind":"constant","identStart":1918,"identEnd":1927,"extentStart":1918,"extentEnd":2009,"fullyQualifiedName":"rng_list2","identUtf16":{"start":{"lineNumber":52,"utf16Col":0},"end":{"lineNumber":52,"utf16Col":9}},"extentUtf16":{"start":{"lineNumber":52,"utf16Col":0},"end":{"lineNumber":52,"utf16Col":91}}}]}},"copilotInfo":{"notices":{"codeViewPopover":{"dismissed":false,"dismissPath":"/settings/dismiss-notice/code_view_copilot_popover"}},"userAccess":{"accessAllowed":false,"hasSubscriptionEnded":false,"orgHasCFBAccess":false,"userHasCFIAccess":false,"userHasOrgs":false,"userIsOrgAdmin":false,"userIsOrgMember":false,"business":null,"featureRequestInfo":null}},"csrf_tokens":{"/simopt-admin/simopt/branches":{"post":"bo50Ta0pqezuFqdyhIuxc4A08ugUj2YisoOlcEleflvGgbDCQyrX6x2QrXdyQL1Gx5s7jQXbLyjUL5oqWBfTZA"},"/repos/preferences":{"post":"wcttIkjLR6fY-zNC2Fo7-UJslO5L-3dymWrHjxwpae5VIvim4U7WKfi5aFRtkY0DfMXteLVfrWvlUETm6eCsag"}}},"title":"simopt/demo_random_problem_solver.py 
at python_dev_litong · simopt-admin/simopt"} \ No newline at end of file diff --git a/demo/demo_user.py b/demo/demo_user.py new file mode 100644 index 000000000..ee336c814 --- /dev/null +++ b/demo/demo_user.py @@ -0,0 +1 @@ +{"payload":{"allShortcutsEnabled":true,"fileTree":{"":{"items":[{"name":"data_farming_experiments","path":"data_farming_experiments","contentType":"directory"},{"name":"demo","path":"demo","contentType":"directory"},{"name":"dist","path":"dist","contentType":"directory"},{"name":"docs","path":"docs","contentType":"directory"},{"name":"experiments","path":"experiments","contentType":"directory"},{"name":"notebooks","path":"notebooks","contentType":"directory"},{"name":"simopt","path":"simopt","contentType":"directory"},{"name":"simoptlib.egg-info","path":"simoptlib.egg-info","contentType":"directory"},{"name":"workshop","path":"workshop","contentType":"directory"},{"name":".gitignore","path":".gitignore","contentType":"file"},{"name":"LICENSE","path":"LICENSE","contentType":"file"},{"name":"README.md","path":"README.md","contentType":"file"},{"name":"demo_radom_model.py","path":"demo_radom_model.py","contentType":"file"},{"name":"demo_random_problem.py","path":"demo_random_problem.py","contentType":"file"},{"name":"demo_random_problem_solver.py","path":"demo_random_problem_solver.py","contentType":"file"},{"name":"demo_user.py","path":"demo_user.py","contentType":"file"},{"name":"pyproject.toml","path":"pyproject.toml","contentType":"file"}],"totalCount":17}},"fileTreeProcessingTime":2.910284,"foldersToFetch":[],"reducedMotionEnabled":"system","repo":{"id":194012165,"defaultBranch":"master","name":"simopt","ownerLogin":"simopt-admin","currentUserCanPush":true,"isFork":false,"isEmpty":false,"createdAt":"2019-06-26T22:55:30.000-04:00","ownerAvatar":"https://avatars.githubusercontent.com/u/52267122?v=4","public":true,"private":false,"isOrgOwned":false},"symbolsExpanded":false,"treeExpanded":true,"refInfo":{"name":"python_dev_litong","listCacheKey":"
v0:1692137230.0","canEdit":true,"refType":"branch","currentOid":"382561d40918dac6fcfb54e7c1f873bdca0f46e9"},"path":"demo_user.py","currentUser":{"id":46491025,"login":"liulitong-Jessie","userEmail":"118010185@link.cuhk.edu.cn"},"blob":{"rawLines":["\"\"\"","This script is the user interface for generating multiple random problem instances and","solve them by specified solvers.","It create problem-solver groups and runs multiple","macroreplications of each problem-solver pair. To run the file, user need","to import the solver and probelm they want to build random instances at the beginning,","and also provide an input file, which include the information needed to ","build random instances (the name of problem, number of random instances to ","generate, and some overriding factors).","\"\"\"","","import sys","import os.path as o","import os","import re","sys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), \"..\")))","","# Import the ProblemsSolvers class and other useful functions","from simopt.experiment_base import ProblemsSolvers, plot_solvability_profiles","from rng.mrg32k3a import MRG32k3a","from simopt.base import Solution","from simopt.models.smf import SMF_Max","from simopt.models.rmitd import RMITDMaxRevenue","from simopt.models.san_2 import SANLongestPath, SANLongestPathConstr","from simopt.models.mm1queue import MM1MinMeanSojournTime","","","# !! 
When testing a new solver/problem, first import problems from the random code file,","# Then create a test_input.txt file in your computer.","# There you should add the import statement and an entry in the file","# You need to specify name of solvers and problems you want to test in the file by 'solver_name'","# And specify the problem related informations by problem = [...]","# All lines start with '#' will be counted as commend and will not be implemented","# See the following example for more details.","","# Ex:","# To create two random instance of SAN and three random instances of SMF:","# In the demo_user.py, modify:","# from simopt.models.smf import SMF_Max","# from simopt.models.san_2 import SANLongestPath","# In the input information file (test_input.txt), include the following lines:","# solver_names = [\"RNDSRCH\", \"ASTRODF\", \"NELDMD\"]","# problem1 = [SANLongestPath, 2, {'num_nodes':8, 'num_arcs':12}]","# problem2 = [SMF_Max, 3, {'num_nodes':7, 'num_arcs':16}]","","# Grab information from the input file","def get_info(path):"," L = []"," with open(path) as file:"," lines = [line.rstrip() for line in file]"," for line in lines:"," if not line.startswith(\"#\") and line:"," L.append(line)"," lines = L"," command_lines = []"," problem_sets = []"," for line in lines:"," if 'import' in line:"," command_lines.append(line)"," elif 'solver_names' in line:"," solver_names = line"," else:"," problem_sets.append(line)",""," for i in command_lines:"," exec(i)"," "," problems = []"," solver_names = eval(re.findall(r'\\[.*?\\]', solver_names)[0])"," for l in problem_sets:"," o = re.findall(r'\\[.*?\\]', l)[0]"," problems.append(eval(o))"," "," problem_sets = [p[0] for p in problems]"," L_num = [p[1] for p in problems]"," L_para = [p[2] for p in problems]"," "," return solver_names, problem_sets, L_num, L_para","","# Read input file and process information","path = input('Please input the path of the input file: ')","if \"'\" in path: # If the input path already has 
quotation marks"," path = path.replace(\"'\", \"\")"," ","solver_names, problem_set, L_num, L_para = get_info(path)","rands = [True for i in range(len(problem_set))]","","# Check whether the input file is valid","if len(L_num) != len(problem_set) or len(L_para) != len(problem_set):"," print('Invalid input. The input number of random instances does not match with the number of problems you want.')"," print('Please check your input file')","","def rebase(random_rng, n):"," new_rngs = []"," for rng in random_rng:"," stream_index = rng.s_ss_sss_index[0]"," substream_index = rng.s_ss_sss_index[1]"," subsubstream_index = rng.s_ss_sss_index[2]"," new_rngs.append(MRG32k3a(s_ss_sss_index=[stream_index, substream_index + n, subsubstream_index]))"," random_rng = new_rngs"," return random_rng","","myproblems = problem_set","","# Check whether the problem is random","for i in range(len(problem_set)):"," if L_num[i] == 0:"," L_num[i] = 1"," rands[i] = False"," else:"," rands[i] = True","","problems = []","problem_names = []","","def generate_problem(i, myproblems, rands, problems, L_num, L_para):"," print('For problem ', myproblems[i]().name, ':') "," model_fixed_factors = L_para[i]"," "," name = myproblems[i]"," myproblem = name(model_fixed_factors=model_fixed_factors, random=rands[i])"," random_rng = [MRG32k3a(s_ss_sss_index=[2, 4 + L_num[i], ss]) for ss in range(myproblem.n_rngs)]"," rng_list2 = [MRG32k3a(s_ss_sss_index=[2, 4, ss]) for ss in range(myproblem.model.n_random)]"," "," if rands[i] == False: # Determinant case"," problems.append(myproblem)"," myproblem.name = str(myproblem.model.name) + str(0)"," problem_names.append(myproblem.name)"," print('')"," "," else:"," for j in range(L_num[i]):"," random_rng = rebase(random_rng, 1) # Advance the substream for different instances"," rng_list2 = rebase(rng_list2, 1)"," name = myproblems[i]"," myproblem = name(model_fixed_factors=model_fixed_factors, random=rands[i], random_rng=rng_list2)"," 
myproblem.attach_rngs(random_rng)"," # myproblem.name = str(myproblem.model.name) + str(j)"," myproblem.name = str(myproblem.name) + '-' + str(j)"," problems.append(myproblem)"," problem_names.append(myproblem.name)"," print('')"," "," return problems, problem_names"," ","# Generate problems","for i in range(len(L_num)):"," problems, problem_names = generate_problem(i, myproblems, rands, problems, L_num, L_para)","","# Initialize an instance of the experiment class.","mymetaexperiment = ProblemsSolvers(solver_names=solver_names, problems = problems)","","# Run a fixed number of macroreplications of each solver on each problem.","mymetaexperiment.run(n_macroreps=3)","","print(\"Post-processing results.\")","# Run a fixed number of postreplications at all recommended solutions.","mymetaexperiment.post_replicate(n_postreps=20)","# Find an optimal solution x* for normalization.","mymetaexperiment.post_normalize(n_postreps_init_opt=20)","","print(\"Plotting results.\")","# Produce basic plots of the solvers on the problems.","plot_solvability_profiles(experiments=mymetaexperiment.experiments, plot_type=\"cdf_solvability\")","","# Plots will be saved in the folder experiments/plots.","print(\"Finished. 
Plots can be found in experiments/plots folder.\")"],"stylingDirectives":[[{"start":0,"end":3,"cssClass":"pl-s"}],[{"start":0,"end":86,"cssClass":"pl-s"}],[{"start":0,"end":32,"cssClass":"pl-s"}],[{"start":0,"end":49,"cssClass":"pl-s"}],[{"start":0,"end":73,"cssClass":"pl-s"}],[{"start":0,"end":86,"cssClass":"pl-s"}],[{"start":0,"end":72,"cssClass":"pl-s"}],[{"start":0,"end":75,"cssClass":"pl-s"}],[{"start":0,"end":39,"cssClass":"pl-s"}],[{"start":0,"end":3,"cssClass":"pl-s"}],[],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":10,"cssClass":"pl-s1"}],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":9,"cssClass":"pl-s1"},{"start":10,"end":14,"cssClass":"pl-s1"},{"start":15,"end":17,"cssClass":"pl-k"},{"start":18,"end":19,"cssClass":"pl-s1"}],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":9,"cssClass":"pl-s1"}],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":9,"cssClass":"pl-s1"}],[{"start":0,"end":3,"cssClass":"pl-s1"},{"start":4,"end":8,"cssClass":"pl-s1"},{"start":9,"end":15,"cssClass":"pl-en"},{"start":16,"end":17,"cssClass":"pl-s1"},{"start":18,"end":25,"cssClass":"pl-en"},{"start":26,"end":27,"cssClass":"pl-s1"},{"start":28,"end":32,"cssClass":"pl-en"},{"start":33,"end":34,"cssClass":"pl-s1"},{"start":35,"end":42,"cssClass":"pl-en"},{"start":43,"end":46,"cssClass":"pl-s1"},{"start":47,"end":54,"cssClass":"pl-s1"},{"start":55,"end":63,"cssClass":"pl-s1"},{"start":65,"end":73,"cssClass":"pl-s1"},{"start":76,"end":80,"cssClass":"pl-s"}],[],[{"start":0,"end":61,"cssClass":"pl-c"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":27,"cssClass":"pl-s1"},{"start":28,"end":34,"cssClass":"pl-k"},{"start":35,"end":50,"cssClass":"pl-v"},{"start":52,"end":77,"cssClass":"pl-s1"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":8,"cssClass":"pl-s1"},{"start":9,"end":17,"cssClass":"pl-s1"},{"start":18,"end":24,"cssClass":"pl-k"},{"start":25,"end":33,"cssClass":"pl-v"}],[{"start":0,"end":4,"
cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":16,"cssClass":"pl-s1"},{"start":17,"end":23,"cssClass":"pl-k"},{"start":24,"end":32,"cssClass":"pl-v"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":18,"cssClass":"pl-s1"},{"start":19,"end":22,"cssClass":"pl-s1"},{"start":23,"end":29,"cssClass":"pl-k"},{"start":30,"end":37,"cssClass":"pl-v"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":18,"cssClass":"pl-s1"},{"start":19,"end":24,"cssClass":"pl-s1"},{"start":25,"end":31,"cssClass":"pl-k"},{"start":32,"end":47,"cssClass":"pl-v"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":18,"cssClass":"pl-s1"},{"start":19,"end":24,"cssClass":"pl-s1"},{"start":25,"end":31,"cssClass":"pl-k"},{"start":32,"end":46,"cssClass":"pl-v"},{"start":48,"end":68,"cssClass":"pl-v"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":18,"cssClass":"pl-s1"},{"start":19,"end":27,"cssClass":"pl-s1"},{"start":28,"end":34,"cssClass":"pl-k"},{"start":35,"end":56,"cssClass":"pl-v"}],[],[],[{"start":0,"end":88,"cssClass":"pl-c"}],[{"start":0,"end":53,"cssClass":"pl-c"}],[{"start":0,"end":68,"cssClass":"pl-c"}],[{"start":0,"end":96,"cssClass":"pl-c"}],[{"start":0,"end":65,"cssClass":"pl-c"}],[{"start":0,"end":81,"cssClass":"pl-c"}],[{"start":0,"end":45,"cssClass":"pl-c"}],[],[{"start":0,"end":5,"cssClass":"pl-c"}],[{"start":0,"end":73,"cssClass":"pl-c"}],[{"start":0,"end":30,"cssClass":"pl-c"}],[{"start":0,"end":39,"cssClass":"pl-c"}],[{"start":0,"end":48,"cssClass":"pl-c"}],[{"start":0,"end":78,"cssClass":"pl-c"}],[{"start":0,"end":49,"cssClass":"pl-c"}],[{"start":0,"end":64,"cssClass":"pl-c"}],[{"start":0,"end":57,"cssClass":"pl-c"}],[],[{"start":0,"end":38,"cssClass":"pl-c"}],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":12,"cssClass":"pl-en"},{"start":13,"end":17,"css
Class":"pl-s1"}],[{"start":4,"end":5,"cssClass":"pl-v"},{"start":6,"end":7,"cssClass":"pl-c1"}],[{"start":4,"end":8,"cssClass":"pl-k"},{"start":9,"end":13,"cssClass":"pl-en"},{"start":14,"end":18,"cssClass":"pl-s1"},{"start":20,"end":22,"cssClass":"pl-k"},{"start":23,"end":27,"cssClass":"pl-s1"}],[{"start":8,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-c1"},{"start":17,"end":21,"cssClass":"pl-s1"},{"start":22,"end":28,"cssClass":"pl-en"},{"start":31,"end":34,"cssClass":"pl-k"},{"start":35,"end":39,"cssClass":"pl-s1"},{"start":40,"end":42,"cssClass":"pl-c1"},{"start":43,"end":47,"cssClass":"pl-s1"}],[{"start":8,"end":11,"cssClass":"pl-k"},{"start":12,"end":16,"cssClass":"pl-s1"},{"start":17,"end":19,"cssClass":"pl-c1"},{"start":20,"end":25,"cssClass":"pl-s1"}],[{"start":12,"end":14,"cssClass":"pl-k"},{"start":15,"end":18,"cssClass":"pl-c1"},{"start":19,"end":23,"cssClass":"pl-s1"},{"start":24,"end":34,"cssClass":"pl-en"},{"start":35,"end":38,"cssClass":"pl-s"},{"start":40,"end":43,"cssClass":"pl-c1"},{"start":44,"end":48,"cssClass":"pl-s1"}],[{"start":16,"end":17,"cssClass":"pl-v"},{"start":18,"end":24,"cssClass":"pl-en"},{"start":25,"end":29,"cssClass":"pl-s1"}],[{"start":4,"end":9,"cssClass":"pl-s1"},{"start":10,"end":11,"cssClass":"pl-c1"},{"start":12,"end":13,"cssClass":"pl-v"}],[{"start":4,"end":17,"cssClass":"pl-s1"},{"start":18,"end":19,"cssClass":"pl-c1"}],[{"start":4,"end":16,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"}],[{"start":4,"end":7,"cssClass":"pl-k"},{"start":8,"end":12,"cssClass":"pl-s1"},{"start":13,"end":15,"cssClass":"pl-c1"},{"start":16,"end":21,"cssClass":"pl-s1"}],[{"start":8,"end":10,"cssClass":"pl-k"},{"start":11,"end":19,"cssClass":"pl-s"},{"start":20,"end":22,"cssClass":"pl-c1"},{"start":23,"end":27,"cssClass":"pl-s1"}],[{"start":12,"end":25,"cssClass":"pl-s1"},{"start":26,"end":32,"cssClass":"pl-en"},{"start":33,"end":37,"cssClass":"pl-s1"}],[{"start":8,"end":12,"cssClass":"pl-k"},{"start":13,"end":27,
"cssClass":"pl-s"},{"start":28,"end":30,"cssClass":"pl-c1"},{"start":31,"end":35,"cssClass":"pl-s1"}],[{"start":12,"end":24,"cssClass":"pl-s1"},{"start":25,"end":26,"cssClass":"pl-c1"},{"start":27,"end":31,"cssClass":"pl-s1"}],[{"start":8,"end":12,"cssClass":"pl-k"}],[{"start":12,"end":24,"cssClass":"pl-s1"},{"start":25,"end":31,"cssClass":"pl-en"},{"start":32,"end":36,"cssClass":"pl-s1"}],[],[{"start":4,"end":7,"cssClass":"pl-k"},{"start":8,"end":9,"cssClass":"pl-s1"},{"start":10,"end":12,"cssClass":"pl-c1"},{"start":13,"end":26,"cssClass":"pl-s1"}],[{"start":8,"end":12,"cssClass":"pl-en"},{"start":13,"end":14,"cssClass":"pl-s1"}],[],[{"start":4,"end":12,"cssClass":"pl-s1"},{"start":13,"end":14,"cssClass":"pl-c1"}],[{"start":4,"end":16,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":19,"end":23,"cssClass":"pl-en"},{"start":24,"end":26,"cssClass":"pl-s1"},{"start":27,"end":34,"cssClass":"pl-en"},{"start":35,"end":45,"cssClass":"pl-s"},{"start":47,"end":59,"cssClass":"pl-s1"},{"start":61,"end":62,"cssClass":"pl-c1"}],[{"start":4,"end":7,"cssClass":"pl-k"},{"start":8,"end":9,"cssClass":"pl-s1"},{"start":10,"end":12,"cssClass":"pl-c1"},{"start":13,"end":25,"cssClass":"pl-s1"}],[{"start":8,"end":9,"cssClass":"pl-s1"},{"start":10,"end":11,"cssClass":"pl-c1"},{"start":12,"end":14,"cssClass":"pl-s1"},{"start":15,"end":22,"cssClass":"pl-en"},{"start":23,"end":33,"cssClass":"pl-s"},{"start":35,"end":36,"cssClass":"pl-s1"},{"start":38,"end":39,"cssClass":"pl-c1"}],[{"start":8,"end":16,"cssClass":"pl-s1"},{"start":17,"end":23,"cssClass":"pl-en"},{"start":24,"end":28,"cssClass":"pl-en"},{"start":29,"end":30,"cssClass":"pl-s1"}],[],[{"start":4,"end":16,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":20,"end":21,"cssClass":"pl-s1"},{"start":22,"end":23,"cssClass":"pl-c1"},{"start":25,"end":28,"cssClass":"pl-k"},{"start":29,"end":30,"cssClass":"pl-s1"},{"start":31,"end":33,"cssClass":"pl-c1"},{"start":34,"end":42,"cssClass":"pl-s1"}],[{"
start":4,"end":9,"cssClass":"pl-v"},{"start":10,"end":11,"cssClass":"pl-c1"},{"start":13,"end":14,"cssClass":"pl-s1"},{"start":15,"end":16,"cssClass":"pl-c1"},{"start":18,"end":21,"cssClass":"pl-k"},{"start":22,"end":23,"cssClass":"pl-s1"},{"start":24,"end":26,"cssClass":"pl-c1"},{"start":27,"end":35,"cssClass":"pl-s1"}],[{"start":4,"end":10,"cssClass":"pl-v"},{"start":11,"end":12,"cssClass":"pl-c1"},{"start":14,"end":15,"cssClass":"pl-s1"},{"start":16,"end":17,"cssClass":"pl-c1"},{"start":19,"end":22,"cssClass":"pl-k"},{"start":23,"end":24,"cssClass":"pl-s1"},{"start":25,"end":27,"cssClass":"pl-c1"},{"start":28,"end":36,"cssClass":"pl-s1"}],[],[{"start":4,"end":10,"cssClass":"pl-k"},{"start":11,"end":23,"cssClass":"pl-s1"},{"start":25,"end":37,"cssClass":"pl-s1"},{"start":39,"end":44,"cssClass":"pl-v"},{"start":46,"end":52,"cssClass":"pl-v"}],[],[{"start":0,"end":41,"cssClass":"pl-c"}],[{"start":0,"end":4,"cssClass":"pl-s1"},{"start":5,"end":6,"cssClass":"pl-c1"},{"start":7,"end":12,"cssClass":"pl-en"},{"start":13,"end":56,"cssClass":"pl-s"}],[{"start":0,"end":2,"cssClass":"pl-k"},{"start":3,"end":6,"cssClass":"pl-s"},{"start":7,"end":9,"cssClass":"pl-c1"},{"start":10,"end":14,"cssClass":"pl-s1"},{"start":17,"end":64,"cssClass":"pl-c"}],[{"start":4,"end":8,"cssClass":"pl-s1"},{"start":9,"end":10,"cssClass":"pl-c1"},{"start":11,"end":15,"cssClass":"pl-s1"},{"start":16,"end":23,"cssClass":"pl-en"},{"start":24,"end":27,"cssClass":"pl-s"},{"start":29,"end":31,"cssClass":"pl-s"}],[],[{"start":0,"end":12,"cssClass":"pl-s1"},{"start":14,"end":25,"cssClass":"pl-s1"},{"start":27,"end":32,"cssClass":"pl-v"},{"start":34,"end":40,"cssClass":"pl-v"},{"start":41,"end":42,"cssClass":"pl-c1"},{"start":43,"end":51,"cssClass":"pl-en"},{"start":52,"end":56,"cssClass":"pl-s1"}],[{"start":0,"end":5,"cssClass":"pl-s1"},{"start":6,"end":7,"cssClass":"pl-c1"},{"start":9,"end":13,"cssClass":"pl-c1"},{"start":14,"end":17,"cssClass":"pl-k"},{"start":18,"end":19,"cssClass":"pl-s1"},{"start":2
0,"end":22,"cssClass":"pl-c1"},{"start":23,"end":28,"cssClass":"pl-en"},{"start":29,"end":32,"cssClass":"pl-en"},{"start":33,"end":44,"cssClass":"pl-s1"}],[],[{"start":0,"end":39,"cssClass":"pl-c"}],[{"start":0,"end":2,"cssClass":"pl-k"},{"start":3,"end":6,"cssClass":"pl-en"},{"start":7,"end":12,"cssClass":"pl-v"},{"start":14,"end":16,"cssClass":"pl-c1"},{"start":17,"end":20,"cssClass":"pl-en"},{"start":21,"end":32,"cssClass":"pl-s1"},{"start":34,"end":36,"cssClass":"pl-c1"},{"start":37,"end":40,"cssClass":"pl-en"},{"start":41,"end":47,"cssClass":"pl-v"},{"start":49,"end":51,"cssClass":"pl-c1"},{"start":52,"end":55,"cssClass":"pl-en"},{"start":56,"end":67,"cssClass":"pl-s1"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":116,"cssClass":"pl-s"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":40,"cssClass":"pl-s"}],[],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":10,"cssClass":"pl-en"},{"start":11,"end":21,"cssClass":"pl-s1"},{"start":23,"end":24,"cssClass":"pl-s1"}],[{"start":4,"end":12,"cssClass":"pl-s1"},{"start":13,"end":14,"cssClass":"pl-c1"}],[{"start":4,"end":7,"cssClass":"pl-k"},{"start":8,"end":11,"cssClass":"pl-s1"},{"start":12,"end":14,"cssClass":"pl-c1"},{"start":15,"end":25,"cssClass":"pl-s1"}],[{"start":8,"end":20,"cssClass":"pl-s1"},{"start":21,"end":22,"cssClass":"pl-c1"},{"start":23,"end":26,"cssClass":"pl-s1"},{"start":27,"end":41,"cssClass":"pl-s1"},{"start":42,"end":43,"cssClass":"pl-c1"}],[{"start":8,"end":23,"cssClass":"pl-s1"},{"start":24,"end":25,"cssClass":"pl-c1"},{"start":26,"end":29,"cssClass":"pl-s1"},{"start":30,"end":44,"cssClass":"pl-s1"},{"start":45,"end":46,"cssClass":"pl-c1"}],[{"start":8,"end":26,"cssClass":"pl-s1"},{"start":27,"end":28,"cssClass":"pl-c1"},{"start":29,"end":32,"cssClass":"pl-s1"},{"start":33,"end":47,"cssClass":"pl-s1"},{"start":48,"end":49,"cssClass":"pl-c1"}],[{"start":8,"end":16,"cssClass":"pl-s1"},{"start":17,"end":23,"cssClass":"pl-en"},{"start":24,"end":32,"cssClass":"pl-v"},{"
start":33,"end":47,"cssClass":"pl-s1"},{"start":47,"end":48,"cssClass":"pl-c1"},{"start":49,"end":61,"cssClass":"pl-s1"},{"start":63,"end":78,"cssClass":"pl-s1"},{"start":79,"end":80,"cssClass":"pl-c1"},{"start":81,"end":82,"cssClass":"pl-s1"},{"start":84,"end":102,"cssClass":"pl-s1"}],[{"start":4,"end":14,"cssClass":"pl-s1"},{"start":15,"end":16,"cssClass":"pl-c1"},{"start":17,"end":25,"cssClass":"pl-s1"}],[{"start":4,"end":10,"cssClass":"pl-k"},{"start":11,"end":21,"cssClass":"pl-s1"}],[],[{"start":0,"end":10,"cssClass":"pl-s1"},{"start":11,"end":12,"cssClass":"pl-c1"},{"start":13,"end":24,"cssClass":"pl-s1"}],[],[{"start":0,"end":37,"cssClass":"pl-c"}],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":5,"cssClass":"pl-s1"},{"start":6,"end":8,"cssClass":"pl-c1"},{"start":9,"end":14,"cssClass":"pl-en"},{"start":15,"end":18,"cssClass":"pl-en"},{"start":19,"end":30,"cssClass":"pl-s1"}],[{"start":4,"end":6,"cssClass":"pl-k"},{"start":7,"end":12,"cssClass":"pl-v"},{"start":13,"end":14,"cssClass":"pl-s1"},{"start":16,"end":18,"cssClass":"pl-c1"},{"start":19,"end":20,"cssClass":"pl-c1"}],[{"start":8,"end":13,"cssClass":"pl-v"},{"start":14,"end":15,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":19,"end":20,"cssClass":"pl-c1"}],[{"start":8,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":19,"end":24,"cssClass":"pl-c1"}],[{"start":4,"end":8,"cssClass":"pl-k"}],[{"start":8,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":19,"end":23,"cssClass":"pl-c1"}],[],[{"start":0,"end":8,"cssClass":"pl-s1"},{"start":9,"end":10,"cssClass":"pl-c1"}],[{"start":0,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-c1"}],[],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":20,"cssClass":"pl-en"},{"start":21,"end":22,"cssClass":"pl-s1"},{"start":24,"end":34,"cssClass":"pl-s1"},{"start":36,"end":41,"cssClas
s":"pl-s1"},{"start":43,"end":51,"cssClass":"pl-s1"},{"start":53,"end":58,"cssClass":"pl-v"},{"start":60,"end":66,"cssClass":"pl-v"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":24,"cssClass":"pl-s"},{"start":26,"end":36,"cssClass":"pl-s1"},{"start":37,"end":38,"cssClass":"pl-s1"},{"start":42,"end":46,"cssClass":"pl-s1"},{"start":48,"end":51,"cssClass":"pl-s"}],[{"start":4,"end":23,"cssClass":"pl-s1"},{"start":24,"end":25,"cssClass":"pl-c1"},{"start":26,"end":32,"cssClass":"pl-v"},{"start":33,"end":34,"cssClass":"pl-s1"}],[],[{"start":4,"end":8,"cssClass":"pl-s1"},{"start":9,"end":10,"cssClass":"pl-c1"},{"start":11,"end":21,"cssClass":"pl-s1"},{"start":22,"end":23,"cssClass":"pl-s1"}],[{"start":4,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-c1"},{"start":16,"end":20,"cssClass":"pl-en"},{"start":21,"end":40,"cssClass":"pl-s1"},{"start":40,"end":41,"cssClass":"pl-c1"},{"start":41,"end":60,"cssClass":"pl-s1"},{"start":62,"end":68,"cssClass":"pl-s1"},{"start":68,"end":69,"cssClass":"pl-c1"},{"start":69,"end":74,"cssClass":"pl-s1"},{"start":75,"end":76,"cssClass":"pl-s1"}],[{"start":4,"end":14,"cssClass":"pl-s1"},{"start":15,"end":16,"cssClass":"pl-c1"},{"start":18,"end":26,"cssClass":"pl-v"},{"start":27,"end":41,"cssClass":"pl-s1"},{"start":41,"end":42,"cssClass":"pl-c1"},{"start":43,"end":44,"cssClass":"pl-c1"},{"start":46,"end":47,"cssClass":"pl-c1"},{"start":48,"end":49,"cssClass":"pl-c1"},{"start":50,"end":55,"cssClass":"pl-v"},{"start":56,"end":57,"cssClass":"pl-s1"},{"start":60,"end":62,"cssClass":"pl-s1"},{"start":65,"end":68,"cssClass":"pl-k"},{"start":69,"end":71,"cssClass":"pl-s1"},{"start":72,"end":74,"cssClass":"pl-c1"},{"start":75,"end":80,"cssClass":"pl-en"},{"start":81,"end":90,"cssClass":"pl-s1"},{"start":91,"end":97,"cssClass":"pl-s1"}],[{"start":4,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-c1"},{"start":17,"end":25,"cssClass":"pl-v"},{"start":26,"end":40,"cssClass":"pl-s1"},{"start":40,"end":41,"c
ssClass":"pl-c1"},{"start":42,"end":43,"cssClass":"pl-c1"},{"start":45,"end":46,"cssClass":"pl-c1"},{"start":48,"end":50,"cssClass":"pl-s1"},{"start":53,"end":56,"cssClass":"pl-k"},{"start":57,"end":59,"cssClass":"pl-s1"},{"start":60,"end":62,"cssClass":"pl-c1"},{"start":63,"end":68,"cssClass":"pl-en"},{"start":69,"end":78,"cssClass":"pl-s1"},{"start":79,"end":84,"cssClass":"pl-s1"},{"start":85,"end":93,"cssClass":"pl-s1"}],[],[{"start":4,"end":6,"cssClass":"pl-k"},{"start":7,"end":12,"cssClass":"pl-s1"},{"start":13,"end":14,"cssClass":"pl-s1"},{"start":16,"end":18,"cssClass":"pl-c1"},{"start":19,"end":24,"cssClass":"pl-c1"},{"start":27,"end":45,"cssClass":"pl-c"}],[{"start":8,"end":16,"cssClass":"pl-s1"},{"start":17,"end":23,"cssClass":"pl-en"},{"start":24,"end":33,"cssClass":"pl-s1"}],[{"start":8,"end":17,"cssClass":"pl-s1"},{"start":18,"end":22,"cssClass":"pl-s1"},{"start":23,"end":24,"cssClass":"pl-c1"},{"start":25,"end":28,"cssClass":"pl-en"},{"start":29,"end":38,"cssClass":"pl-s1"},{"start":39,"end":44,"cssClass":"pl-s1"},{"start":45,"end":49,"cssClass":"pl-s1"},{"start":51,"end":52,"cssClass":"pl-c1"},{"start":53,"end":56,"cssClass":"pl-en"},{"start":57,"end":58,"cssClass":"pl-c1"}],[{"start":8,"end":21,"cssClass":"pl-s1"},{"start":22,"end":28,"cssClass":"pl-en"},{"start":29,"end":38,"cssClass":"pl-s1"},{"start":39,"end":43,"cssClass":"pl-s1"}],[{"start":8,"end":13,"cssClass":"pl-en"},{"start":14,"end":16,"cssClass":"pl-s"}],[],[{"start":4,"end":8,"cssClass":"pl-k"}],[{"start":8,"end":11,"cssClass":"pl-k"},{"start":12,"end":13,"cssClass":"pl-s1"},{"start":14,"end":16,"cssClass":"pl-c1"},{"start":17,"end":22,"cssClass":"pl-en"},{"start":23,"end":28,"cssClass":"pl-v"},{"start":29,"end":30,"cssClass":"pl-s1"}],[{"start":12,"end":22,"cssClass":"pl-s1"},{"start":23,"end":24,"cssClass":"pl-c1"},{"start":25,"end":31,"cssClass":"pl-en"},{"start":32,"end":42,"cssClass":"pl-s1"},{"start":44,"end":45,"cssClass":"pl-c1"},{"start":48,"end":95,"cssClass":"pl-c"}],[{"start"
:12,"end":21,"cssClass":"pl-s1"},{"start":22,"end":23,"cssClass":"pl-c1"},{"start":24,"end":30,"cssClass":"pl-en"},{"start":31,"end":40,"cssClass":"pl-s1"},{"start":42,"end":43,"cssClass":"pl-c1"}],[{"start":12,"end":16,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":19,"end":29,"cssClass":"pl-s1"},{"start":30,"end":31,"cssClass":"pl-s1"}],[{"start":12,"end":21,"cssClass":"pl-s1"},{"start":22,"end":23,"cssClass":"pl-c1"},{"start":24,"end":28,"cssClass":"pl-en"},{"start":29,"end":48,"cssClass":"pl-s1"},{"start":48,"end":49,"cssClass":"pl-c1"},{"start":49,"end":68,"cssClass":"pl-s1"},{"start":70,"end":76,"cssClass":"pl-s1"},{"start":76,"end":77,"cssClass":"pl-c1"},{"start":77,"end":82,"cssClass":"pl-s1"},{"start":83,"end":84,"cssClass":"pl-s1"},{"start":87,"end":97,"cssClass":"pl-s1"},{"start":97,"end":98,"cssClass":"pl-c1"},{"start":98,"end":107,"cssClass":"pl-s1"}],[{"start":12,"end":21,"cssClass":"pl-s1"},{"start":22,"end":33,"cssClass":"pl-en"},{"start":34,"end":44,"cssClass":"pl-s1"}],[{"start":12,"end":65,"cssClass":"pl-c"}],[{"start":12,"end":21,"cssClass":"pl-s1"},{"start":22,"end":26,"cssClass":"pl-s1"},{"start":27,"end":28,"cssClass":"pl-c1"},{"start":29,"end":32,"cssClass":"pl-en"},{"start":33,"end":42,"cssClass":"pl-s1"},{"start":43,"end":47,"cssClass":"pl-s1"},{"start":49,"end":50,"cssClass":"pl-c1"},{"start":51,"end":54,"cssClass":"pl-s"},{"start":55,"end":56,"cssClass":"pl-c1"},{"start":57,"end":60,"cssClass":"pl-en"},{"start":61,"end":62,"cssClass":"pl-s1"}],[{"start":12,"end":20,"cssClass":"pl-s1"},{"start":21,"end":27,"cssClass":"pl-en"},{"start":28,"end":37,"cssClass":"pl-s1"}],[{"start":12,"end":25,"cssClass":"pl-s1"},{"start":26,"end":32,"cssClass":"pl-en"},{"start":33,"end":42,"cssClass":"pl-s1"},{"start":43,"end":47,"cssClass":"pl-s1"}],[{"start":12,"end":17,"cssClass":"pl-en"},{"start":18,"end":20,"cssClass":"pl-s"}],[],[{"start":4,"end":10,"cssClass":"pl-k"},{"start":11,"end":19,"cssClass":"pl-s1"},{"start":21,"end":34,"c
ssClass":"pl-s1"}],[],[{"start":0,"end":19,"cssClass":"pl-c"}],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":5,"cssClass":"pl-s1"},{"start":6,"end":8,"cssClass":"pl-c1"},{"start":9,"end":14,"cssClass":"pl-en"},{"start":15,"end":18,"cssClass":"pl-en"},{"start":19,"end":24,"cssClass":"pl-v"}],[{"start":8,"end":16,"cssClass":"pl-s1"},{"start":18,"end":31,"cssClass":"pl-s1"},{"start":32,"end":33,"cssClass":"pl-c1"},{"start":34,"end":50,"cssClass":"pl-en"},{"start":51,"end":52,"cssClass":"pl-s1"},{"start":54,"end":64,"cssClass":"pl-s1"},{"start":66,"end":71,"cssClass":"pl-s1"},{"start":73,"end":81,"cssClass":"pl-s1"},{"start":83,"end":88,"cssClass":"pl-v"},{"start":90,"end":96,"cssClass":"pl-v"}],[],[{"start":0,"end":49,"cssClass":"pl-c"}],[{"start":0,"end":16,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":19,"end":34,"cssClass":"pl-v"},{"start":35,"end":47,"cssClass":"pl-s1"},{"start":47,"end":48,"cssClass":"pl-c1"},{"start":48,"end":60,"cssClass":"pl-s1"},{"start":62,"end":70,"cssClass":"pl-s1"},{"start":71,"end":72,"cssClass":"pl-c1"},{"start":73,"end":81,"cssClass":"pl-s1"}],[],[{"start":0,"end":73,"cssClass":"pl-c"}],[{"start":0,"end":16,"cssClass":"pl-s1"},{"start":17,"end":20,"cssClass":"pl-en"},{"start":21,"end":32,"cssClass":"pl-s1"},{"start":32,"end":33,"cssClass":"pl-c1"},{"start":33,"end":34,"cssClass":"pl-c1"}],[],[{"start":0,"end":5,"cssClass":"pl-en"},{"start":6,"end":32,"cssClass":"pl-s"}],[{"start":0,"end":70,"cssClass":"pl-c"}],[{"start":0,"end":16,"cssClass":"pl-s1"},{"start":17,"end":31,"cssClass":"pl-en"},{"start":32,"end":42,"cssClass":"pl-s1"},{"start":42,"end":43,"cssClass":"pl-c1"},{"start":43,"end":45,"cssClass":"pl-c1"}],[{"start":0,"end":48,"cssClass":"pl-c"}],[{"start":0,"end":16,"cssClass":"pl-s1"},{"start":17,"end":31,"cssClass":"pl-en"},{"start":32,"end":51,"cssClass":"pl-s1"},{"start":51,"end":52,"cssClass":"pl-c1"},{"start":52,"end":54,"cssClass":"pl-c1"}],[],[{"start":0,"end":5,"cssClass":"pl-en"},{"star
t":6,"end":25,"cssClass":"pl-s"}],[{"start":0,"end":53,"cssClass":"pl-c"}],[{"start":0,"end":25,"cssClass":"pl-en"},{"start":26,"end":37,"cssClass":"pl-s1"},{"start":37,"end":38,"cssClass":"pl-c1"},{"start":38,"end":54,"cssClass":"pl-s1"},{"start":55,"end":66,"cssClass":"pl-s1"},{"start":68,"end":77,"cssClass":"pl-s1"},{"start":77,"end":78,"cssClass":"pl-c1"},{"start":78,"end":95,"cssClass":"pl-s"}],[],[{"start":0,"end":54,"cssClass":"pl-c"}],[{"start":0,"end":5,"cssClass":"pl-en"},{"start":6,"end":65,"cssClass":"pl-s"}]],"csv":null,"csvError":null,"dependabotInfo":{"showConfigurationBanner":null,"configFilePath":null,"networkDependabotPath":"/simopt-admin/simopt/network/updates","dismissConfigurationNoticePath":"/settings/dismiss-notice/dependabot_configuration_notice","configurationNoticeDismissed":false,"repoAlertsPath":"/simopt-admin/simopt/security/dependabot","repoSecurityAndAnalysisPath":"/simopt-admin/simopt/settings/security_analysis","repoOwnerIsOrg":false,"currentUserCanAdminRepo":false},"displayName":"demo_user.py","displayUrl":"https://github.com/simopt-admin/simopt/blob/python_dev_litong/demo_user.py?raw=true","headerInfo":{"blobSize":"6.31 KB","deleteInfo":{"deleteTooltip":"Delete this file"},"editInfo":{"editTooltip":"Edit this 
file"},"ghDesktopPath":"x-github-client://openRepo/https://github.com/simopt-admin/simopt?branch=python_dev_litong&filepath=demo_user.py","gitLfsPath":null,"onBranch":true,"shortPath":"1872aa0","siteNavLoginPath":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsimopt-admin%2Fsimopt%2Fblob%2Fpython_dev_litong%2Fdemo_user.py","isCSV":false,"isRichtext":false,"toc":null,"lineInfo":{"truncatedLoc":"167","truncatedSloc":"139"},"mode":"file"},"image":false,"isCodeownersFile":null,"isPlain":false,"isValidLegacyIssueTemplate":false,"issueTemplateHelpUrl":"https://docs.github.com/articles/about-issue-and-pull-request-templates","issueTemplate":null,"discussionTemplate":null,"language":"Python","languageID":303,"large":false,"loggedIn":true,"newDiscussionPath":"/simopt-admin/simopt/discussions/new","newIssuePath":"/simopt-admin/simopt/issues/new","planSupportInfo":{"repoIsFork":null,"repoOwnedByCurrentUser":null,"requestFullPath":"/simopt-admin/simopt/blob/python_dev_litong/demo_user.py","showFreeOrgGatedFeatureMessage":null,"showPlanSupportBanner":null,"upgradeDataAttributes":null,"upgradePath":null},"publishBannersInfo":{"dismissActionNoticePath":"/settings/dismiss-notice/publish_action_from_dockerfile","dismissStackNoticePath":"/settings/dismiss-notice/publish_stack_from_file","releasePath":"/simopt-admin/simopt/releases/new?marketplace=true","showPublishActionBanner":false,"showPublishStackBanner":false},"renderImageOrRaw":false,"richText":null,"renderedFileInfo":null,"shortPath":null,"tabSize":8,"topBannersInfo":{"overridingGlobalFundingFile":false,"globalPreferredFundingPath":null,"repoOwner":"simopt-admin","repoName":"simopt","showInvalidCitationWarning":false,"citationHelpUrl":"https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/creating-a-repository-on-github/about-citation-files","showDependabotConfigurationBanner":null,"actionsOnboardingTip":null},"truncated":false,"viewable":true,"workflowRedirectUrl":null,"symbols":{"timedOut":false,"notAna
lyzed":false,"symbols":[{"name":"get_info","kind":"function","identStart":2086,"identEnd":2094,"extentStart":2082,"extentEnd":2985,"fullyQualifiedName":"get_info","identUtf16":{"start":{"lineNumber":46,"utf16Col":4},"end":{"lineNumber":46,"utf16Col":12}},"extentUtf16":{"start":{"lineNumber":46,"utf16Col":0},"end":{"lineNumber":77,"utf16Col":52}}},{"name":"path","kind":"constant","identStart":3029,"identEnd":3033,"extentStart":3029,"extentEnd":3086,"fullyQualifiedName":"path","identUtf16":{"start":{"lineNumber":80,"utf16Col":0},"end":{"lineNumber":80,"utf16Col":4}},"extentUtf16":{"start":{"lineNumber":80,"utf16Col":0},"end":{"lineNumber":80,"utf16Col":57}}},{"name":"rands","kind":"constant","identStart":3248,"identEnd":3253,"extentStart":3248,"extentEnd":3295,"fullyQualifiedName":"rands","identUtf16":{"start":{"lineNumber":85,"utf16Col":0},"end":{"lineNumber":85,"utf16Col":5}},"extentUtf16":{"start":{"lineNumber":85,"utf16Col":0},"end":{"lineNumber":85,"utf16Col":47}}},{"name":"rebase","kind":"function","identStart":3572,"identEnd":3578,"extentStart":3568,"extentEnd":3937,"fullyQualifiedName":"rebase","identUtf16":{"start":{"lineNumber":92,"utf16Col":4},"end":{"lineNumber":92,"utf16Col":10}},"extentUtf16":{"start":{"lineNumber":92,"utf16Col":0},"end":{"lineNumber":100,"utf16Col":21}}},{"name":"myproblems","kind":"constant","identStart":3939,"identEnd":3949,"extentStart":3939,"extentEnd":3963,"fullyQualifiedName":"myproblems","identUtf16":{"start":{"lineNumber":102,"utf16Col":0},"end":{"lineNumber":102,"utf16Col":10}},"extentUtf16":{"start":{"lineNumber":102,"utf16Col":0},"end":{"lineNumber":102,"utf16Col":24}}},{"name":"problems","kind":"constant","identStart":4140,"identEnd":4148,"extentStart":4140,"extentEnd":4153,"fullyQualifiedName":"problems","identUtf16":{"start":{"lineNumber":112,"utf16Col":0},"end":{"lineNumber":112,"utf16Col":8}},"extentUtf16":{"start":{"lineNumber":112,"utf16Col":0},"end":{"lineNumber":112,"utf16Col":13}}},{"name":"problem_names","kind":"co
nstant","identStart":4154,"identEnd":4167,"extentStart":4154,"extentEnd":4172,"fullyQualifiedName":"problem_names","identUtf16":{"start":{"lineNumber":113,"utf16Col":0},"end":{"lineNumber":113,"utf16Col":13}},"extentUtf16":{"start":{"lineNumber":113,"utf16Col":0},"end":{"lineNumber":113,"utf16Col":18}}},{"name":"generate_problem","kind":"function","identStart":4178,"identEnd":4194,"extentStart":4174,"extentEnd":5505,"fullyQualifiedName":"generate_problem","identUtf16":{"start":{"lineNumber":115,"utf16Col":4},"end":{"lineNumber":115,"utf16Col":20}},"extentUtf16":{"start":{"lineNumber":115,"utf16Col":0},"end":{"lineNumber":143,"utf16Col":34}}},{"name":"mymetaexperiment","kind":"constant","identStart":5707,"identEnd":5723,"extentStart":5707,"extentEnd":5789,"fullyQualifiedName":"mymetaexperiment","identUtf16":{"start":{"lineNumber":150,"utf16Col":0},"end":{"lineNumber":150,"utf16Col":16}},"extentUtf16":{"start":{"lineNumber":150,"utf16Col":0},"end":{"lineNumber":150,"utf16Col":82}}}]}},"copilotInfo":{"notices":{"codeViewPopover":{"dismissed":false,"dismissPath":"/settings/dismiss-notice/code_view_copilot_popover"}},"userAccess":{"accessAllowed":false,"hasSubscriptionEnded":false,"orgHasCFBAccess":false,"userHasCFIAccess":false,"userHasOrgs":false,"userIsOrgAdmin":false,"userIsOrgMember":false,"business":null,"featureRequestInfo":null}},"csrf_tokens":{"/simopt-admin/simopt/branches":{"post":"gb0BMxr-fXyz6kun91_9a83dZUM5fg4_5o-QJh56D-QpssW89P0De0BsQaIBlPFeinKsJigqRzWAI698DzOi2w"},"/repos/preferences":{"post":"EIlhdtGF6ei2TTLwdZupeDLV6yHP0AEgzvpCLVPSf8WEYPTyeAB4ZpYPaebAUB-CDHyStzF02zmywMFEphu6QQ"}}},"title":"simopt/demo_user.py at python_dev_litong · simopt-admin/simopt"} \ No newline at end of file From 13a3a8a52c0cfe409cfc70866b3e2d9d4b52f5f4 Mon Sep 17 00:00:00 2001 From: liulitong-Jessie <46491025+liulitong-Jessie@users.noreply.github.com> Date: Tue, 15 Aug 2023 18:19:35 -0400 Subject: [PATCH 12/21] Add files via upload --- simopt/demo_user.py | 1 + 1 file changed, 1 
insertion(+) create mode 100644 simopt/demo_user.py diff --git a/simopt/demo_user.py b/simopt/demo_user.py new file mode 100644 index 000000000..ee336c814 --- /dev/null +++ b/simopt/demo_user.py @@ -0,0 +1 @@ +{"payload":{"allShortcutsEnabled":true,"fileTree":{"":{"items":[{"name":"data_farming_experiments","path":"data_farming_experiments","contentType":"directory"},{"name":"demo","path":"demo","contentType":"directory"},{"name":"dist","path":"dist","contentType":"directory"},{"name":"docs","path":"docs","contentType":"directory"},{"name":"experiments","path":"experiments","contentType":"directory"},{"name":"notebooks","path":"notebooks","contentType":"directory"},{"name":"simopt","path":"simopt","contentType":"directory"},{"name":"simoptlib.egg-info","path":"simoptlib.egg-info","contentType":"directory"},{"name":"workshop","path":"workshop","contentType":"directory"},{"name":".gitignore","path":".gitignore","contentType":"file"},{"name":"LICENSE","path":"LICENSE","contentType":"file"},{"name":"README.md","path":"README.md","contentType":"file"},{"name":"demo_radom_model.py","path":"demo_radom_model.py","contentType":"file"},{"name":"demo_random_problem.py","path":"demo_random_problem.py","contentType":"file"},{"name":"demo_random_problem_solver.py","path":"demo_random_problem_solver.py","contentType":"file"},{"name":"demo_user.py","path":"demo_user.py","contentType":"file"},{"name":"pyproject.toml","path":"pyproject.toml","contentType":"file"}],"totalCount":17}},"fileTreeProcessingTime":2.910284,"foldersToFetch":[],"reducedMotionEnabled":"system","repo":{"id":194012165,"defaultBranch":"master","name":"simopt","ownerLogin":"simopt-admin","currentUserCanPush":true,"isFork":false,"isEmpty":false,"createdAt":"2019-06-26T22:55:30.000-04:00","ownerAvatar":"https://avatars.githubusercontent.com/u/52267122?v=4","public":true,"private":false,"isOrgOwned":false},"symbolsExpanded":false,"treeExpanded":true,"refInfo":{"name":"python_dev_litong","listCacheKey":"v0:1692137230.0
","canEdit":true,"refType":"branch","currentOid":"382561d40918dac6fcfb54e7c1f873bdca0f46e9"},"path":"demo_user.py","currentUser":{"id":46491025,"login":"liulitong-Jessie","userEmail":"118010185@link.cuhk.edu.cn"},"blob":{"rawLines":["\"\"\"","This script is the user interface for generating multiple random problem instances and","solve them by specified solvers.","It create problem-solver groups and runs multiple","macroreplications of each problem-solver pair. To run the file, user need","to import the solver and probelm they want to build random instances at the beginning,","and also provide an input file, which include the information needed to ","build random instances (the name of problem, number of random instances to ","generate, and some overriding factors).","\"\"\"","","import sys","import os.path as o","import os","import re","sys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), \"..\")))","","# Import the ProblemsSolvers class and other useful functions","from simopt.experiment_base import ProblemsSolvers, plot_solvability_profiles","from rng.mrg32k3a import MRG32k3a","from simopt.base import Solution","from simopt.models.smf import SMF_Max","from simopt.models.rmitd import RMITDMaxRevenue","from simopt.models.san_2 import SANLongestPath, SANLongestPathConstr","from simopt.models.mm1queue import MM1MinMeanSojournTime","","","# !! 
When testing a new solver/problem, first import problems from the random code file,","# Then create a test_input.txt file in your computer.","# There you should add the import statement and an entry in the file","# You need to specify name of solvers and problems you want to test in the file by 'solver_name'","# And specify the problem related informations by problem = [...]","# All lines start with '#' will be counted as commend and will not be implemented","# See the following example for more details.","","# Ex:","# To create two random instance of SAN and three random instances of SMF:","# In the demo_user.py, modify:","# from simopt.models.smf import SMF_Max","# from simopt.models.san_2 import SANLongestPath","# In the input information file (test_input.txt), include the following lines:","# solver_names = [\"RNDSRCH\", \"ASTRODF\", \"NELDMD\"]","# problem1 = [SANLongestPath, 2, {'num_nodes':8, 'num_arcs':12}]","# problem2 = [SMF_Max, 3, {'num_nodes':7, 'num_arcs':16}]","","# Grab information from the input file","def get_info(path):"," L = []"," with open(path) as file:"," lines = [line.rstrip() for line in file]"," for line in lines:"," if not line.startswith(\"#\") and line:"," L.append(line)"," lines = L"," command_lines = []"," problem_sets = []"," for line in lines:"," if 'import' in line:"," command_lines.append(line)"," elif 'solver_names' in line:"," solver_names = line"," else:"," problem_sets.append(line)",""," for i in command_lines:"," exec(i)"," "," problems = []"," solver_names = eval(re.findall(r'\\[.*?\\]', solver_names)[0])"," for l in problem_sets:"," o = re.findall(r'\\[.*?\\]', l)[0]"," problems.append(eval(o))"," "," problem_sets = [p[0] for p in problems]"," L_num = [p[1] for p in problems]"," L_para = [p[2] for p in problems]"," "," return solver_names, problem_sets, L_num, L_para","","# Read input file and process information","path = input('Please input the path of the input file: ')","if \"'\" in path: # If the input path already has 
quotation marks"," path = path.replace(\"'\", \"\")"," ","solver_names, problem_set, L_num, L_para = get_info(path)","rands = [True for i in range(len(problem_set))]","","# Check whether the input file is valid","if len(L_num) != len(problem_set) or len(L_para) != len(problem_set):"," print('Invalid input. The input number of random instances does not match with the number of problems you want.')"," print('Please check your input file')","","def rebase(random_rng, n):"," new_rngs = []"," for rng in random_rng:"," stream_index = rng.s_ss_sss_index[0]"," substream_index = rng.s_ss_sss_index[1]"," subsubstream_index = rng.s_ss_sss_index[2]"," new_rngs.append(MRG32k3a(s_ss_sss_index=[stream_index, substream_index + n, subsubstream_index]))"," random_rng = new_rngs"," return random_rng","","myproblems = problem_set","","# Check whether the problem is random","for i in range(len(problem_set)):"," if L_num[i] == 0:"," L_num[i] = 1"," rands[i] = False"," else:"," rands[i] = True","","problems = []","problem_names = []","","def generate_problem(i, myproblems, rands, problems, L_num, L_para):"," print('For problem ', myproblems[i]().name, ':') "," model_fixed_factors = L_para[i]"," "," name = myproblems[i]"," myproblem = name(model_fixed_factors=model_fixed_factors, random=rands[i])"," random_rng = [MRG32k3a(s_ss_sss_index=[2, 4 + L_num[i], ss]) for ss in range(myproblem.n_rngs)]"," rng_list2 = [MRG32k3a(s_ss_sss_index=[2, 4, ss]) for ss in range(myproblem.model.n_random)]"," "," if rands[i] == False: # Determinant case"," problems.append(myproblem)"," myproblem.name = str(myproblem.model.name) + str(0)"," problem_names.append(myproblem.name)"," print('')"," "," else:"," for j in range(L_num[i]):"," random_rng = rebase(random_rng, 1) # Advance the substream for different instances"," rng_list2 = rebase(rng_list2, 1)"," name = myproblems[i]"," myproblem = name(model_fixed_factors=model_fixed_factors, random=rands[i], random_rng=rng_list2)"," 
myproblem.attach_rngs(random_rng)"," # myproblem.name = str(myproblem.model.name) + str(j)"," myproblem.name = str(myproblem.name) + '-' + str(j)"," problems.append(myproblem)"," problem_names.append(myproblem.name)"," print('')"," "," return problems, problem_names"," ","# Generate problems","for i in range(len(L_num)):"," problems, problem_names = generate_problem(i, myproblems, rands, problems, L_num, L_para)","","# Initialize an instance of the experiment class.","mymetaexperiment = ProblemsSolvers(solver_names=solver_names, problems = problems)","","# Run a fixed number of macroreplications of each solver on each problem.","mymetaexperiment.run(n_macroreps=3)","","print(\"Post-processing results.\")","# Run a fixed number of postreplications at all recommended solutions.","mymetaexperiment.post_replicate(n_postreps=20)","# Find an optimal solution x* for normalization.","mymetaexperiment.post_normalize(n_postreps_init_opt=20)","","print(\"Plotting results.\")","# Produce basic plots of the solvers on the problems.","plot_solvability_profiles(experiments=mymetaexperiment.experiments, plot_type=\"cdf_solvability\")","","# Plots will be saved in the folder experiments/plots.","print(\"Finished. 
Plots can be found in experiments/plots folder.\")"],"stylingDirectives":[[{"start":0,"end":3,"cssClass":"pl-s"}],[{"start":0,"end":86,"cssClass":"pl-s"}],[{"start":0,"end":32,"cssClass":"pl-s"}],[{"start":0,"end":49,"cssClass":"pl-s"}],[{"start":0,"end":73,"cssClass":"pl-s"}],[{"start":0,"end":86,"cssClass":"pl-s"}],[{"start":0,"end":72,"cssClass":"pl-s"}],[{"start":0,"end":75,"cssClass":"pl-s"}],[{"start":0,"end":39,"cssClass":"pl-s"}],[{"start":0,"end":3,"cssClass":"pl-s"}],[],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":10,"cssClass":"pl-s1"}],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":9,"cssClass":"pl-s1"},{"start":10,"end":14,"cssClass":"pl-s1"},{"start":15,"end":17,"cssClass":"pl-k"},{"start":18,"end":19,"cssClass":"pl-s1"}],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":9,"cssClass":"pl-s1"}],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":9,"cssClass":"pl-s1"}],[{"start":0,"end":3,"cssClass":"pl-s1"},{"start":4,"end":8,"cssClass":"pl-s1"},{"start":9,"end":15,"cssClass":"pl-en"},{"start":16,"end":17,"cssClass":"pl-s1"},{"start":18,"end":25,"cssClass":"pl-en"},{"start":26,"end":27,"cssClass":"pl-s1"},{"start":28,"end":32,"cssClass":"pl-en"},{"start":33,"end":34,"cssClass":"pl-s1"},{"start":35,"end":42,"cssClass":"pl-en"},{"start":43,"end":46,"cssClass":"pl-s1"},{"start":47,"end":54,"cssClass":"pl-s1"},{"start":55,"end":63,"cssClass":"pl-s1"},{"start":65,"end":73,"cssClass":"pl-s1"},{"start":76,"end":80,"cssClass":"pl-s"}],[],[{"start":0,"end":61,"cssClass":"pl-c"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":27,"cssClass":"pl-s1"},{"start":28,"end":34,"cssClass":"pl-k"},{"start":35,"end":50,"cssClass":"pl-v"},{"start":52,"end":77,"cssClass":"pl-s1"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":8,"cssClass":"pl-s1"},{"start":9,"end":17,"cssClass":"pl-s1"},{"start":18,"end":24,"cssClass":"pl-k"},{"start":25,"end":33,"cssClass":"pl-v"}],[{"start":0,"end":4,"
cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":16,"cssClass":"pl-s1"},{"start":17,"end":23,"cssClass":"pl-k"},{"start":24,"end":32,"cssClass":"pl-v"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":18,"cssClass":"pl-s1"},{"start":19,"end":22,"cssClass":"pl-s1"},{"start":23,"end":29,"cssClass":"pl-k"},{"start":30,"end":37,"cssClass":"pl-v"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":18,"cssClass":"pl-s1"},{"start":19,"end":24,"cssClass":"pl-s1"},{"start":25,"end":31,"cssClass":"pl-k"},{"start":32,"end":47,"cssClass":"pl-v"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":18,"cssClass":"pl-s1"},{"start":19,"end":24,"cssClass":"pl-s1"},{"start":25,"end":31,"cssClass":"pl-k"},{"start":32,"end":46,"cssClass":"pl-v"},{"start":48,"end":68,"cssClass":"pl-v"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":18,"cssClass":"pl-s1"},{"start":19,"end":27,"cssClass":"pl-s1"},{"start":28,"end":34,"cssClass":"pl-k"},{"start":35,"end":56,"cssClass":"pl-v"}],[],[],[{"start":0,"end":88,"cssClass":"pl-c"}],[{"start":0,"end":53,"cssClass":"pl-c"}],[{"start":0,"end":68,"cssClass":"pl-c"}],[{"start":0,"end":96,"cssClass":"pl-c"}],[{"start":0,"end":65,"cssClass":"pl-c"}],[{"start":0,"end":81,"cssClass":"pl-c"}],[{"start":0,"end":45,"cssClass":"pl-c"}],[],[{"start":0,"end":5,"cssClass":"pl-c"}],[{"start":0,"end":73,"cssClass":"pl-c"}],[{"start":0,"end":30,"cssClass":"pl-c"}],[{"start":0,"end":39,"cssClass":"pl-c"}],[{"start":0,"end":48,"cssClass":"pl-c"}],[{"start":0,"end":78,"cssClass":"pl-c"}],[{"start":0,"end":49,"cssClass":"pl-c"}],[{"start":0,"end":64,"cssClass":"pl-c"}],[{"start":0,"end":57,"cssClass":"pl-c"}],[],[{"start":0,"end":38,"cssClass":"pl-c"}],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":12,"cssClass":"pl-en"},{"start":13,"end":17,"css
Class":"pl-s1"}],[{"start":4,"end":5,"cssClass":"pl-v"},{"start":6,"end":7,"cssClass":"pl-c1"}],[{"start":4,"end":8,"cssClass":"pl-k"},{"start":9,"end":13,"cssClass":"pl-en"},{"start":14,"end":18,"cssClass":"pl-s1"},{"start":20,"end":22,"cssClass":"pl-k"},{"start":23,"end":27,"cssClass":"pl-s1"}],[{"start":8,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-c1"},{"start":17,"end":21,"cssClass":"pl-s1"},{"start":22,"end":28,"cssClass":"pl-en"},{"start":31,"end":34,"cssClass":"pl-k"},{"start":35,"end":39,"cssClass":"pl-s1"},{"start":40,"end":42,"cssClass":"pl-c1"},{"start":43,"end":47,"cssClass":"pl-s1"}],[{"start":8,"end":11,"cssClass":"pl-k"},{"start":12,"end":16,"cssClass":"pl-s1"},{"start":17,"end":19,"cssClass":"pl-c1"},{"start":20,"end":25,"cssClass":"pl-s1"}],[{"start":12,"end":14,"cssClass":"pl-k"},{"start":15,"end":18,"cssClass":"pl-c1"},{"start":19,"end":23,"cssClass":"pl-s1"},{"start":24,"end":34,"cssClass":"pl-en"},{"start":35,"end":38,"cssClass":"pl-s"},{"start":40,"end":43,"cssClass":"pl-c1"},{"start":44,"end":48,"cssClass":"pl-s1"}],[{"start":16,"end":17,"cssClass":"pl-v"},{"start":18,"end":24,"cssClass":"pl-en"},{"start":25,"end":29,"cssClass":"pl-s1"}],[{"start":4,"end":9,"cssClass":"pl-s1"},{"start":10,"end":11,"cssClass":"pl-c1"},{"start":12,"end":13,"cssClass":"pl-v"}],[{"start":4,"end":17,"cssClass":"pl-s1"},{"start":18,"end":19,"cssClass":"pl-c1"}],[{"start":4,"end":16,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"}],[{"start":4,"end":7,"cssClass":"pl-k"},{"start":8,"end":12,"cssClass":"pl-s1"},{"start":13,"end":15,"cssClass":"pl-c1"},{"start":16,"end":21,"cssClass":"pl-s1"}],[{"start":8,"end":10,"cssClass":"pl-k"},{"start":11,"end":19,"cssClass":"pl-s"},{"start":20,"end":22,"cssClass":"pl-c1"},{"start":23,"end":27,"cssClass":"pl-s1"}],[{"start":12,"end":25,"cssClass":"pl-s1"},{"start":26,"end":32,"cssClass":"pl-en"},{"start":33,"end":37,"cssClass":"pl-s1"}],[{"start":8,"end":12,"cssClass":"pl-k"},{"start":13,"end":27,
"cssClass":"pl-s"},{"start":28,"end":30,"cssClass":"pl-c1"},{"start":31,"end":35,"cssClass":"pl-s1"}],[{"start":12,"end":24,"cssClass":"pl-s1"},{"start":25,"end":26,"cssClass":"pl-c1"},{"start":27,"end":31,"cssClass":"pl-s1"}],[{"start":8,"end":12,"cssClass":"pl-k"}],[{"start":12,"end":24,"cssClass":"pl-s1"},{"start":25,"end":31,"cssClass":"pl-en"},{"start":32,"end":36,"cssClass":"pl-s1"}],[],[{"start":4,"end":7,"cssClass":"pl-k"},{"start":8,"end":9,"cssClass":"pl-s1"},{"start":10,"end":12,"cssClass":"pl-c1"},{"start":13,"end":26,"cssClass":"pl-s1"}],[{"start":8,"end":12,"cssClass":"pl-en"},{"start":13,"end":14,"cssClass":"pl-s1"}],[],[{"start":4,"end":12,"cssClass":"pl-s1"},{"start":13,"end":14,"cssClass":"pl-c1"}],[{"start":4,"end":16,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":19,"end":23,"cssClass":"pl-en"},{"start":24,"end":26,"cssClass":"pl-s1"},{"start":27,"end":34,"cssClass":"pl-en"},{"start":35,"end":45,"cssClass":"pl-s"},{"start":47,"end":59,"cssClass":"pl-s1"},{"start":61,"end":62,"cssClass":"pl-c1"}],[{"start":4,"end":7,"cssClass":"pl-k"},{"start":8,"end":9,"cssClass":"pl-s1"},{"start":10,"end":12,"cssClass":"pl-c1"},{"start":13,"end":25,"cssClass":"pl-s1"}],[{"start":8,"end":9,"cssClass":"pl-s1"},{"start":10,"end":11,"cssClass":"pl-c1"},{"start":12,"end":14,"cssClass":"pl-s1"},{"start":15,"end":22,"cssClass":"pl-en"},{"start":23,"end":33,"cssClass":"pl-s"},{"start":35,"end":36,"cssClass":"pl-s1"},{"start":38,"end":39,"cssClass":"pl-c1"}],[{"start":8,"end":16,"cssClass":"pl-s1"},{"start":17,"end":23,"cssClass":"pl-en"},{"start":24,"end":28,"cssClass":"pl-en"},{"start":29,"end":30,"cssClass":"pl-s1"}],[],[{"start":4,"end":16,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":20,"end":21,"cssClass":"pl-s1"},{"start":22,"end":23,"cssClass":"pl-c1"},{"start":25,"end":28,"cssClass":"pl-k"},{"start":29,"end":30,"cssClass":"pl-s1"},{"start":31,"end":33,"cssClass":"pl-c1"},{"start":34,"end":42,"cssClass":"pl-s1"}],[{"
start":4,"end":9,"cssClass":"pl-v"},{"start":10,"end":11,"cssClass":"pl-c1"},{"start":13,"end":14,"cssClass":"pl-s1"},{"start":15,"end":16,"cssClass":"pl-c1"},{"start":18,"end":21,"cssClass":"pl-k"},{"start":22,"end":23,"cssClass":"pl-s1"},{"start":24,"end":26,"cssClass":"pl-c1"},{"start":27,"end":35,"cssClass":"pl-s1"}],[{"start":4,"end":10,"cssClass":"pl-v"},{"start":11,"end":12,"cssClass":"pl-c1"},{"start":14,"end":15,"cssClass":"pl-s1"},{"start":16,"end":17,"cssClass":"pl-c1"},{"start":19,"end":22,"cssClass":"pl-k"},{"start":23,"end":24,"cssClass":"pl-s1"},{"start":25,"end":27,"cssClass":"pl-c1"},{"start":28,"end":36,"cssClass":"pl-s1"}],[],[{"start":4,"end":10,"cssClass":"pl-k"},{"start":11,"end":23,"cssClass":"pl-s1"},{"start":25,"end":37,"cssClass":"pl-s1"},{"start":39,"end":44,"cssClass":"pl-v"},{"start":46,"end":52,"cssClass":"pl-v"}],[],[{"start":0,"end":41,"cssClass":"pl-c"}],[{"start":0,"end":4,"cssClass":"pl-s1"},{"start":5,"end":6,"cssClass":"pl-c1"},{"start":7,"end":12,"cssClass":"pl-en"},{"start":13,"end":56,"cssClass":"pl-s"}],[{"start":0,"end":2,"cssClass":"pl-k"},{"start":3,"end":6,"cssClass":"pl-s"},{"start":7,"end":9,"cssClass":"pl-c1"},{"start":10,"end":14,"cssClass":"pl-s1"},{"start":17,"end":64,"cssClass":"pl-c"}],[{"start":4,"end":8,"cssClass":"pl-s1"},{"start":9,"end":10,"cssClass":"pl-c1"},{"start":11,"end":15,"cssClass":"pl-s1"},{"start":16,"end":23,"cssClass":"pl-en"},{"start":24,"end":27,"cssClass":"pl-s"},{"start":29,"end":31,"cssClass":"pl-s"}],[],[{"start":0,"end":12,"cssClass":"pl-s1"},{"start":14,"end":25,"cssClass":"pl-s1"},{"start":27,"end":32,"cssClass":"pl-v"},{"start":34,"end":40,"cssClass":"pl-v"},{"start":41,"end":42,"cssClass":"pl-c1"},{"start":43,"end":51,"cssClass":"pl-en"},{"start":52,"end":56,"cssClass":"pl-s1"}],[{"start":0,"end":5,"cssClass":"pl-s1"},{"start":6,"end":7,"cssClass":"pl-c1"},{"start":9,"end":13,"cssClass":"pl-c1"},{"start":14,"end":17,"cssClass":"pl-k"},{"start":18,"end":19,"cssClass":"pl-s1"},{"start":2
0,"end":22,"cssClass":"pl-c1"},{"start":23,"end":28,"cssClass":"pl-en"},{"start":29,"end":32,"cssClass":"pl-en"},{"start":33,"end":44,"cssClass":"pl-s1"}],[],[{"start":0,"end":39,"cssClass":"pl-c"}],[{"start":0,"end":2,"cssClass":"pl-k"},{"start":3,"end":6,"cssClass":"pl-en"},{"start":7,"end":12,"cssClass":"pl-v"},{"start":14,"end":16,"cssClass":"pl-c1"},{"start":17,"end":20,"cssClass":"pl-en"},{"start":21,"end":32,"cssClass":"pl-s1"},{"start":34,"end":36,"cssClass":"pl-c1"},{"start":37,"end":40,"cssClass":"pl-en"},{"start":41,"end":47,"cssClass":"pl-v"},{"start":49,"end":51,"cssClass":"pl-c1"},{"start":52,"end":55,"cssClass":"pl-en"},{"start":56,"end":67,"cssClass":"pl-s1"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":116,"cssClass":"pl-s"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":40,"cssClass":"pl-s"}],[],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":10,"cssClass":"pl-en"},{"start":11,"end":21,"cssClass":"pl-s1"},{"start":23,"end":24,"cssClass":"pl-s1"}],[{"start":4,"end":12,"cssClass":"pl-s1"},{"start":13,"end":14,"cssClass":"pl-c1"}],[{"start":4,"end":7,"cssClass":"pl-k"},{"start":8,"end":11,"cssClass":"pl-s1"},{"start":12,"end":14,"cssClass":"pl-c1"},{"start":15,"end":25,"cssClass":"pl-s1"}],[{"start":8,"end":20,"cssClass":"pl-s1"},{"start":21,"end":22,"cssClass":"pl-c1"},{"start":23,"end":26,"cssClass":"pl-s1"},{"start":27,"end":41,"cssClass":"pl-s1"},{"start":42,"end":43,"cssClass":"pl-c1"}],[{"start":8,"end":23,"cssClass":"pl-s1"},{"start":24,"end":25,"cssClass":"pl-c1"},{"start":26,"end":29,"cssClass":"pl-s1"},{"start":30,"end":44,"cssClass":"pl-s1"},{"start":45,"end":46,"cssClass":"pl-c1"}],[{"start":8,"end":26,"cssClass":"pl-s1"},{"start":27,"end":28,"cssClass":"pl-c1"},{"start":29,"end":32,"cssClass":"pl-s1"},{"start":33,"end":47,"cssClass":"pl-s1"},{"start":48,"end":49,"cssClass":"pl-c1"}],[{"start":8,"end":16,"cssClass":"pl-s1"},{"start":17,"end":23,"cssClass":"pl-en"},{"start":24,"end":32,"cssClass":"pl-v"},{"
start":33,"end":47,"cssClass":"pl-s1"},{"start":47,"end":48,"cssClass":"pl-c1"},{"start":49,"end":61,"cssClass":"pl-s1"},{"start":63,"end":78,"cssClass":"pl-s1"},{"start":79,"end":80,"cssClass":"pl-c1"},{"start":81,"end":82,"cssClass":"pl-s1"},{"start":84,"end":102,"cssClass":"pl-s1"}],[{"start":4,"end":14,"cssClass":"pl-s1"},{"start":15,"end":16,"cssClass":"pl-c1"},{"start":17,"end":25,"cssClass":"pl-s1"}],[{"start":4,"end":10,"cssClass":"pl-k"},{"start":11,"end":21,"cssClass":"pl-s1"}],[],[{"start":0,"end":10,"cssClass":"pl-s1"},{"start":11,"end":12,"cssClass":"pl-c1"},{"start":13,"end":24,"cssClass":"pl-s1"}],[],[{"start":0,"end":37,"cssClass":"pl-c"}],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":5,"cssClass":"pl-s1"},{"start":6,"end":8,"cssClass":"pl-c1"},{"start":9,"end":14,"cssClass":"pl-en"},{"start":15,"end":18,"cssClass":"pl-en"},{"start":19,"end":30,"cssClass":"pl-s1"}],[{"start":4,"end":6,"cssClass":"pl-k"},{"start":7,"end":12,"cssClass":"pl-v"},{"start":13,"end":14,"cssClass":"pl-s1"},{"start":16,"end":18,"cssClass":"pl-c1"},{"start":19,"end":20,"cssClass":"pl-c1"}],[{"start":8,"end":13,"cssClass":"pl-v"},{"start":14,"end":15,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":19,"end":20,"cssClass":"pl-c1"}],[{"start":8,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":19,"end":24,"cssClass":"pl-c1"}],[{"start":4,"end":8,"cssClass":"pl-k"}],[{"start":8,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":19,"end":23,"cssClass":"pl-c1"}],[],[{"start":0,"end":8,"cssClass":"pl-s1"},{"start":9,"end":10,"cssClass":"pl-c1"}],[{"start":0,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-c1"}],[],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":20,"cssClass":"pl-en"},{"start":21,"end":22,"cssClass":"pl-s1"},{"start":24,"end":34,"cssClass":"pl-s1"},{"start":36,"end":41,"cssClas
s":"pl-s1"},{"start":43,"end":51,"cssClass":"pl-s1"},{"start":53,"end":58,"cssClass":"pl-v"},{"start":60,"end":66,"cssClass":"pl-v"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":24,"cssClass":"pl-s"},{"start":26,"end":36,"cssClass":"pl-s1"},{"start":37,"end":38,"cssClass":"pl-s1"},{"start":42,"end":46,"cssClass":"pl-s1"},{"start":48,"end":51,"cssClass":"pl-s"}],[{"start":4,"end":23,"cssClass":"pl-s1"},{"start":24,"end":25,"cssClass":"pl-c1"},{"start":26,"end":32,"cssClass":"pl-v"},{"start":33,"end":34,"cssClass":"pl-s1"}],[],[{"start":4,"end":8,"cssClass":"pl-s1"},{"start":9,"end":10,"cssClass":"pl-c1"},{"start":11,"end":21,"cssClass":"pl-s1"},{"start":22,"end":23,"cssClass":"pl-s1"}],[{"start":4,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-c1"},{"start":16,"end":20,"cssClass":"pl-en"},{"start":21,"end":40,"cssClass":"pl-s1"},{"start":40,"end":41,"cssClass":"pl-c1"},{"start":41,"end":60,"cssClass":"pl-s1"},{"start":62,"end":68,"cssClass":"pl-s1"},{"start":68,"end":69,"cssClass":"pl-c1"},{"start":69,"end":74,"cssClass":"pl-s1"},{"start":75,"end":76,"cssClass":"pl-s1"}],[{"start":4,"end":14,"cssClass":"pl-s1"},{"start":15,"end":16,"cssClass":"pl-c1"},{"start":18,"end":26,"cssClass":"pl-v"},{"start":27,"end":41,"cssClass":"pl-s1"},{"start":41,"end":42,"cssClass":"pl-c1"},{"start":43,"end":44,"cssClass":"pl-c1"},{"start":46,"end":47,"cssClass":"pl-c1"},{"start":48,"end":49,"cssClass":"pl-c1"},{"start":50,"end":55,"cssClass":"pl-v"},{"start":56,"end":57,"cssClass":"pl-s1"},{"start":60,"end":62,"cssClass":"pl-s1"},{"start":65,"end":68,"cssClass":"pl-k"},{"start":69,"end":71,"cssClass":"pl-s1"},{"start":72,"end":74,"cssClass":"pl-c1"},{"start":75,"end":80,"cssClass":"pl-en"},{"start":81,"end":90,"cssClass":"pl-s1"},{"start":91,"end":97,"cssClass":"pl-s1"}],[{"start":4,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-c1"},{"start":17,"end":25,"cssClass":"pl-v"},{"start":26,"end":40,"cssClass":"pl-s1"},{"start":40,"end":41,"c
ssClass":"pl-c1"},{"start":42,"end":43,"cssClass":"pl-c1"},{"start":45,"end":46,"cssClass":"pl-c1"},{"start":48,"end":50,"cssClass":"pl-s1"},{"start":53,"end":56,"cssClass":"pl-k"},{"start":57,"end":59,"cssClass":"pl-s1"},{"start":60,"end":62,"cssClass":"pl-c1"},{"start":63,"end":68,"cssClass":"pl-en"},{"start":69,"end":78,"cssClass":"pl-s1"},{"start":79,"end":84,"cssClass":"pl-s1"},{"start":85,"end":93,"cssClass":"pl-s1"}],[],[{"start":4,"end":6,"cssClass":"pl-k"},{"start":7,"end":12,"cssClass":"pl-s1"},{"start":13,"end":14,"cssClass":"pl-s1"},{"start":16,"end":18,"cssClass":"pl-c1"},{"start":19,"end":24,"cssClass":"pl-c1"},{"start":27,"end":45,"cssClass":"pl-c"}],[{"start":8,"end":16,"cssClass":"pl-s1"},{"start":17,"end":23,"cssClass":"pl-en"},{"start":24,"end":33,"cssClass":"pl-s1"}],[{"start":8,"end":17,"cssClass":"pl-s1"},{"start":18,"end":22,"cssClass":"pl-s1"},{"start":23,"end":24,"cssClass":"pl-c1"},{"start":25,"end":28,"cssClass":"pl-en"},{"start":29,"end":38,"cssClass":"pl-s1"},{"start":39,"end":44,"cssClass":"pl-s1"},{"start":45,"end":49,"cssClass":"pl-s1"},{"start":51,"end":52,"cssClass":"pl-c1"},{"start":53,"end":56,"cssClass":"pl-en"},{"start":57,"end":58,"cssClass":"pl-c1"}],[{"start":8,"end":21,"cssClass":"pl-s1"},{"start":22,"end":28,"cssClass":"pl-en"},{"start":29,"end":38,"cssClass":"pl-s1"},{"start":39,"end":43,"cssClass":"pl-s1"}],[{"start":8,"end":13,"cssClass":"pl-en"},{"start":14,"end":16,"cssClass":"pl-s"}],[],[{"start":4,"end":8,"cssClass":"pl-k"}],[{"start":8,"end":11,"cssClass":"pl-k"},{"start":12,"end":13,"cssClass":"pl-s1"},{"start":14,"end":16,"cssClass":"pl-c1"},{"start":17,"end":22,"cssClass":"pl-en"},{"start":23,"end":28,"cssClass":"pl-v"},{"start":29,"end":30,"cssClass":"pl-s1"}],[{"start":12,"end":22,"cssClass":"pl-s1"},{"start":23,"end":24,"cssClass":"pl-c1"},{"start":25,"end":31,"cssClass":"pl-en"},{"start":32,"end":42,"cssClass":"pl-s1"},{"start":44,"end":45,"cssClass":"pl-c1"},{"start":48,"end":95,"cssClass":"pl-c"}],[{"start"
:12,"end":21,"cssClass":"pl-s1"},{"start":22,"end":23,"cssClass":"pl-c1"},{"start":24,"end":30,"cssClass":"pl-en"},{"start":31,"end":40,"cssClass":"pl-s1"},{"start":42,"end":43,"cssClass":"pl-c1"}],[{"start":12,"end":16,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":19,"end":29,"cssClass":"pl-s1"},{"start":30,"end":31,"cssClass":"pl-s1"}],[{"start":12,"end":21,"cssClass":"pl-s1"},{"start":22,"end":23,"cssClass":"pl-c1"},{"start":24,"end":28,"cssClass":"pl-en"},{"start":29,"end":48,"cssClass":"pl-s1"},{"start":48,"end":49,"cssClass":"pl-c1"},{"start":49,"end":68,"cssClass":"pl-s1"},{"start":70,"end":76,"cssClass":"pl-s1"},{"start":76,"end":77,"cssClass":"pl-c1"},{"start":77,"end":82,"cssClass":"pl-s1"},{"start":83,"end":84,"cssClass":"pl-s1"},{"start":87,"end":97,"cssClass":"pl-s1"},{"start":97,"end":98,"cssClass":"pl-c1"},{"start":98,"end":107,"cssClass":"pl-s1"}],[{"start":12,"end":21,"cssClass":"pl-s1"},{"start":22,"end":33,"cssClass":"pl-en"},{"start":34,"end":44,"cssClass":"pl-s1"}],[{"start":12,"end":65,"cssClass":"pl-c"}],[{"start":12,"end":21,"cssClass":"pl-s1"},{"start":22,"end":26,"cssClass":"pl-s1"},{"start":27,"end":28,"cssClass":"pl-c1"},{"start":29,"end":32,"cssClass":"pl-en"},{"start":33,"end":42,"cssClass":"pl-s1"},{"start":43,"end":47,"cssClass":"pl-s1"},{"start":49,"end":50,"cssClass":"pl-c1"},{"start":51,"end":54,"cssClass":"pl-s"},{"start":55,"end":56,"cssClass":"pl-c1"},{"start":57,"end":60,"cssClass":"pl-en"},{"start":61,"end":62,"cssClass":"pl-s1"}],[{"start":12,"end":20,"cssClass":"pl-s1"},{"start":21,"end":27,"cssClass":"pl-en"},{"start":28,"end":37,"cssClass":"pl-s1"}],[{"start":12,"end":25,"cssClass":"pl-s1"},{"start":26,"end":32,"cssClass":"pl-en"},{"start":33,"end":42,"cssClass":"pl-s1"},{"start":43,"end":47,"cssClass":"pl-s1"}],[{"start":12,"end":17,"cssClass":"pl-en"},{"start":18,"end":20,"cssClass":"pl-s"}],[],[{"start":4,"end":10,"cssClass":"pl-k"},{"start":11,"end":19,"cssClass":"pl-s1"},{"start":21,"end":34,"c
ssClass":"pl-s1"}],[],[{"start":0,"end":19,"cssClass":"pl-c"}],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":5,"cssClass":"pl-s1"},{"start":6,"end":8,"cssClass":"pl-c1"},{"start":9,"end":14,"cssClass":"pl-en"},{"start":15,"end":18,"cssClass":"pl-en"},{"start":19,"end":24,"cssClass":"pl-v"}],[{"start":8,"end":16,"cssClass":"pl-s1"},{"start":18,"end":31,"cssClass":"pl-s1"},{"start":32,"end":33,"cssClass":"pl-c1"},{"start":34,"end":50,"cssClass":"pl-en"},{"start":51,"end":52,"cssClass":"pl-s1"},{"start":54,"end":64,"cssClass":"pl-s1"},{"start":66,"end":71,"cssClass":"pl-s1"},{"start":73,"end":81,"cssClass":"pl-s1"},{"start":83,"end":88,"cssClass":"pl-v"},{"start":90,"end":96,"cssClass":"pl-v"}],[],[{"start":0,"end":49,"cssClass":"pl-c"}],[{"start":0,"end":16,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":19,"end":34,"cssClass":"pl-v"},{"start":35,"end":47,"cssClass":"pl-s1"},{"start":47,"end":48,"cssClass":"pl-c1"},{"start":48,"end":60,"cssClass":"pl-s1"},{"start":62,"end":70,"cssClass":"pl-s1"},{"start":71,"end":72,"cssClass":"pl-c1"},{"start":73,"end":81,"cssClass":"pl-s1"}],[],[{"start":0,"end":73,"cssClass":"pl-c"}],[{"start":0,"end":16,"cssClass":"pl-s1"},{"start":17,"end":20,"cssClass":"pl-en"},{"start":21,"end":32,"cssClass":"pl-s1"},{"start":32,"end":33,"cssClass":"pl-c1"},{"start":33,"end":34,"cssClass":"pl-c1"}],[],[{"start":0,"end":5,"cssClass":"pl-en"},{"start":6,"end":32,"cssClass":"pl-s"}],[{"start":0,"end":70,"cssClass":"pl-c"}],[{"start":0,"end":16,"cssClass":"pl-s1"},{"start":17,"end":31,"cssClass":"pl-en"},{"start":32,"end":42,"cssClass":"pl-s1"},{"start":42,"end":43,"cssClass":"pl-c1"},{"start":43,"end":45,"cssClass":"pl-c1"}],[{"start":0,"end":48,"cssClass":"pl-c"}],[{"start":0,"end":16,"cssClass":"pl-s1"},{"start":17,"end":31,"cssClass":"pl-en"},{"start":32,"end":51,"cssClass":"pl-s1"},{"start":51,"end":52,"cssClass":"pl-c1"},{"start":52,"end":54,"cssClass":"pl-c1"}],[],[{"start":0,"end":5,"cssClass":"pl-en"},{"star
t":6,"end":25,"cssClass":"pl-s"}],[{"start":0,"end":53,"cssClass":"pl-c"}],[{"start":0,"end":25,"cssClass":"pl-en"},{"start":26,"end":37,"cssClass":"pl-s1"},{"start":37,"end":38,"cssClass":"pl-c1"},{"start":38,"end":54,"cssClass":"pl-s1"},{"start":55,"end":66,"cssClass":"pl-s1"},{"start":68,"end":77,"cssClass":"pl-s1"},{"start":77,"end":78,"cssClass":"pl-c1"},{"start":78,"end":95,"cssClass":"pl-s"}],[],[{"start":0,"end":54,"cssClass":"pl-c"}],[{"start":0,"end":5,"cssClass":"pl-en"},{"start":6,"end":65,"cssClass":"pl-s"}]],"csv":null,"csvError":null,"dependabotInfo":{"showConfigurationBanner":null,"configFilePath":null,"networkDependabotPath":"/simopt-admin/simopt/network/updates","dismissConfigurationNoticePath":"/settings/dismiss-notice/dependabot_configuration_notice","configurationNoticeDismissed":false,"repoAlertsPath":"/simopt-admin/simopt/security/dependabot","repoSecurityAndAnalysisPath":"/simopt-admin/simopt/settings/security_analysis","repoOwnerIsOrg":false,"currentUserCanAdminRepo":false},"displayName":"demo_user.py","displayUrl":"https://github.com/simopt-admin/simopt/blob/python_dev_litong/demo_user.py?raw=true","headerInfo":{"blobSize":"6.31 KB","deleteInfo":{"deleteTooltip":"Delete this file"},"editInfo":{"editTooltip":"Edit this 
file"},"ghDesktopPath":"x-github-client://openRepo/https://github.com/simopt-admin/simopt?branch=python_dev_litong&filepath=demo_user.py","gitLfsPath":null,"onBranch":true,"shortPath":"1872aa0","siteNavLoginPath":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsimopt-admin%2Fsimopt%2Fblob%2Fpython_dev_litong%2Fdemo_user.py","isCSV":false,"isRichtext":false,"toc":null,"lineInfo":{"truncatedLoc":"167","truncatedSloc":"139"},"mode":"file"},"image":false,"isCodeownersFile":null,"isPlain":false,"isValidLegacyIssueTemplate":false,"issueTemplateHelpUrl":"https://docs.github.com/articles/about-issue-and-pull-request-templates","issueTemplate":null,"discussionTemplate":null,"language":"Python","languageID":303,"large":false,"loggedIn":true,"newDiscussionPath":"/simopt-admin/simopt/discussions/new","newIssuePath":"/simopt-admin/simopt/issues/new","planSupportInfo":{"repoIsFork":null,"repoOwnedByCurrentUser":null,"requestFullPath":"/simopt-admin/simopt/blob/python_dev_litong/demo_user.py","showFreeOrgGatedFeatureMessage":null,"showPlanSupportBanner":null,"upgradeDataAttributes":null,"upgradePath":null},"publishBannersInfo":{"dismissActionNoticePath":"/settings/dismiss-notice/publish_action_from_dockerfile","dismissStackNoticePath":"/settings/dismiss-notice/publish_stack_from_file","releasePath":"/simopt-admin/simopt/releases/new?marketplace=true","showPublishActionBanner":false,"showPublishStackBanner":false},"renderImageOrRaw":false,"richText":null,"renderedFileInfo":null,"shortPath":null,"tabSize":8,"topBannersInfo":{"overridingGlobalFundingFile":false,"globalPreferredFundingPath":null,"repoOwner":"simopt-admin","repoName":"simopt","showInvalidCitationWarning":false,"citationHelpUrl":"https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/creating-a-repository-on-github/about-citation-files","showDependabotConfigurationBanner":null,"actionsOnboardingTip":null},"truncated":false,"viewable":true,"workflowRedirectUrl":null,"symbols":{"timedOut":false,"notAna
lyzed":false,"symbols":[{"name":"get_info","kind":"function","identStart":2086,"identEnd":2094,"extentStart":2082,"extentEnd":2985,"fullyQualifiedName":"get_info","identUtf16":{"start":{"lineNumber":46,"utf16Col":4},"end":{"lineNumber":46,"utf16Col":12}},"extentUtf16":{"start":{"lineNumber":46,"utf16Col":0},"end":{"lineNumber":77,"utf16Col":52}}},{"name":"path","kind":"constant","identStart":3029,"identEnd":3033,"extentStart":3029,"extentEnd":3086,"fullyQualifiedName":"path","identUtf16":{"start":{"lineNumber":80,"utf16Col":0},"end":{"lineNumber":80,"utf16Col":4}},"extentUtf16":{"start":{"lineNumber":80,"utf16Col":0},"end":{"lineNumber":80,"utf16Col":57}}},{"name":"rands","kind":"constant","identStart":3248,"identEnd":3253,"extentStart":3248,"extentEnd":3295,"fullyQualifiedName":"rands","identUtf16":{"start":{"lineNumber":85,"utf16Col":0},"end":{"lineNumber":85,"utf16Col":5}},"extentUtf16":{"start":{"lineNumber":85,"utf16Col":0},"end":{"lineNumber":85,"utf16Col":47}}},{"name":"rebase","kind":"function","identStart":3572,"identEnd":3578,"extentStart":3568,"extentEnd":3937,"fullyQualifiedName":"rebase","identUtf16":{"start":{"lineNumber":92,"utf16Col":4},"end":{"lineNumber":92,"utf16Col":10}},"extentUtf16":{"start":{"lineNumber":92,"utf16Col":0},"end":{"lineNumber":100,"utf16Col":21}}},{"name":"myproblems","kind":"constant","identStart":3939,"identEnd":3949,"extentStart":3939,"extentEnd":3963,"fullyQualifiedName":"myproblems","identUtf16":{"start":{"lineNumber":102,"utf16Col":0},"end":{"lineNumber":102,"utf16Col":10}},"extentUtf16":{"start":{"lineNumber":102,"utf16Col":0},"end":{"lineNumber":102,"utf16Col":24}}},{"name":"problems","kind":"constant","identStart":4140,"identEnd":4148,"extentStart":4140,"extentEnd":4153,"fullyQualifiedName":"problems","identUtf16":{"start":{"lineNumber":112,"utf16Col":0},"end":{"lineNumber":112,"utf16Col":8}},"extentUtf16":{"start":{"lineNumber":112,"utf16Col":0},"end":{"lineNumber":112,"utf16Col":13}}},{"name":"problem_names","kind":"co
nstant","identStart":4154,"identEnd":4167,"extentStart":4154,"extentEnd":4172,"fullyQualifiedName":"problem_names","identUtf16":{"start":{"lineNumber":113,"utf16Col":0},"end":{"lineNumber":113,"utf16Col":13}},"extentUtf16":{"start":{"lineNumber":113,"utf16Col":0},"end":{"lineNumber":113,"utf16Col":18}}},{"name":"generate_problem","kind":"function","identStart":4178,"identEnd":4194,"extentStart":4174,"extentEnd":5505,"fullyQualifiedName":"generate_problem","identUtf16":{"start":{"lineNumber":115,"utf16Col":4},"end":{"lineNumber":115,"utf16Col":20}},"extentUtf16":{"start":{"lineNumber":115,"utf16Col":0},"end":{"lineNumber":143,"utf16Col":34}}},{"name":"mymetaexperiment","kind":"constant","identStart":5707,"identEnd":5723,"extentStart":5707,"extentEnd":5789,"fullyQualifiedName":"mymetaexperiment","identUtf16":{"start":{"lineNumber":150,"utf16Col":0},"end":{"lineNumber":150,"utf16Col":16}},"extentUtf16":{"start":{"lineNumber":150,"utf16Col":0},"end":{"lineNumber":150,"utf16Col":82}}}]}},"copilotInfo":{"notices":{"codeViewPopover":{"dismissed":false,"dismissPath":"/settings/dismiss-notice/code_view_copilot_popover"}},"userAccess":{"accessAllowed":false,"hasSubscriptionEnded":false,"orgHasCFBAccess":false,"userHasCFIAccess":false,"userHasOrgs":false,"userIsOrgAdmin":false,"userIsOrgMember":false,"business":null,"featureRequestInfo":null}},"csrf_tokens":{"/simopt-admin/simopt/branches":{"post":"gb0BMxr-fXyz6kun91_9a83dZUM5fg4_5o-QJh56D-QpssW89P0De0BsQaIBlPFeinKsJigqRzWAI698DzOi2w"},"/repos/preferences":{"post":"EIlhdtGF6ei2TTLwdZupeDLV6yHP0AEgzvpCLVPSf8WEYPTyeAB4ZpYPaebAUB-CDHyStzF02zmywMFEphu6QQ"}}},"title":"simopt/demo_user.py at python_dev_litong · simopt-admin/simopt"} \ No newline at end of file From 1826a3491e3dd53fa89610ea50b1fffc3d373806 Mon Sep 17 00:00:00 2001 From: Litong Liu <46491025+liulitong-Jessie@users.noreply.github.com> Date: Fri, 29 Sep 2023 11:49:31 -0400 Subject: [PATCH 13/21] Add files via upload --- simopt/models/san_2.py | 1210 
++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1210 insertions(+) create mode 100644 simopt/models/san_2.py diff --git a/simopt/models/san_2.py b/simopt/models/san_2.py new file mode 100644 index 000000000..a4bf8896a --- /dev/null +++ b/simopt/models/san_2.py @@ -0,0 +1,1210 @@ +""" +Summary +------- +Simulate duration of a stochastic activity network (SAN). +A detailed description of the model/problem can be found +`here `_. +""" +import numpy as np +from scipy.optimize import linprog + +from ..base import Model, Problem + + +class SAN(Model): + """ + A model that simulates a stochastic activity network problem with + tasks that have exponentially distributed durations, and the selected + means come with a cost. + + Attributes + ---------- + name : string + name of model + n_rngs : int + number of random-number generators used to run a simulation replication + n_responses : int + number of responses (performance measures) + factors : dict + changeable factors of the simulation model + specifications : dict + details of each factor (for GUI and data validation) + check_factor_list : dict + switch case for checking factor simulatability + + Arguments + --------- + fixed_factors : nested dict + fixed factors of the simulation model + + See also + -------- + base.Model + """ + def __init__(self, fixed_factors=None, random=False): + if fixed_factors is None: + fixed_factors = {} + self.name = "SAN" + self.n_rngs = 1 + self.n_responses = 1 + self.n_random = 2 # Number of rng used for the random instance + self.random = random + self.specifications = { + "num_nodes": { + "description": "number of nodes", + "datatype": int, + "default": 9 + }, + "arcs": { + "description": "list of arcs", + "datatype": list, + "default": [(1, 2), (1, 3), (2, 3), (2, 4), (2, 6), (3, 6), (4, 5), + (4, 7), (5, 6), (5, 8), (6, 9), (7, 8), (8, 9)] + }, + + "arc_means": { + "description": "mean task durations for each arc", + "datatype": tuple, + "default": (1,) * 13 + }, + "num_arcs": 
{ + "description": "number of arcs to be generated", + "datatype": int, + "default": 13 + }, + "set_arcs": { + "description": "list of all possible arcs", + "datatype": list, + "default": [(1, 2), (1, 3),(1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (1, 9), + (2, 3), (2, 4), (2, 5), (2, 6), (2, 7), (2, 8), (2, 9), + (3, 4), (3, 5), (3, 6), (3, 7), (3, 8), (3, 9), + (4, 5), (4, 6), (4, 7), (4, 8), (4, 9), + (5, 6), (5, 7), (5, 8), (5, 9), + (6, 7), (6, 8), (6, 9), + (7, 8), (7, 9), + (8, 9)] + } + } + self.check_factor_list = { + "num_nodes": self.check_num_nodes, + "arcs": self.check_arcs, + "arc_means": self.check_arc_means, + "num_arcs": self.check_num_arcs, + "set_arcs": self.check_set_arcs + } + # Set factors of the simulation model. + super().__init__(fixed_factors) + + def check_num_nodes(self): + return self.factors["num_nodes"] > 0 + + def dfs(self, graph, start, visited=None): + if visited is None: + visited = set() + visited.add(start) + + for next in graph[start] - visited: + self.dfs(graph, next, visited) + return visited + + def check_arcs(self): + if len(self.factors["arcs"]) <= 0: + return False + # Check graph is connected. 
+ graph = {node: set() for node in range(1, self.factors["num_nodes"] + 1)} + for a in self.factors["arcs"]: + graph[a[0]].add(a[1]) + visited = self.dfs(graph, 1) + if self.factors["num_nodes"] in visited: + return True + return False + + def check_arc_means(self): + positive = True + for x in list(self.factors["arc_means"]): + positive = positive & (x > 0) + return (len(self.factors["arc_means"]) == len(self.factors["arcs"])) & positive + + def check_num_arcs(self): + return self.factors["num_arcs"] > 0 + + def check_set_arcs(self): + return True + + def allPathsStartEnd(self, graph): + end = len(graph) + + def dfs(node, path, output): + if node == end: + output.append(path) + + for nx in graph[node]: + dfs(nx, path+[nx], output) + + output = [] + dfs(1,[1],output) + return output + + def get_arcs(self, num_nodes, num_arcs, uni_rng): + """ + Getting a random set of valid arcs. + + Arguments + --------- + num_nodes: int + number of nodes for the random graph + num_arcs: int + number of arcs for the random graph + rng_list : list of mrg32k3a.mrg32k3a.MRG32k3a + rngs for model to use when simulating a replication + + Returns + ------- + arcs : list + Generated random arcs to be used in the following simulation + """ + # Calculate the total set of possible arcs in the graph + set_arcs = [] + for n1 in range(1, num_nodes): + for n2 in range(n1 + 1, num_nodes + 1): + set_arcs.append((n1, n2)) + + # Assign the arcs set with the necessary arcs + arcs = [(1, 2), (num_nodes - 1, num_nodes)] + remove = [] + def get_in(arcs, num_nodes, ind, in_ind=True): + global remove + if len(arcs) <= 0: + return False + graph = {node: set() for node in range(1, num_nodes + 1)} + for a in arcs: + if in_ind == True: + graph[a[0]].add(a[1]) + else: + graph[a[1]].add(a[0]) + set0 = graph[ind] + for i in graph[ind]: + set0 = {*set0, *graph[i]} + for j in graph[i]: + set0 = {*set0, *graph[j]} + + if in_ind == True: + for j in set0 - graph[ind]: + if j in graph[ind]: + remove.append((ind, j)) + 
+ set0 = {*set0, ind} + return set0 + + # Check whether the first node can reach all other nodes + set0 = get_in(arcs, num_nodes, 1) + for i in range(2, num_nodes+1): + set0 = get_in(arcs, num_nodes, 1) # Get the set of nodes that starter node can reach + if i not in set0: + set1 = list(get_in(arcs, num_nodes, i, False)) # Get the set of nodes that can reach node i + n2 = set1[uni_rng.randint(0, len(set1)-1)] # Randomly choose one + set2 = [i for i in set0 if i < n2] + n1 = list(set2)[uni_rng.randint(0, len(set2)-1)] + arc = (n1, n2) # Connect the two nodes so that starter node can reach node i + arcs = {*arcs, arc} + + # Check whether each node can reach the end node + for i in range(2, num_nodes): + set9 = get_in(arcs, num_nodes, i) + if num_nodes not in set9: + set_out = list(get_in(arcs, num_nodes, num_nodes, False)) + n1 = list(set9)[uni_rng.randint(0, len(set9)-1)] + set2 = [i for i in set_out if i > n1] + n2 = set2[uni_rng.randint(0, len(set2)-1)] + arc = (n1, n2) + arcs = {*arcs, arc} + + if len(arcs) < num_arcs: # If the current arc set has less arcs than the input lower bound + remain_num = num_arcs - len(arcs) + remain = list(set(set_arcs) - set(arcs)) + idx = uni_rng.sample(range(0, len(remain)), remain_num) + aa = set([remain[i] for i in idx]) + arcs = {*arcs, *aa} + + else: + return list(arcs) + + return list(arcs) + + def attach_rng(self, random_rng): + """ + Attach rng to random model class and generate random factors and update corresponding problem dimension. 
+ + Arguments + --------- + random_rng : list of mrg32k3a.mrg32k3a.MRG32k3a + rngs for model to use when generating random factors + + Returns + ------- + arcs : list + Generated random arcs to be used in the following simulation + """ + self.random_rng = random_rng + arcs_set = self.get_arcs(self.factors["num_nodes"], self.factors["num_arcs"], random_rng[0]) + + arcs_set.sort(key=lambda a: a[1]) + arcs_set.sort(key=lambda a: a[0]) + self.factors["arcs"] = arcs_set + print('arcs: ', arcs_set) + self.factors["num_arcs"] = len(self.factors["arcs"]) + self.factors["arc_means"] = (1,) * len(self.factors["arcs"]) + + def replicate(self, rng_list): + """ + Simulate a single replication for the current model factors. + + Arguments + --------- + rng_list : list of mrg32k3a.mrg32k3a.MRG32k3a + rngs for model to use when simulating a replication + + Returns + ------- + responses : dict + performance measures of interest + "longest_path_length" = length/duration of longest path + gradients : dict of dicts + gradient estimates for each response + """ + + # Designate separate random number generators. + exp_rng = rng_list[0] + + # Topological sort. + graph_in = {node: set() for node in range(1, self.factors["num_nodes"] + 1)} + graph_out = {node: set() for node in range(1, self.factors["num_nodes"] + 1)} + for a in self.factors["arcs"]: + graph_in[a[1]].add(a[0]) + graph_out[a[0]].add(a[1]) + indegrees = [len(graph_in[n]) for n in range(1, self.factors["num_nodes"] + 1)] + queue = [] + topo_order = [] + for n in range(self.factors["num_nodes"]): + if indegrees[n] == 0: + queue.append(n + 1) + while len(queue) != 0: + u = queue.pop(0) + topo_order.append(u) + for n in graph_out[u]: + indegrees[n - 1] -= 1 + if indegrees[n - 1] == 0: + queue.append(n) + + # Generate arc lengths. 
+ arc_length = {} + for i in range(len(self.factors["arcs"])): + arc_length[str(self.factors["arcs"][i])] = exp_rng.expovariate(1 / self.factors["arc_means"][i]) + + ## Calculate the length of the longest path. + allpaths = self.allPathsStartEnd(graph_out) + L = [] + for p in allpaths: + l = 0 + for j in range(len(p)-1): + l += arc_length[str((p[j], p[j+1]))] + L.append(l) + longest_path = np.max(L) + longest_P = allpaths[np.argmax(L)] + + # Calculate the IPA gradient w.r.t. arc means. + # If an arc is on the longest path, the component of the gradient + # is the length of the length of that arc divided by its mean. + # If an arc is not on the longest path, the component of the gradient is zero. + gradient = np.zeros(len(self.factors["arcs"])) + + for i in range(len(longest_P)-1,0,-1): + backtrack = longest_P[i-1] + current = longest_P[i] + idx = self.factors["arcs"].index((backtrack, current)) + gradient[idx] = arc_length[str((backtrack, current))] / (self.factors["arc_means"][idx]) + + # Compose responses and gradients. + responses = {"longest_path_length": longest_path} + gradients = {response_key: {factor_key: np.nan for factor_key in self.specifications} for response_key in responses} + gradients["longest_path_length"]["arc_means"] = gradient + return responses, gradients + + +""" +Summary +------- +Minimize the duration of the longest path from a to i plus cost. +""" + + +class SANLongestPath(Problem): + """ + Base class to implement simulation-optimization problems. 
+ + Attributes + ---------- + name : string + name of problem + dim : int + number of decision variables + n_objectives : int + number of objectives + n_stochastic_constraints : int + number of stochastic constraints + minmax : tuple of int (+/- 1) + indicator of maximization (+1) or minimization (-1) for each objective + constraint_type : string + description of constraints types: + "unconstrained", "box", "deterministic", "stochastic" + variable_type : string + description of variable types: + "discrete", "continuous", "mixed" + lower_bounds : tuple + lower bound for each decision variable + upper_bounds : tuple + upper bound for each decision variable + gradient_available : bool + indicates if gradient of objective function is available + optimal_value : float + optimal objective function value + optimal_solution : tuple + optimal solution + model : Model object + associated simulation model that generates replications + model_default_factors : dict + default values for overriding model-level default factors + model_fixed_factors : dict + combination of overriden model-level factors and defaults + model_decision_factors : set of str + set of keys for factors that are decision variables + rng_list : list of mrg32k3a.mrg32k3a.MRG32k3a objects + list of RNGs used to generate a random initial solution + or a random problem instance + factors : dict + changeable factors of the problem + initial_solution : list + default initial solution from which solvers start + budget : int > 0 + max number of replications (fn evals) for a solver to take + specifications : dict + details of each factor (for GUI, data validation, and defaults) + random: bool + indicator of whether user want to build a random problem or a deterministic model + n_rng: int + Number of random number generator needed to build a random problem instance + + Arguments + --------- + name : str + user-specified name for problem + fixed_factors : dict + dictionary of user-specified problem factors + 
model_fixed factors : dict + subset of user-specified non-decision factors to pass through to the model + + See also + -------- + base.Problem + """ + def __init__(self, name="SAN-1", fixed_factors=None, model_fixed_factors=None, random=False, random_rng=None): + if fixed_factors is None: + fixed_factors = {} + if model_fixed_factors is None: + model_fixed_factors = {} + self.name = name + self.n_objectives = 1 + self.n_stochastic_constraints = 0 + self.minmax = (-1,) + self.constraint_type = "box" + self.variable_type = "continuous" + self.gradient_available = True + self.optimal_value = None + self.optimal_solution = None + self.model_default_factors = {} + self.model_decision_factors = {"arc_means"} + self.factors = fixed_factors + self.random = random # Randomlize problem and model or not + self.n_rngs = 3 # Number of rngs used for the random instance + self.specifications = { + "initial_solution": { + "description": "initial solution", + "datatype": tuple, + "default": (8,) * 13 + }, + "budget": { + "description": "max # of replications for a solver to take", + "datatype": int, + "default": 100000 + }, + "c": { + "description": "cost associated to each arc", + "datatype": tuple, + "default": (1,) * 13 + } + } + self.check_factor_list = { + "initial_solution": self.check_initial_solution, + "budget": self.check_budget, + "c": self.check_arc_costs + } + super().__init__(fixed_factors, model_fixed_factors) + # Instantiate model with fixed factors and over-riden defaults. 
+ self.model = SAN(self.model_fixed_factors, random) + # If random, generate random model factors and update model class + if random==True and random_rng != None: + self.model.attach_rng(random_rng) + self.dim = len(self.model.factors["arcs"]) + # Update every value and dimension according to the randomly generated case + self.factors["initial_solution"] = (8,) * self.dim + self.factors["c"] = (1,) * self.dim + self.lower_bounds = (1e-2,) * self.dim + self.upper_bounds = (np.inf,) * self.dim + self.Ci = None + self.Ce = None + self.di = None + self.de = None + + def check_arc_costs(self): + """ + Check if the arc costs are positive. + + Returns + ------- + bool + indicates if arc costs are positive + """ + positive = True + for x in list(self.factors["c"]): + positive = positive & x > 0 + return (len(self.factors["c"]) != self.dim) & positive + + def check_budget(self): + """ + Check if the budget is positive. + + Returns + ------- + bool + indicates if the budget is positive + """ + return self.factors["budget"] > 0 + + def vector_to_factor_dict(self, vector): + """ + Convert a vector of variables to a dictionary with factor keys + + Arguments + --------- + vector : tuple + vector of values associated with decision variables + + Returns + ------- + factor_dict : dictionary + dictionary with factor keys and associated values + """ + factor_dict = { + "arc_means": vector[:] + } + return factor_dict + + def factor_dict_to_vector(self, factor_dict): + """ + Convert a dictionary with factor keys to a vector + of variables. + + Arguments + --------- + factor_dict : dictionary + dictionary with factor keys and associated values + + Returns + ------- + vector : tuple + vector of values associated with decision variables + """ + vector = tuple(factor_dict["arc_means"]) + return vector + + def get_coefficient(self, exp_rng): + """ + Generate random coefficients for each arc. 
+ + Arguments + --------- + exp_rng : mrg32k3a.mrg32k3a.MRG32k3a object + random-number generator used to sample random coefficients + + Returns + ------- + c : list + vector of coefficients + """ + c = [] + for i in range(len(self.factors["c"])): + ci = exp_rng.expovariate(1) + c.append(ci) + + return c + + def random_budget(self, uni_rng): + """ + Generate random budget for the problem, proportional to the dimension. + + Arguments + --------- + uni_rng : mrg32k3a.mrg32k3a.MRG32k3a object + random-number generator used to sample random budget + """ + l = [100, 200, 300, 400, 500] + budget = uni_rng.choice(l) * self.dim + return budget + + def attach_rngs(self, random_rng): + """ + Attach random-number generators to the problem. + + Arguments + --------- + random_rng : list of mrg32k3a.mrg32k3a.MRG32k3a objects + list of rngs for problem to use when generating random instances + """ + # Attach rng for problem class and generate random problem factors for random instances + self.random_rng = random_rng + + if self.random == True: + self.factors["budget"] = self.random_budget(random_rng[0]) + self.factors["c"] = self.get_coefficient(random_rng[1]) + + print('budget: ', self.factors['budget']) + print('c: ', self.factors["c"]) + + return random_rng + + def response_dict_to_objectives(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of objectives. 
+ + Arguments + --------- + response_dict : dictionary + dictionary with response keys and associated values + + Returns + ------- + objectives : tuple + vector of objectives + """ + objectives = (response_dict["longest_path_length"],) + return objectives + + def response_dict_to_stoch_constraints(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of left-hand sides of stochastic constraints: E[Y] <= 0 + + Arguments + --------- + response_dict : dictionary + dictionary with response keys and associated values + + Returns + ------- + stoch_constraints : tuple + vector of LHSs of stochastic constraint + """ + stoch_constraints = None + return stoch_constraints + + def deterministic_stochastic_constraints_and_gradients(self, x): + """ + Compute deterministic components of stochastic constraints for a solution `x`. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_stoch_constraints : tuple + vector of deterministic components of stochastic constraints + det_stoch_constraints_gradients : tuple + vector of gradients of deterministic components of stochastic constraints + """ + det_stoch_constraints = None + det_stoch_constraints_gradients = ((0,) * self.dim,) # tuple of tuples – of sizes self.dim by self.dim, full of zeros + return det_stoch_constraints, det_stoch_constraints_gradients + + def deterministic_objectives_and_gradients(self, x): + """ + Compute deterministic components of objectives for a solution `x`. 
+ + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_objectives : tuple + vector of deterministic components of objectives + det_objectives_gradients : tuple + vector of gradients of deterministic components of objectives + """ + det_objectives = (np.sum(np.array(self.factors["c"]) / np.array(x))/len(x),) + det_objectives_gradients = (-np.array(self.factors["c"]) / np.array(x) ** 2 / len(x),) + return det_objectives, det_objectives_gradients + + def check_deterministic_constraints(self, x): + """ + Check if a solution `x` satisfies the problem's deterministic constraints. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + satisfies : bool + indicates if solution `x` satisfies the deterministic constraints. + """ + return np.all(np.array(x) >= 0) + + def get_random_solution(self, rand_sol_rng): + """ + Generate a random solution for starting or restarting solvers. + + Arguments + --------- + rand_sol_rng : mrg32k3a.mrg32k3a.MRG32k3a object + random-number generator used to sample a new random solution + + Returns + ------- + x : tuple + vector of decision variables· + """ + x = tuple([rand_sol_rng.lognormalvariate(lq=0.1, uq=10) for _ in range(self.dim)]) + return x + + +""" +Summary +------- +Minimize the duration of the longest path from a to i subject to some lower bounds in sum of arc_means. +""" + +class SANLongestPathConstr(Problem): + """ + Base class to implement simulation-optimization problems. 
+ + Attributes + ---------- + name : string + name of problem + dim : int + number of decision variables + n_objectives : int + number of objectives + n_stochastic_constraints : int + number of stochastic constraints + minmax : tuple of int (+/- 1) + indicator of maximization (+1) or minimization (-1) for each objective + constraint_type : string + description of constraints types: + "unconstrained", "box", "deterministic", "stochastic" + variable_type : string + description of variable types: + "discrete", "continuous", "mixed" + lower_bounds : tuple + lower bound for each decision variable + upper_bounds : tuple + upper bound for each decision variable + gradient_available : bool + indicates if gradient of objective function is available + optimal_value : float + optimal objective function value + optimal_solution : tuple + optimal solution + model : Model object + associated simulation model that generates replications + model_default_factors : dict + default values for overriding model-level default factors + model_fixed_factors : dict + combination of overriden model-level factors and defaults + model_decision_factors : set of str + set of keys for factors that are decision variables + rng_list : list of mrg32k3a.mrg32k3a.MRG32k3a objects + list of RNGs used to generate a random initial solution + or a random problem instance + factors : dict + changeable factors of the problem + initial_solution : list + default initial solution from which solvers start + budget : int > 0 + max number of replications (fn evals) for a solver to take + specifications : dict + details of each factor (for GUI, data validation, and defaults) + random: bool + indicator of whether user want to build a random problem or a deterministic model + n_rng: int + Number of random number generator needed to build a random problem instance + random_const: bool + indicator of whether to generate random constraints for the random problem instance or not + + Arguments + --------- + name : str + 
user-specified name for problem + fixed_factors : dict + dictionary of user-specified problem factors + model_fixed factors : dict + subset of user-specified non-decision factors to pass through to the model + + See also + -------- + base.Problem + """ + def __init__(self, name="SAN-2", fixed_factors=None, model_fixed_factors=None, random=False, random_rng=None): + if fixed_factors is None: + fixed_factors = {} + if model_fixed_factors is None: + model_fixed_factors = {} + self.name = name + self.n_objectives = 1 + self.n_stochastic_constraints = 0 + self.minmax = (-1,) + self.constraint_type = "box" + self.variable_type = "continuous" + self.gradient_available = True + self.optimal_value = None + self.optimal_solution = None + self.model_default_factors = {} + self.model_decision_factors = {"arc_means"} + self.factors = fixed_factors + self.random = random + self.random_const = False # Turn on if want to generate random constraints for random problem instance + if self.random_const: + self.num_con = 3 # Number of random constraints to generate + else: + self.num_con = 1 + self.n_rngs = 3 # Number of rngs used for the random instance + self.specifications = { + "initial_solution": { + "description": "initial solution", + "datatype": tuple, + "default": (15,) * 13 + }, + "budget": { + "description": "max # of replications for a solver to take", + "datatype": int, + "default": 100000 + }, + "arc_costs": { + "description": "cost associated to each arc", + "datatype": tuple, + "default": (1,) * 13 + }, + "r_const": { + "description": "random constraint for arc rates", + 'datatype': int, + "default": 0 + }, + "sum_lb": { + "description": "Lower bound for the sum of arc means", + "datatype": float, + "default": 100.0 + }, + "lbs":{ + "description": "Lower bounds for the selected sum of arc means", + "datatype": float, + "default": 0.0, + } + } + self.check_factor_list = { + "initial_solution": self.check_initial_solution, + "budget": self.check_budget, + "arc_costs": 
self.check_arc_costs, + "r_const": self.check_const, + "sum_lb": self.check_lb, + "lbs": self.check_lbs + } + super().__init__(fixed_factors, model_fixed_factors) + # Instantiate model with fixed factors and over-riden defaults. + self.model = SAN(self.model_fixed_factors, random) + if random==True and random_rng != None: + self.model.attach_rng(random_rng) + self.dim = len(self.model.factors["arcs"]) + self.factors["initial_solution"] = (15,) * self.dim + self.factors["arc_costs"] = (1,) * self.dim + self.lower_bounds = (1e-2,) * self.dim + self.upper_bounds = (np.inf,) * self.dim + self.Ci = -1 * np.ones(self.dim) + self.Ce = None + self.di = -1 * np.array([self.factors["sum_lb"]]) + self.de = None + + def check_arc_costs(self): + """ + Check if the arc costs are positive. + + Returns + ------- + bool + indicates if arc costs are positive + """ + positive = True + for x in list(self.factors["arc_costs"]): + positive = positive & x > 0 + return (len(self.factors["arc_costs"]) != self.dim) & positive + + def check_budget(self): + """ + Check if the budget is positive. + + Returns + ------- + bool + indicates if the budget is positive + """ + return self.factors["budget"] > 0 + + def check_const(self): + """ + Check if the random constraint is positive. + + Returns + ------- + bool + indicates if the random constraint is positive + """ + return self.factors["r_const"] >= 0 + + def check_lb(self): + """ + Check if the lower bound for sum of all arc rates is positive. + + Returns + ------- + bool + indicates if the lower bound is positive + """ + return self.factors["sum_lb"] >= 0 + + def check_lbs(self): + """ + Check if other potential lower bound is positive. 
+ + Returns + ------- + bool + indicates if other lower bound is positive + """ + return self.factors["lbs"] >= 0 + + def vector_to_factor_dict(self, vector): + """ + Convert a vector of variables to a dictionary with factor keys + + Arguments + --------- + vector : tuple + vector of values associated with decision variables + + Returns + ------- + factor_dict : dictionary + dictionary with factor keys and associated values + """ + factor_dict = { + "arc_means": vector[:] + } + return factor_dict + + def factor_dict_to_vector(self, factor_dict): + """ + Convert a dictionary with factor keys to a vector + of variables. + + Arguments + --------- + factor_dict : dictionary + dictionary with factor keys and associated values + + Returns + ------- + vector : tuple + vector of values associated with decision variables + """ + vector = tuple(factor_dict["arc_means"]) + return vector + + def get_coefficient(self, exp_rng): + """ + Generate random coefficients for each arc. + + Arguments + --------- + exp_rng : mrg32k3a.mrg32k3a.MRG32k3a object + random-number generator used to sample random coefficients + """ + if self.random == True: + c = [] + for i in range(len(self.factors["arc_costs"])): + ci = exp_rng.expovariate(1) + c.append(ci) + return c + else: + return self.factors['arc_costs'] + + def random_budget(self, random_rng): + """ + Generate random budget for the problem, proportional to the dimension. + + Arguments + --------- + uni_rng : mrg32k3a.mrg32k3a.MRG32k3a object + random-number generator used to sample random budget + """ + if self.random == True: + l = [10000, 20000] + budget = random_rng.choice(l) * self.dim + return budget + else: + return self.factors['budget'] + + def get_const(self, n, uni_rng): + """ + Generate random constraint for the problem, proportional to the dimension. 
+ + Arguments + --------- + n : int + number of constraints want to generate + uni_rng : mrg32k3a.mrg32k3a.MRG32k3a object + random-number generator used to sample random budget + """ + # Randomly choose a subset of arcs that have limited budget + C = [] + L = [] + for i in range(n): + if self.random_const == True: + const = uni_rng.sample(range(0, self.dim), int(self.dim/4)) + # lb = uni_rng.uniform(0, int(self.dim/4)) * uni_rng.uniform(1, 6) + lb = int(self.dim/4) * uni_rng.uniform(1, int(self.factors["sum_lb"]/self.dim)) + C.append(const) + L.append(lb) + else: + return [[i for i in range(self.dim)]], self.factors['sum_lb'] + return C, L + + def attach_rngs(self, random_rng): + """ + Attach random-number generators to the problem. + + Arguments + --------- + random_rng : list of mrg32k3a.mrg32k3a.MRG32k3a objects + list of rngs for problem to use when generating random instances + """ + # Attach rng for problem class and generate random problem factors for random instances + self.random_rng = random_rng + + if self.random: + self.factors["budget"] = self.random_budget(random_rng[0]) + + self.factors["arc_costs"] = self.get_coefficient(random_rng[1]) + print('c: ', self.factors["arc_costs"]) + + # Random constraint + if self.random_const: + self.factors["r_const"], self.factors['lbs'] = self.get_const(self.num_con, random_rng[2]) + self.factors["lbs"].append(self.factors["sum_lb"]) # Combine the sum_lb with the partial_lb + self.factors["r_const"].append([i for i in range(self.dim)]) # Combine the index related to sum_lb with the r_const + else: + self.factors["r_const"], self.factors['sum_lb'] = self.get_const(self.num_con, random_rng[2]) + self.factors["lbs"] = [self.factors["sum_lb"]] + else: + self.factors["r_const"] = [[i for i in range(self.dim)]] + self.factors["lbs"] = [self.factors["sum_lb"]] + + self.factors["lbs"] += [0 for i in range(self.dim)] # Require each arc means larger or equal to 0 + self.factors["r_const"] += [[i] for i in range(self.dim)] + 
print('r_const: ', self.factors["r_const"]) + print('lbs: ', self.factors['lbs']) + + return random_rng + + def response_dict_to_objectives(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of objectives. + + Arguments + --------- + response_dict : dictionary + dictionary with response keys and associated values + + Returns + ------- + objectives : tuple + vector of objectives + """ + objectives = (response_dict["longest_path_length"],) + return objectives + + def response_dict_to_stoch_constraints(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of left-hand sides of stochastic constraints: E[Y] <= 0 + + Arguments + --------- + response_dict : dictionary + dictionary with response keys and associated values + + Returns + ------- + stoch_constraints : tuple + vector of LHSs of stochastic constraint + """ + stoch_constraints = None + return stoch_constraints + + def deterministic_stochastic_constraints_and_gradients(self, x): + """ + Compute deterministic components of stochastic constraints for a solution `x`. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_stoch_constraints : tuple + vector of deterministic components of stochastic constraints + det_stoch_constraints_gradients : tuple + vector of gradients of deterministic components of stochastic constraints + """ + det_stoch_constraints = None + det_stoch_constraints_gradients = ((0,) * self.dim,) # tuple of tuples – of sizes self.dim by self.dim, full of zeros + return det_stoch_constraints, det_stoch_constraints_gradients + + def deterministic_objectives_and_gradients(self, x): + """ + Compute deterministic components of objectives for a solution `x`. 
+ + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_objectives : tuple + vector of deterministic components of objectives + det_objectives_gradients : tuple + vector of gradients of deterministic components of objectives + """ + + det_objectives = (0,) + det_objectives_gradients = ((0,) * self.dim,) + return det_objectives, det_objectives_gradients + + def check_deterministic_constraints(self, x): + """ + Check if a solution `x` satisfies the problem's deterministic constraints. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + satisfies : bool + indicates if solution `x` satisfies the deterministic constraints. + """ + + return np.all(np.array(x) >= 0) + + def find_feasible(self): + """ + Find an initial feasible solution (if not user-provided) + by interior point method. + + Arguments + --------- + problem : Problem object + simulation-optimization problem to solve + + Returns + ------- + x0 : ndarray + an initial feasible solution + tol: float + Floating point comparison tolerance + """ + c = [0 for i in range(self.dim)] + l1 = [-1 for i in range(self.dim)] + A = [l1] + b = [-self.factors["sum_lb"]] + if self.random_const: + b.extend([-plb for plb in self.factors["lbs"]]) + for idx in self.factors["r_const"]: + l2 = [-1 if i in idx else 0 for i in range(self.dim)] + A.append(l2) + + res = linprog(c, A_ub=A, b_ub=b, bounds=(0, None), method='interior-point') + + return res.x + + def check_feasible(self, x): + """ + Check whether a solution is feasible or not. + + Arguments + --------- + x : ndarray/list + current point + + Returns + ------- + feasible : bool + indicates if solution `x` satisfies the deterministic constraints. 
+
+ """
+ if sum(x) >= self.factors['sum_lb']:
+ for i in range(len(self.factors["lbs"])):
+ if sum([x[j] for j in self.factors["r_const"][i]]) < self.factors["lbs"][i]:
+ return False
+ return True
+ else:
+ return False
+
+ def get_random_solution(self, rand_sol_rng):
+ """
+ Generate a random solution for starting or restarting solvers.
+
+ Arguments
+ ---------
+ rand_sol_rng : mrg32k3a.mrg32k3a.MRG32k3a
+ random-number generator used to sample a new random solution
+
+ Returns
+ -------
+ x : tuple
+ vector of decision variables
+ """
+ if self.check_feasible(self.factors["initial_solution"]):
+ x0 = self.factors["initial_solution"]
+ else:
+ x0 = self.find_feasible()
+ x = rand_sol_rng.hit_and_run(x0, [10 * self.factors['sum_lb']], [[i for i in range(self.dim)]], self.factors["lbs"], self.factors["r_const"], self.dim, 20)
+
+ x = tuple(x)
+ return x
\ No newline at end of file
From 0895c3e2e860ff4543110379cc3f9ece9d79c641 Mon Sep 17 00:00:00 2001
From: Litong Liu <46491025+liulitong-Jessie@users.noreply.github.com>
Date: Thu, 16 Nov 2023 19:40:35 -0500
Subject: [PATCH 14/21] Add files via upload

---
 docs/gasso.rst | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 61 insertions(+)
 create mode 100644 docs/gasso.rst

diff --git a/docs/gasso.rst b/docs/gasso.rst
new file mode 100644
index 000000000..a4a86decb
--- /dev/null
+++ b/docs/gasso.rst
@@ -0,0 +1,61 @@
+Solver: Gradient-Based Adaptive Stochastic Search for Simulation Optimization Over Continuous Space (GASSO)
+================================================================
+
+Description:
+------------
+The solver iteratively generates a population of candidate solutions from a sampling distribution,
+and uses the performance of those candidates to update the sampling distribution.
+GASSO has two stages in each iteration:
+1. Stage I: Generate candidate solutions from some exponential family of distributions.
+2.
Stage II: Evaluate candidate solutions, and update the parameter of sampling distribution via +direct gradient search. + +Scope: +------ +* objective_type: single + +* constraint_type: box + +* variable_type: continuous + +Solver Factors: +--------------- +* crn_across_solns: Use CRN across solutions? + + * Default: True + +* N: Number of candidate solutions + + * Default: :math:`50 * \sqrt(dim)` + +* M: Number of function evaluations per candidate + + * Default: 10 + +* K: Number of iterations + + * Default: Budget/(N * M) + +* alpha_0: Determines the initial step size + + * Default: 50 + +* alpha_c: Determines the speed at which the step size decreases + + * Default: 1500 + +* alpha_p: Determines the rate at which step size gets smaller + + * Default: 0.6 + +* alpha_k: Step size + + * Default: :math:`\frac{alpha_0}{(k + \alpha_c) ^ {alpha_p}}` + + +References: +=========== +This solver is adapted from the article Enlu Zhou, Shalabh Bhatnagar (2018). +Zhou, E., & Bhatnagar, S. (2017). Gradient-based adaptive stochastic search for simulation optimization over continuous space. +*INFORMS Journal on Computing, 30(1), 154-167. +(https://doi.org/10.1287/ijoc.2017.0771) From dec8264e4de0c22cff1ef351922b047269bf1729 Mon Sep 17 00:00:00 2001 From: Litong Liu <46491025+liulitong-Jessie@users.noreply.github.com> Date: Thu, 16 Nov 2023 19:45:32 -0500 Subject: [PATCH 15/21] Add files via upload Implementation of solver GASSO --- simopt/solvers/gasso.py | 276 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 276 insertions(+) create mode 100644 simopt/solvers/gasso.py diff --git a/simopt/solvers/gasso.py b/simopt/solvers/gasso.py new file mode 100644 index 000000000..504555341 --- /dev/null +++ b/simopt/solvers/gasso.py @@ -0,0 +1,276 @@ +""" +Summary +------- +Iteratively generates population of candidate solutions from a sample distribution and use its performance +to update the sample distribution. +A detailed description of the solver can be found `here `_. 
+""" +from ..base import Solver +import numpy as np +import warnings +warnings.filterwarnings("ignore") + +class GASSO(Solver): + """ + A solver that iteratively generates population of candidate solutions from a sample distribution + and use its performance to update the sample distribution. + + Attributes + ---------- + name : string + name of solver + objective_type : string + description of objective types: + "single" or "multi" + constraint_type : string + description of constraints types: + "unconstrained", "box", "deterministic", "stochastic" + variable_type : string + description of variable types: + "discrete", "continuous", "mixed" + gradient_needed : bool + indicates if gradient of objective function is needed + factors : dict + changeable factors (i.e., parameters) of the solver + specifications : dict + details of each factor (for GUI, data validation, and defaults) + rng_list : list of mrg32k3a.mrg32k3a.MRG32k3a objects + list of RNGs used for the solver's internal purposes + + Arguments + --------- + name : str + user-specified name for solver + fixed_factors : dict + fixed_factors of the solver + + See also + -------- + base.Solver + """ + def __init__(self, name="GASSO", fixed_factors=None): + if fixed_factors is None: + fixed_factors = {} + self.name = name + self.objective_type = "single" + self.constraint_type = "box" + self.variable_type = "continuous" + self.gradient_needed = True + self.specifications = { + "crn_across_solns": { + "description": "use CRN across solutions?", + "datatype": bool, + "default": True + }, + "sample_size": { + "description": "sample size per solution", + "datatype": int, + "default": 10 + }, + "max_iter": { + "description": "maximum number of iterations", + "datatype": int, + "default": 10000 + }, + "rho": { + "description": "quantile parameter", + "datatype": float, + "default": 0.15 + }, + "M": { + "description": "times of simulations for each candidate solution", + "datatype": int, + "default": 15 + }, + 
"alpha_0": { + "description": "step size numerator", + "datatype": int, + "default": 15 + }, + "alpha_c": { + "description": "step size denominator constant", + "datatype": int, + "default": 150 + }, + "alpha_p": { + "description": "step size denominator exponent", + "datatype": float, + "default": 0.6 + }, + "MaxNumSoln": { + "description": "maximum number of solutions that can be reported within max budget", + "datatype": int, + "default": 10002 + } + } + self.check_factor_list = { + "crn_across_solns": self.check_crn_across_solns, + "sample_size": self.check_sample_size, + "max_iter": self.check_max_iter, + "rho": self.check_rho, + "M": self.check_M, + "alpha_0": self.check_alpha_0, + "alpha_c": self.check_alpha_c, + "alpha_p": self.check_alpha_p, + "MaxNumSoln": self.check_MaxNumSoln + } + super().__init__(fixed_factors) + + def check_sample_size(self): + return self.factors["sample_size"] > 0 + + def check_max_iter(self): + return self.factors["max_iter"] > 0 + + def check_rho(self): + return 0 < self.factors["rho"] < 1 + + def check_M(self): + return self.factors["M"] > 0 + + def check_alpha_0(self): + return self.factors["alpha_0"] > 0 + + def check_alpha_c(self): + return self.factors["alpha_c"] > 0 + + def check_alpha_p(self): + return 0 < self.factors["alpha_p"] < 1 + + def check_MaxNumSoln(self): + return self.factors["MaxNumSoln"] > 0 + + def solve(self, problem): + """ + Run a single macroreplication of a solver on a problem. 
+ + Arguments + --------- + problem : Problem object + simulation-optimization problem to solve + + Returns + ------- + recommended_solns : list of Solution objects + list of solutions recommended throughout the budget + intermediate_budgets : list of ints + list of intermediate budgets when recommended solutions changes + """ + dim = problem.dim + rand_sol_rng = self.rng_list[0] + x_ini = [problem.get_random_solution(rand_sol_rng) for i in range(9)] + x_ini.append(problem.factors["initial_solution"]) + + # Initialize sampling distribution based on the initial solution population + mu_k = np.mean(x_ini, axis = 0) # The mean for each dim + var_k = np.var(x_ini, axis = 0) + theta1_k = mu_k/var_k + theta2_k = -0.5 * np.ones(problem.dim) / var_k + theta_k = np.append(theta1_k, theta2_k) + N = int(50 * np.sqrt(dim)) + K = int(np.floor(problem.factors['budget']/(N * self.factors['M']))) + MaxNumSoln = K + 2 + + recommended_solns = [] + intermediate_budgets = [] + expended_budget = 0 + # Designate random number generator for random sampling. 
+ find_next_soln_rng = self.rng_list[1] + + # Get random solutions from normal distribution (truncated) + x = np.zeros((N, dim)) + kk = 0 + while kk < N: + normal_vec = np.array([find_next_soln_rng.normalvariate() for i in range(N * dim)]).reshape((N,dim)) + X_k = np.multiply(normal_vec, np.sqrt(var_k)) + np.ones((N, dim)) * mu_k + for i in range(N): + if all(X_k[i, :] >= problem.lower_bounds) and all(X_k[i, :] <= problem.upper_bounds) and kk < N: + x[kk, :] = X_k[i, :] + kk += 1 + X_k = x + + # Create the initial solution based on the truncated normal + new_solution = self.create_new_solution(problem.factors['initial_solution'], problem) + recommended_solns.append(new_solution) + intermediate_budgets.append(expended_budget) + problem.simulate(new_solution, self.factors['M']) + expended_budget += self.factors['M'] + + # Track the internal updates + Hbar = np.zeros(K) + xbar = np.zeros((K, dim)) + hvar = np.zeros(K) + k = 0 + + # Sequentially generate random solutions and simulate them. + while expended_budget < problem.factors['budget'] and k < K: + # Update alpha + alpha_k = self.factors['alpha_0'] / (k + self.factors['alpha_c']) ** self.factors['alpha_p'] + H = np.zeros(N) + H_var = np.zeros(N) + # Sample N new solution candidates by updated distribution + for i in range(N): + new_solution = self.create_new_solution(X_k[i, :], problem) + X_k[i, :] = new_solution.x + problem.simulate(new_solution, self.factors['M']) + expended_budget += self.factors['M'] + H[i] = problem.minmax * new_solution.objectives_mean + H_var[i] = np.var(new_solution.objectives) + # Find the best one among the N candidates + Hbar[k], idx = np.max(H), np.argmax(H) + hvar[k] = H_var[idx] + xbar[k, :] = X_k[idx, :] + new_solution = X_k[idx, :] + + if k >= 1: + # Compare the new best candidate to previous best solution, if better then update + if Hbar[k] < Hbar[k-1] and Hbar[k] != None: + Hbar[k] = Hbar[k-1] + xbar[k, :] = xbar[k-1, :] + hvar[k] = hvar[k-1] + new_solution = xbar[k, :] + + # 
Track the new candidate and update recommended_sols and budgets + new_solution = self.create_new_solution(tuple(xbar[k, :]), problem) + recommended_solns.append(new_solution) + intermediate_budgets.append(expended_budget) + # Shape function + G_sort = np.sort(H)[::-1] + gm = G_sort[int(np.ceil(N * self.factors['rho']))] + S_theta = H > gm + + # Estimate gradient and hessian + w_k = S_theta/sum(S_theta) + CX_k = np.hstack((X_k, X_k * X_k)).T + grad_k = np.matmul(w_k.T, CX_k.T) - np.vstack((mu_k, var_k + mu_k * mu_k)).reshape(1, -1) + Hes_k = -np.cov(CX_k) + Hes_k_inv = np.linalg.inv(Hes_k + 1e-5 * np.eye(2*dim)) @ np.diag(np.ones(2*dim)) + # Update the parameter using an SA iteration + theta_k -= (alpha_k * (Hes_k_inv @ grad_k.T)).reshape(1, -1)[0] + theta1_k = theta_k[:dim] + theta2_k = theta_k[dim: 2 * dim] + var_k = -0.5/theta2_k + mu_k = theta1_k * var_k + + # Project mu_k and var_k to feasible parameter space + for i in range(dim): + if mu_k[i] < problem.lower_bounds[i]: + mu_k[i] = problem.lower_bounds[i] + if mu_k[i] > problem.upper_bounds[i]: + mu_k[i] = problem.upper_bounds[i] + var_k = abs(var_k) + + # Generate new candidate solutions from the truncated normal distribution + x = np.zeros((N, dim)) + kk = 0 + while kk < N: + normal_vec = np.array([find_next_soln_rng.normalvariate() for i in range(N * dim)]).reshape((N,dim)) + X_k = np.multiply(normal_vec, np.sqrt(var_k)) + np.ones((N, dim)) * mu_k + for i in range(N): + if all(X_k[i, :] >= problem.lower_bounds) and all(X_k[i, :] <= problem.upper_bounds) and kk < N: + x[kk, :] = X_k[i, :] + kk += 1 + k += 1 + X_k = x + + return recommended_solns, intermediate_budgets \ No newline at end of file From c7f08b2fbfcffe6c94e5215e1fe4eae104b80057 Mon Sep 17 00:00:00 2001 From: Litong Liu <46491025+liulitong-Jessie@users.noreply.github.com> Date: Wed, 13 Dec 2023 16:59:31 -0500 Subject: [PATCH 16/21] Add files via upload --- docs/openjackson.rst | 117 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 117 
insertions(+) create mode 100644 docs/openjackson.rst diff --git a/docs/openjackson.rst b/docs/openjackson.rst new file mode 100644 index 000000000..fc26b463d --- /dev/null +++ b/docs/openjackson.rst @@ -0,0 +1,117 @@ +Model: Open Jackson Network +=============================================== + +Description: +------------ +This model represents an Open Jackson Network with Poisson arrival time, exponential service time, and probabilistic routing. + +Sources of Randomness: +---------------------- +There are 3 sources of randomness in this model: +1. Exponential inter-arrival time of customers at each station. +2. Exponential service time of customers at each station. +3. Routing of customers at each station after service. + +Model Factors: +-------------- +* number_queues: The number of queues in the network. + * Default: 3 + +* arrival_alphas: The rate parameter of the exponential distribution for the inter-arrival time of customers at each station. + * Default: [1,1,1,1,1] + +* service_mus: The rate parameter of exponential distribution for the service time of customers at each station. + * Default: [2,2,2,2,2] + +* routing_matrix: The routing probabilities for a customer at station i to go to service j after service. + The departure probability from station i is :math: `1 - \sum_{j=1}^{n} (P_{ij})` + where n is the number of stations, and P is the routing matrix. + * Default: [[0.1, 0.1, 0.2, 0.2, 0], + [0.1, 0.1, 0.2, 0.2, 0], + [0.1, 0.1, 0, 0.1, 0.3], + [0.1, 0.1, 0.1, 0, 0.3], + [0.1, 0.1, 0.1, 0.1, 0.2]] + +* t_end: The time at which the simulation ends. + * Default: 200 + +* warm_up: The time at which the warm-up period ends. Relevant only when steady_state_initialization is False. + * Default: 100 + +* steady_state_initialization: Whether to initialize with queues sampled from steady state. + If so, we sample geometric distribution with parameter lambdas/service_mus for each queue and initialize the queues with the sample. 
+ * Default: True + +Below Factors are only relevant when creating random instances of the Model + +* density_p: The probability of an edge existing in the graph in the random instance. Higher the value, denser the graph. + * Default: 0.5 + +* random_arrival_parameter: The parameter for the random arrival rate exponential distribution when creating a random instance. + * Default: 1 + + +Responses: +---------- +* average_queue_length: The time-average queue length at each station. + +References: +=========== +This model is adapted from Jackson, James R. (1957). +"Networks of waiting lines". Operations Research. 4 (4): 518–521. +(doi:10.1287/opre.5.4.518) + +Optimization Problem: OpenJacksonMinQueue (OPENJACKSON-1) +================================================================ + +Decision Variables: +------------------- +* service_mus + +Objectives: +----------- +Minimize the sum of average queue length at each station. + +Constraints: +------------ +We require that the sum of service_mus at each station to be less than service_rates_budget. + +Problem Factors: +---------------- +* budget: Max # of replications for a solver to take. + + * Default: 1000 + +* service_rates_budget: Total budget to be allocated to service_mus_budget. + + * Default: 150 + +Below factors are only relevant when creating random instances of the Problem + +* gamma_mean: Scale of the mean of gamma distribution when generating service rates upper bound in random instances. + + * Default: 0.5 + +* gamma_scale: Shape of gamma distribution when generating service rates upper bound in random instances. + + * Default: 5 + +Fixed Model Factors: +-------------------- +* N/A + +Starting Solution: +------------------ +* initial_solution: lambdas * (service_rates_budget/sum(lambdas)) + +Random Solutions: +----------------- +Sample a Dirichlet distribution that sum to service_rates_budget - sum(lambdas). Then add lambdas to the sample. 
+ +Optimal Solution: +----------------- +Unknown + +Optimal Objective Function Value: +--------------------------------- +Unknown \ No newline at end of file From e19a89161f995641a517fa56beca6015c1034f7a Mon Sep 17 00:00:00 2001 From: Litong Liu <46491025+liulitong-Jessie@users.noreply.github.com> Date: Wed, 13 Dec 2023 17:12:00 -0500 Subject: [PATCH 17/21] Add files via upload --- simopt/solvers/active_set.py | 643 +++++++++++++++++++++++++++++++++++ 1 file changed, 643 insertions(+) create mode 100644 simopt/solvers/active_set.py diff --git a/simopt/solvers/active_set.py b/simopt/solvers/active_set.py new file mode 100644 index 000000000..f85a82e27 --- /dev/null +++ b/simopt/solvers/active_set.py @@ -0,0 +1,643 @@ +""" +Summary +------- +ACTIVESET: An active set algorithm for problems with linear constraints i.e., Ce@x = de, Ci@x <= di. +A detailed description of the solver can be found `here `_. +""" +import numpy as np +import cvxpy as cp +import warnings +warnings.filterwarnings("ignore") + +from ..base import Solver + + +class ACTIVESET(Solver): + """ + The Active Set solver. 
+ + Attributes + ---------- + name : string + name of solver + objective_type : string + description of objective types: + "single" or "multi" + constraint_type : string + description of constraints types: + "unconstrained", "box", "deterministic", "stochastic" + variable_type : string + description of variable types: + "discrete", "continuous", "mixed" + gradient_needed : bool + indicates if gradient of objective function is needed + factors : dict + changeable factors (i.e., parameters) of the solver + specifications : dict + details of each factor (for GUI, data validation, and defaults) + rng_list : list of rng.MRG32k3a objects + list of RNGs used for the solver's internal purposes + + Arguments + --------- + name : str + user-specified name for solver + fixed_factors : dict + fixed_factors of the solver + + See also + -------- + base.Solver + """ + def __init__(self, name="ACTIVESET", fixed_factors={}): + self.name = name + self.objective_type = "single" + self.constraint_type = "deterministic" + self.variable_type = "continuous" + self.gradient_needed = False + self.specifications = { + "crn_across_solns": { + "description": "use CRN across solutions?", + "datatype": bool, + "default": True + }, + "r": { + "description": "number of replications taken at each solution", + "datatype": int, + "default": 50 #30, 50 + }, + "alpha": { + "description": "tolerance for sufficient decrease condition.", + "datatype": float, + "default": 0.2 #0.2 + }, + "beta": { + "description": "step size reduction factor in line search.", + "datatype": float, + "default": 0.9 #0.9 + }, + "alpha_max": { + "description": "maximum step size.", + "datatype": float, + "default": 5 #10.0, 5 + }, + "lambda": { + "description": "magnifying factor for r inside the finite difference function", + "datatype": int, + "default": 2 #2 + }, + "tol": { + "description": "floating point tolerance for checking tightness of constraints", + "datatype": float, + "default": 1e-7 + }, + "tol2": { + 
"description": "floating point tolerance for checking closeness of dot product to zero", + "datatype": float, + "default": 1e-5 + }, + "finite_diff_step": { + "description": "step size for finite difference", + "datatype": float, + "default": 1e-5 + } + } + self.check_factor_list = { + "crn_across_solns": self.check_crn_across_solns, + "r": self.check_r, + "alpha": self.check_alpha, + "beta": self.check_beta, + "alpha_max": self.check_alpha_max, + "lambda": self.check_lambda, + "tol": self.check_tol, + "tol2": self.check_tol2, + "finite_diff_step": self.check_finite_diff_step + } + super().__init__(fixed_factors) + + def check_r(self): + return self.factors["r"] > 0 + + def check_alpha(self): + return self.factors["alpha"] > 0 + + def check_beta(self): + return self.factors["beta"] > 0 & self.factors["beta"] < 1 + + def check_alpha_max(self): + return self.factors["alpha_max"] > 0 + + def check_lambda(self): + return self.factors["lambda"] > 0 + + def check_tol(self): + return self.factors["tol"] > 0 + + def check_tol2(self): + return self.factors["tol2"] > 0 + + def check_finite_diff_step(self): + return self.factors["finite_diff_step"] > 0 + + def solve(self, problem): + """ + Run a single macroreplication of a solver on a problem. + + Arguments + --------- + problem : Problem object + simulation-optimization problem to solve + crn_across_solns : bool + indicates if CRN are used when simulating different solutions + + Returns + ------- + recommended_solns : list of Solution objects + list of solutions recommended throughout the budget + intermediate_budgets : list of ints + list of intermediate budgets when recommended solutions changes + """ + recommended_solns = [] + intermediate_budgets = [] + expended_budget = 0 + + # Default values. 
        r = self.factors["r"]
        alpha = self.factors["alpha"]
        beta = self.factors["beta"]
        tol = self.factors["tol"]
        tol2 = self.factors["tol2"]
        max_step = self.factors["alpha_max"]  # Maximum step size.

        # Upper bound and lower bound.
        lower_bound = np.array(problem.lower_bounds)
        upper_bound = np.array(problem.upper_bounds)

        # Input inequality and equality constraint matrix and vector.
        # Ci x <= di
        # Ce x = de
        Ci = problem.Ci
        di = problem.di
        Ce = problem.Ce
        de = problem.de

        # Indices of the finite (i.e., non-redundant) upper/lower bounds.
        ub_inf_idx = np.where(~np.isinf(upper_bound))[0]
        lb_inf_idx = np.where(~np.isinf(lower_bound))[0]

        # Form a constraint coefficient matrix where all the equality constraints are put on top and
        # all the bound constraints in the bottom and a constraint coefficient vector.
        if (Ce is not None) and (de is not None) and (Ci is not None) and (di is not None):
            C = np.vstack((Ce, Ci))
            d = np.vstack((de.T, di.T))
        elif (Ce is not None) and (de is not None):
            C = Ce
            d = de.T
        elif (Ci is not None) and (di is not None):
            C = Ci
            d = di.T
        else:
            # NOTE(review): np.empty leaves uninitialized placeholder rows here;
            # downstream code appears to rely on unconstr_flag to avoid using
            # them, but finite_diff receives C and d unconditionally — confirm.
            C = np.empty([1, problem.dim])
            d = np.empty([1, 1])

        # Append finite bound constraints as generic linear inequalities:
        # x <= ub and -x <= -lb.
        if len(ub_inf_idx) > 0:
            C = np.vstack((C, np.identity(upper_bound.shape[0])))
            d = np.vstack((d, upper_bound[np.newaxis].T))
        if len(lb_inf_idx) > 0:
            C = np.vstack((C, -np.identity(lower_bound.shape[0])))
            d = np.vstack((d, -lower_bound[np.newaxis].T))

        # Number of equality constraints.
        if (Ce is not None) and (de is not None):
            neq = len(de)
        else:
            neq = 0

        # Checker for whether the problem is unconstrained.
        unconstr_flag = (Ce is None) & (Ci is None) & (di is None) & (de is None) & (all(np.isinf(lower_bound))) & (all(np.isinf(upper_bound)))

        # Start with the initial solution.
        new_solution = self.create_new_solution(problem.factors["initial_solution"], problem)
        new_x = new_solution.x

        # If the initial solution is not feasible, generate one using phase one simplex.
        if (not unconstr_flag) & (not self._feasible(new_x, problem, tol)):
            new_x = self.find_feasible_initial(problem, Ce, Ci, de, di, tol)
            new_solution = self.create_new_solution(tuple(new_x), problem)

        # Use r simulated observations to estimate the objective value.
        problem.simulate(new_solution, r)
        expended_budget += r
        best_solution = new_solution
        recommended_solns.append(new_solution)
        intermediate_budgets.append(expended_budget)

        # Active constraint index vector.
        acidx = []
        if not unconstr_flag:
            # Initialize the active set to be the set of indices of the tight constraints.
            # Equality constraints (indices < neq) are always active.
            cx = np.dot(C, new_x)
            for j in range(cx.shape[0]):
                if j < neq or np.isclose(cx[j], d[j], rtol=0, atol= tol):
                    acidx.append(j)

        # Main active-set loop: iterate until the simulation budget is spent.
        while expended_budget < problem.factors["budget"]:
            new_x = new_solution.x
            # # Check variable bounds.
            # forward = np.isclose(new_x, lower_bound, atol = tol).astype(int)
            # backward = np.isclose(new_x, upper_bound, atol = tol).astype(int)
            # # BdsCheck: 1 stands for forward, -1 stands for backward, 0 means central diff.
            # BdsCheck = np.subtract(forward, backward)

            if problem.gradient_available:
                # Use IPA gradient if available.
                grad = -1 * problem.minmax[0] * new_solution.objectives_gradients_mean[0]
            else:
                # Use finite difference to estimate gradient if IPA gradient is not available.
                grad, budget_spent = self.finite_diff(new_solution, problem, r, C, d, stepsize = alpha)
                expended_budget += budget_spent
                # A while loop to prevent zero gradient: re-estimate with more
                # replications until a nonzero gradient (or budget runs out).
                while np.all((grad == 0)):
                    if expended_budget > problem.factors["budget"]:
                        break
                    grad, budget_spent = self.finite_diff(new_solution, problem, r, C, d)
                    expended_budget += budget_spent
                    # Update r after each iteration.
                    r = int(self.factors["lambda"] * r)

            # If the active set is empty, search on negative gradient.
            if len(acidx) == 0:
                dir = -grad
            else:
                # Find the search direction and Lagrange multipliers of the direction-finding problem.
+ dir, lmbd, = self.compute_search_direction(acidx, grad, problem, C) + # If the optimal search direction is 0 + if (np.isclose(np.linalg.norm(dir), 0, rtol=0, atol=tol2)): + print('dir: ', dir) + # Terminate if Lagrange multipliers of the inequality constraints in the active set are all nonnegative. + if unconstr_flag or np.all(lmbd[neq:] >= 0): + print('break') + break + # Otherwise, drop the inequality constraint in the active set with the most negative Lagrange multiplier. + else: + # q = acidx[neq + np.argmin(lmbd[neq:][lmbd[neq:] < 0])] + q = acidx[neq + np.argmin(lmbd[neq:])] + print('q: ', q) + acidx.remove(q) + else: + if not unconstr_flag: + idx = list(set(np.arange(C.shape[0])) - set(acidx)) # Constraints that are not in the active set. + # If all constraints are feasible. + if unconstr_flag or np.all(C[idx,:] @ dir <= 0): + # Line search to determine a step_size. + print('line search 1') + new_solution, step_size, expended_budget, _ = self.line_search(problem, expended_budget, r, grad, new_solution, max_step, dir, alpha, beta) + print('budget: ', expended_budget) + # Update maximum step size for the next iteration. + max_step = step_size + + # Ratio test to determine the maximum step size possible + else: + # Get all indices not in the active set such that Ai^Td>0 + r_idx = list(set(idx).intersection(set((C @ dir > 0).nonzero()[0]))) + # Compute the ratio test + ra = d[r_idx,:].flatten() - C[r_idx, :] @ new_x + ra_d = C[r_idx, :] @ dir + # Initialize maximum step size. + s_star = np.inf + # Initialize blocking constraint index. + q = -1 + # Perform ratio test. + for i in range(len(ra)): + if ra_d[i] - tol > 0: + s = ra[i]/ra_d[i] + if s < s_star: + s_star = s + q = r_idx[i] + # If there is no blocking constraint (i.e., s_star >= 1) + if s_star >= 1: + # print('no blocking c') + # Line search to determine a step_size. 
+ print('line search 2') + new_solution, step_size, expended_budget, _ = self.line_search(problem, expended_budget, r, grad, new_solution, s_star, dir, alpha, beta) + print('budget: ', expended_budget) + # If there is a blocking constraint (i.e., s_star < 1) + else: + # Add blocking constraint to the active set. + if q not in acidx: + acidx.append(q) + # No need to do line search if s_star is 0. + if s_star > 0: + # Line search to determine a step_size. + print('line search 3') + new_solution, step_size, expended_budget, count = self.line_search(problem, expended_budget, r, grad, new_solution, s_star, dir, alpha, beta) + print('budget: ', expended_budget) + # Append new solution. + if (problem.minmax[0] * new_solution.objectives_mean > problem.minmax[0] * best_solution.objectives_mean): + best_solution = new_solution + recommended_solns.append(new_solution) + intermediate_budgets.append(expended_budget) + print(problem.minmax[0] * new_solution.objectives_mean) + return recommended_solns, intermediate_budgets + + + def compute_search_direction(self, acidx, grad, problem, C): + ''' + Compute a search direction by solving a direction-finding quadratic subproblem at solution x. + + Arguments + --------- + acidx: list + list of indices of active constraints + grad : ndarray + the estimated objective gradient at new_solution + problem : Problem object + simulation-optimization problem to solve + C : ndarray + constraint matrix + + Returns + ------- + d : ndarray + search direction + lmbd : ndarray + Lagrange multipliers for this LP + ''' + # Define variables. + d = cp.Variable(problem.dim) + + # Define constraints. + constraints = [C[acidx, :] @ d == 0] + + # Define objective. 
+ obj = cp.Minimize(grad @ d + 0.5 * cp.quad_form(d, np.identity(problem.dim))) + prob = cp.Problem(obj, constraints) + prob.solve() + # Get Lagrange multipliers + lmbd = prob.constraints[0].dual_value + + dir = np.array(d.value) + dir[np.abs(dir) < self.factors["tol"]] = 0 + + return dir, lmbd + + + def finite_diff(self, new_solution, problem, r, C, d, stepsize = 1e-5, tol = 1e-7): + ''' + Finite difference for approximating objective gradient at new_solution. + + Arguments + --------- + new_solution : Solution object + a solution to the problem + problem : Problem object + simulation-optimization problem to solve + r : int + number of replications taken at each solution + C : ndarray + constraint matrix + d : ndarray + constraint vector + stepsize: float + step size for finite differences + + Returns + ------- + grad : ndarray + the estimated objective gradient at new_solution + budget_spent : int + budget spent in finite difference + ''' + + BdsCheck = np.zeros(problem.dim) + fn = -1 * problem.minmax[0] * new_solution.objectives_mean + new_x = new_solution.x + # Store values for each dimension. + FnPlusMinus = np.zeros((problem.dim, 3)) + grad = np.zeros(problem.dim) + + for i in range(problem.dim): + # Initialization. + x1 = list(new_x) + x2 = list(new_x) + # Forward stepsize. + steph1 = stepsize + # Backward stepsize. + steph2 = stepsize + + dir1 = np.zeros(problem.dim) + dir1[i] = 1 + dir2 = np.zeros(problem.dim) + dir2[i] = -1 + + ra = d.flatten() - C @ new_x + ra_d = C @ dir1 + # Initialize maximum step size. + temp_steph1 = np.inf + # Perform ratio test. + for j in range(len(ra)): + if ra_d[j] - tol > 0: + s = ra[j]/ra_d[j] + if s < steph1: + temp_steph1 = s + steph1 = min(temp_steph1, steph1) + + ra_d = C @ dir2 + # Initialize maximum step size. + temp_steph2 = np.inf + # Perform ratio test. 
+ for j in range(len(ra)): + if ra_d[j] - tol > 0: + s = ra[j]/ra_d[j] + if s < steph2: + temp_steph2 = s + steph2 = min(temp_steph2, steph2) + + if (steph1 != 0) & (steph2 != 0): + BdsCheck[i] = 0 + elif steph1 == 0: + BdsCheck[i] = -1 + else: + BdsCheck[i] = 1 + + # Decide stepsize. + # Central diff. + if BdsCheck[i] == 0: + FnPlusMinus[i, 2] = min(steph1, steph2) + x1[i] = x1[i] + FnPlusMinus[i, 2] + x2[i] = x2[i] - FnPlusMinus[i, 2] + # Forward diff. + elif BdsCheck[i] == 1: + FnPlusMinus[i, 2] = steph1 + x1[i] = x1[i] + FnPlusMinus[i, 2] + # Backward diff. + else: + FnPlusMinus[i, 2] = steph2 + x2[i] = x2[i] - FnPlusMinus[i, 2] + + x1_solution = self.create_new_solution(tuple(x1), problem) + if BdsCheck[i] != -1: + problem.simulate_up_to([x1_solution], r) + fn1 = -1 * problem.minmax[0] * x1_solution.objectives_mean + # First column is f(x+h,y). + FnPlusMinus[i, 0] = fn1 + x2_solution = self.create_new_solution(tuple(x2), problem) + if BdsCheck[i] != 1: + problem.simulate_up_to([x2_solution], r) + fn2 = -1 * problem.minmax[0] * x2_solution.objectives_mean + # Second column is f(x-h,y). + FnPlusMinus[i, 1] = fn2 + # Calculate gradient. + if BdsCheck[i] == 0: + grad[i] = (fn1 - fn2) / (2 * FnPlusMinus[i, 2]) + elif BdsCheck[i] == 1: + grad[i] = (fn1 - fn) / FnPlusMinus[i, 2] + elif BdsCheck[i] == -1: + grad[i] = (fn - fn2) / FnPlusMinus[i, 2] + budget_spent = (2 * problem.dim - np.sum(BdsCheck != 0)) * r + return grad, budget_spent + + def line_search(self, problem, expended_budget, r, grad, cur_sol, alpha_0, d, alpha, beta): + """ + A backtracking line-search along [x, x + rd] assuming all solution on the line are feasible. 
+ + Arguments + --------- + problem : Problem object + simulation-optimization problem to solve + expended_budget: int + current expended budget + r : int + number of replications taken at each solution + grad : ndarray + objective gradient of cur_sol + cur_sol : Solution object + current solution + alpha_0 : float + maximum step size allowed + d : ndarray + search direction + alpha: float + tolerance for sufficient decrease condition + beta: float + step size reduction factor + + Returns + ------- + x_new_solution : Solution + a solution obtained by line search + step_size : float + computed step size + expended_budget : int + updated expended budget + """ + x = cur_sol.x + fx = -1 * problem.minmax[0] * cur_sol.objectives_mean + step_size = alpha_0 + count = 0 + x_new_solution = cur_sol + while True: + if expended_budget > problem.factors["budget"]: + break + x_new = x + step_size * d + # Create a solution object for x_new. + x_new_solution = self.create_new_solution(tuple(x_new), problem) + # Use r simulated observations to estimate the objective value. + problem.simulate(x_new_solution, r) + expended_budget += r + # Check the sufficient decrease condition. + f_new = -1 * problem.minmax[0] * x_new_solution.objectives_mean + if f_new < fx + alpha * step_size * np.dot(grad, d): + break + step_size *= beta + count += 1 + if count >= 50: + break + + return x_new_solution, step_size, expended_budget, count + + def find_feasible_initial(self, problem, Ae, Ai, be, bi, tol): + ''' + Find an initial feasible solution (if not user-provided) + by solving phase one simplex. 
+ + Arguments + --------- + problem : Problem object + simulation-optimization problem to solve + C: ndarray + constraint coefficient matrix + d: ndarray + constraint coefficient vector + + Returns + ------- + x0 : ndarray + an initial feasible solution + tol: float + Floating point comparison tolerance + ''' + upper_bound = np.array(problem.upper_bounds) + lower_bound = np.array(problem.lower_bounds) + + # Define decision variables. + x = cp.Variable(problem.dim) + + # Define constraints. + constraints = [] + + if (Ae is not None) and (be is not None): + constraints.append(Ae @ x == be.ravel()) + if (Ai is not None) and (bi is not None): + constraints.append(Ai @ x <= bi.ravel()) + + # Removing redundant bound constraints. + ub_inf_idx = np.where(~np.isinf(upper_bound))[0] + if len(ub_inf_idx) > 0: + for i in ub_inf_idx: + constraints.append(x[i] <= upper_bound[i]) + lb_inf_idx = np.where(~np.isinf(lower_bound)) + if len(lb_inf_idx) > 0: + for i in lb_inf_idx: + constraints.append(x[i] >= lower_bound[i]) + + # Define objective function. + obj = cp.Minimize(0) + + # Create problem. + model = cp.Problem(obj, constraints) + + # Solve problem. + model.solve(solver = cp.SCIPY) + + # Check for optimality. + if model.status not in [cp.OPTIMAL, cp.OPTIMAL_INACCURATE] : + raise ValueError("Could not find feasible x0") + x0 = x.value + if not self._feasible(x0, problem, tol): + raise ValueError("Could not find feasible x0") + + return x0 + + def _feasible(self, x, problem, tol): + """ + Check whether a solution x is feasible to the problem. 

        Arguments
        ---------
        x : tuple
            a solution vector
        problem : Problem object
            simulation-optimization problem to solve
        tol: float
            Floating point comparison tolerance
        """
        x = np.asarray(x)
        lb = np.asarray(problem.lower_bounds)
        ub = np.asarray(problem.upper_bounds)
        res = True
        # Inequality constraints are checked with a tolerance slack;
        # equality constraints with an absolute closeness test.
        if (problem.Ci is not None) and (problem.di is not None):
            res = res & np.all(problem.Ci @ x <= problem.di + tol)
        if (problem.Ce is not None) and (problem.de is not None):
            res = res & (np.allclose(np.dot(problem.Ce, x), problem.de, rtol=0, atol=tol))
        # NOTE(review): bound checks use no tolerance, unlike the linear
        # constraints above — confirm whether that asymmetry is intended.
        return res & (np.all(x >= lb)) & (np.all(x <= ub))
\ No newline at end of file

From 794e201162dc1cae896e16b809127f2639140f20 Mon Sep 17 00:00:00 2001
From: Litong Liu <46491025+liulitong-Jessie@users.noreply.github.com>
Date: Wed, 13 Dec 2023 17:12:39 -0500
Subject: [PATCH 18/21] Add files via upload

---
 simopt/models/openjackson.py | 1086 ++++++++++++++++++++++++++++++++++
 1 file changed, 1086 insertions(+)
 create mode 100644 simopt/models/openjackson.py

diff --git a/simopt/models/openjackson.py b/simopt/models/openjackson.py
new file mode 100644
index 000000000..de5413355
--- /dev/null
+++ b/simopt/models/openjackson.py
@@ -0,0 +1,1086 @@
"""
Summary
-------
Simulate an open jackson network
"""
import autograd.numpy as np
import math as math
from collections import deque
from ..auto_diff_util import bi_dict, replicate_wrapper, factor_dict, resp_dict_to_array

from ..base import Model, Problem

# Generates an Erdos-Renyi graph (n nodes plus one extra "exit" column)
# and patches it so that every node has a path out of the network.
def erdos_renyi(rng, n, p, directed = True):
    # graph[i][j] == 1 means an edge from i to j; column n is the exit.
    graph = np.zeros((n,n+1))
    for i in range(n):
        for j in range(n+1):
            prob = rng.uniform(0,1)
            if prob < p:
                graph[i][j] = 1
    if not directed:
        graph = np.triu(graph)

    # Check for exits in each subgraph; propagate "can reach an exit"
    # until the reachable set stops growing or covers all nodes.
    has_exit = set()
    checked = False
    while(not checked):
        numexitable = len(has_exit)
        for i in
range(n):
            if (graph[i][-1]) == 1:
                has_exit.add(i)
                # print("add original", has_exit)
            if len(has_exit) > 0:
                # Node i can exit if it links to any node already known to exit.
                has_exit2 = []
                for j in has_exit:
                    if graph[i][j] == 1 :
                        has_exit2 += [i]
                for a in has_exit2:
                    has_exit.add(a)
                # print("add adjacent", has_exit)
        afternumexitable = len(has_exit)
        # Stop when all nodes can exit or the reachable set stopped growing.
        checked = (afternumexitable == n or numexitable == afternumexitable)
    # If the graph has nodes that have no path out, then add a path out to those nodes.
    if len(has_exit) != n:
        for x in set(range(n)).difference(has_exit):
            graph[x][-1] = 1

    return graph


class OpenJackson(Model):
    """
    A model of an open jackson network.

    Attributes
    ----------
    name : string
        name of model
    n_rngs : int
        number of random-number generators used to run a simulation replication
    n_responses : int
        number of responses (performance measures)
    factors : dict
        changeable factors of the simulation model
    specifications : dict
        details of each factor (for GUI, data validation, and defaults)
    check_factor_list : dict
        switch case for checking factor simulatability

    Arguments
    ---------
    fixed_factors : dict
        fixed_factors of the simulation model

    See also
    --------
    base.Model
    """
    def __init__(self, fixed_factors=None, random = False):
        if fixed_factors is None:
            fixed_factors = {}
        self.name = "OPENJACKSON"
        self.n_responses = 2
        self.random = random
        self.n_random = 2  # Number of rng used for the random instance.
        # random instance factors: number_queues, arrival_alphas, service_mus, routing_matrix

        self.factors = fixed_factors
        self.specifications = {
            "number_queues": {
                "description": "The number of queues in the network",
                "datatype": int,
                "default": 5
            },
            "arrival_alphas": {
                "description": "The arrival rates to each queue from outside the network",
                "datatype": tuple,
                "default": (2,3,2,4,3)
            },
            "service_mus": {
                "description": "The mu values for the exponential service times ",
                "datatype": tuple,
                "default":
(11,11,11,11,11) + }, + "routing_matrix": { + "description": "The routing matrix that describes the probabilities of moving to the next queue after leaving the current one", + "datatype": list, + "default": [[0.1, 0.1, 0.2, 0.2, 0], + [0.1, 0.1, 0.2, 0.2, 0], + [0.2, 0.1, 0, 0.1, 0.2], + [0.1, 0.1, 0.1, 0, 0.2], + [0.1, 0.1, 0.1, 0.1, 0.2]] + }, + "t_end": { + "description": "A number of replications to run", + "datatype": int, + "default": 200 + }, + "warm_up": { + "description": "A number of replications to use as a warm up period", + "datatype": int, + "default": 0 + }, + "steady_state_initialization":{ + "description": "Whether the model will be initialized with steady state values", + "datatype": bool, + "default": False + }, + "density_p":{ + "description": "The probability of an edge existing in the graph in the random instance", + "datatype": float, + "default": 0.5 + }, + "random_arrival_parameter":{ + "description": "The parameter for the random arrival rate exponential distribution when creating a random instance", + "datatype": float, + "default": 1 + } + + + } + self.check_factor_list = { + "number_queues": self.check_number_queues, + "arrival_alphas": self.check_arrival_alphas, + "routing_matrix": self.check_routing_matrix, + "service_mus": self.check_service_mus, + "t_end": self.check_t_end, + "warm_up": self.check_warm_up, + "steady_state_initialization": self.check_steady_state_initialization, + "density_p": self.check_density_p, + "random_arrival_parameter": self.check_random_arrival_parameter + } + # Set factors of the simulation model. 
+ super().__init__(fixed_factors) + self.n_rngs = 3 * (self.factors["number_queues"] + 1) + + + + def check_number_queues(self): + return self.factors["number_queues"]>=0 + def check_arrival_alphas(self): + return all(x >= 0 for x in self.factors["arrival_alphas"]) + def check_service_mus(self): + lambdas = self.calc_lambdas() + return all(x >= 0 for x in self.factors["service_mus"]) and all(self.factors['service_mus'][i] > lambdas[i] for i in range(self.factors["number_queues"])) + def check_routing_matrix(self): + transition_sums = list(map(sum, self.factors["routing_matrix"])) + if all([len(row) == len(self.factors["routing_matrix"]) for row in self.factors["routing_matrix"]]) & \ + all(transition_sums[i] <= 1 for i in range(self.factors["number_queues"])): + return True + else: + return False + def check_t_end(self): + return self.factors["t_end"] >= 0 + def check_warm_up(self): + # Assume f(x) can be evaluated at any x in R^d. + return self.factors["warm_up"] >= 0 + def check_steady_state_initialization(self): + return isinstance(self.factors["steady_state_initialization"], bool) + def check_density_p(self): + return 0 <= self.factors["density_p"] <= 1 + def check_random_arrival_parameter(self): + return self.factors["random_arrival_parameter"] >= 0 + + # function that calulates the lambdas + def calc_lambdas(self): + routing_matrix = np.asarray(self.factors["routing_matrix"]) + lambdas = np.linalg.inv(np.identity(self.factors['number_queues']) - routing_matrix.T) @ self.factors["arrival_alphas"] + return lambdas + + def check_simulatable_factors(self): + lambdas = self.calc_lambdas() + return all(self.factors['service_mus'][i] > lambdas[i] for i in range(self.factors['number_queues'])) + + def attach_rng(self, random_rng): + #returns a dirichlet distribution of same shape as alpha + def dirichlet(alpha, rng): + gamma_vars = [rng.gammavariate(a, 1) for a in alpha] + sum_gamma_vars = sum(gamma_vars) + dirichlet_vars = [x / sum_gamma_vars for x in gamma_vars] + 
return dirichlet_vars + + self.random_rng = random_rng + random_num_queue = self.factors['number_queues'] + p = self.factors['density_p'] + random_matrix = erdos_renyi(random_rng[0], random_num_queue,p) + prob_matrix = np.zeros((random_num_queue, random_num_queue + 1)) + for i in range(random_num_queue): + a = int(sum(random_matrix[i]))+1 + probs = dirichlet(np.ones(a), rng = random_rng[0]) + r = 0 + for j in range(random_num_queue+1): + if random_matrix[i][j]==1 or j == random_num_queue: + prob_matrix[i][j] = probs[r] + r += 1 + prob_matrix = np.asarray(prob_matrix) + prob_matrix = prob_matrix[:, :-1] + random_arrival = [] + for i in range(random_num_queue): + random_arrival.append(random_rng[1].expovariate(self.factors['random_arrival_parameter'])) + + self.factors["arrival_alphas"] = random_arrival + self.factors['routing_matrix'] = prob_matrix.tolist() + + return + + def get_IPA(Dl, V, W, q, k, mu, self): # D is the dictionary, St L[i][1]: ith arrive cust's + def I(x, k): + if x==k: + return 1 + else: + return 0 + IA, IW = [[] for i in range(q)], [[-V[i][0]/mu * I(i, k)] for i in range(q)] + for i in range(len(Dl)): + queue = int(Dl[i][0]) + idx = Dl[i][1] + v = V[queue][idx] + if idx == 0: + if Dl[i][2][0] == -1: + IA[queue].append(0) + else: + pre_queue = Dl[i][2][0] + pre_idx = Dl[i][2][1]-1 + print('i: ', i, ', prequeue: ', pre_queue, ', pre_idx: ', pre_idx) + # print('iwww', IW[pre_queue], IA[pre_queue]) + if len(IA[pre_queue]) == 0: # Warm up bug.. + print('warmup') + a = 0 + else: + a = IW[pre_queue][pre_idx] + IA[pre_queue][pre_idx] + IA[queue].append(a) + else: + # Calculate IA + if Dl[i][2][0] == -1: + IA[queue].append(0) + else: + pre_queue = Dl[i][2][0] + pre_idx = Dl[i][2][1]-1 + print(pre_queue, pre_idx, IW[pre_queue], IA[pre_queue]) + if len(IA[pre_queue]) == 0: # Warm up bug.. 
+ print('warmup') + a = 0 + else: + a = IW[pre_queue][pre_idx] + IA[pre_queue][pre_idx] + # print('i: ', i, ', prequeue: ', pre_queue, ', pre_idx: ', pre_idx) + # print('a', a) + IA[queue].append(a) + if W[queue][idx] <= 0: + v = -V[queue][idx]/mu * I(queue, k) + IW[queue].append(v) + else: + v = -V[queue][idx]/mu * I(queue, k) + IW[queue][idx-1] + # print('pre: ', IA[queue][idx-1]) + # print('it: ', IA[queue][idx]) + u = IA[queue][idx-1] - IA[queue][idx] + IW[queue].append(u + v) + + return IA, IW + + def replicate(self, rng_list): + """ + Simulate a single replication for the current model factors. + + Arguments + --------- + rng_list : [list] [rng.mrg32k3a.MRG32k3a] + rngs for model to use when simulating a replication + + Returns + ------- + responses : dict + performance measures of interest + "average_queue_length": The time-average of queue length at each station + "expected_queue_length": The expected queue length calculated using stationary distribution + """ + # Designate random number generators. 
        arrival_rng = [rng_list[i] for i in range(self.factors["number_queues"])]
        transition_rng = [rng_list[i + self.factors["number_queues"]] for i in range(self.factors["number_queues"])]
        time_rng = [rng_list[i + 2*self.factors["number_queues"]] for i in range(self.factors["number_queues"])]
        initialization_rng = rng_list[-1]

        # Sample a geometric variate with success probability (1 - p) via inversion;
        # used to draw stationary M/M/1 queue lengths.
        def geometric(p):
            return math.floor(np.log(1 - initialization_rng.uniform(0,1)) / math.log(p))
        # Calculate the steady state of the queues to check the simulation.
        # Calculate lambdas from the traffic equations.
        routing_matrix = np.asarray(self.factors["routing_matrix"])
        lambdas = np.linalg.inv(np.identity(self.factors['number_queues']) - routing_matrix.T) @ self.factors["arrival_alphas"]
        rho = lambdas/self.factors["service_mus"]
        # Calculate expected value of queue length as rho/(1-rho).
        expected_queue_length = (rho)/(1-rho)

        if self.factors["steady_state_initialization"]:
            # Sample initialized queue lengths from the stationary distribution.
            queues = [geometric(rho[i]) for i in range(self.factors["number_queues"])]
            completion_times = [math.inf for _ in range(self.factors["number_queues"])]
            # Generate all interarrival, network routes, and service times before the simulation run.
            next_arrivals = [arrival_rng[i].expovariate(self.factors["arrival_alphas"][i]) for i in range(self.factors["number_queues"])]
            for i in range(self.factors["number_queues"]):
                if queues[i] > 0:
                    completion_times[i] = time_rng[i].expovariate(self.factors["service_mus"][i])
            time_sum_queue_length = [0 for _ in range(self.factors["number_queues"])]

        else:
            # Start empty; a warm-up period is simulated below instead.
            queues = [0 for _ in range(self.factors["number_queues"])]
            # Generate all interarrival, network routes, and service times before the simulation run.
            next_arrivals = [arrival_rng[i].expovariate(self.factors["arrival_alphas"][i])
                             for i in range(self.factors["number_queues"])]

            # Create list of each station's next completion time and initialize to infinity.
            completion_times = [math.inf for _ in range(self.factors["number_queues"])]

            # Initialize list of each station's average queue length.
            time_sum_queue_length = [0 for _ in range(self.factors["number_queues"])]


        # Initiate clock variables for statistics tracking and event handling.
        clock = 0
        previous_clock = 0

        # Warm-up period (only needed when not initialized in steady state).
        if not self.factors["steady_state_initialization"]:

            while clock < self.factors['warm_up']:
                next_arrival = min(next_arrivals)
                next_completion = min(completion_times)
                clock = min(next_arrival, next_completion)
                if next_arrival < next_completion: # next event is an arrival
                    station = next_arrivals.index(next_arrival)
                    queues[station] += 1
                    next_arrivals[station] += arrival_rng[station].expovariate(self.factors["arrival_alphas"][station])
                    if queues[station] == 1:
                        completion_times[station] = clock + time_rng[station].expovariate(self.factors["service_mus"][station])
                else: # next event is a departure
                    station = completion_times.index(next_completion)
                    queues[station] -= 1
                    if queues[station] > 0:
                        completion_times[station] = clock + time_rng[station].expovariate(self.factors["service_mus"][station])
                    else:
                        completion_times[station] = math.inf
                    # Schedule where the customer will go next.
                    prob = transition_rng[station].random()

                    if prob < np.cumsum(self.factors['routing_matrix'][station])[-1]: # customer stays in system
                        next_station = np.argmax(np.cumsum(self.factors['routing_matrix'][station]) > prob)
                        queues[next_station] += 1
                        if queues[next_station] == 1:
                            completion_times[next_station] = clock + time_rng[next_station].expovariate(self.factors["service_mus"][next_station])
            # Shift the event calendar so the measured horizon starts at time 0.
            next_arrivals = [next_arrivals[i] - clock for i in range(self.factors["number_queues"])]
            completion_times = [completion_times[i] - clock for i in range(self.factors["number_queues"])]
            clock = 0
            previous_clock = 0

        # Statistics needed for IPA - waiting_record, service_record, arrival_record, transfer_record, IPA_record.
        # waiting_record: records the waiting time of each customer before entering service. Recorded when scheduling new completion times.
        # Helper list: time_entered. Records the time each customer enters the system. Recorded when scheduling a new arrival or departing to another station;
        #              popped when scheduling new completion times.
        # service_record: records the service time of each customer. Recorded when scheduling new completion times.
        # arrival_record: records the arrival time of each customer. Recorded when scheduling new arrivals.
        # transfer_record: records where the customer is transferred from, formatted as [previous station, previous index], if new : [-1].
        #                  Recorded when scheduling departures & new arrivals.
        # IPA_record: records the customer's index in the queue and the station it is transferred from, each element formatted as [station, index, [previous station, previous index]].
        #             Recorded at scheduling new completion times.
        # Collect all statistics starting from the warm-up period.
        waiting_record = [[] for _ in range(self.factors["number_queues"])]
        time_entered = [deque() for _ in range(self.factors['number_queues'])]
        service_record = [[] for _ in range(self.factors["number_queues"])]
        arrival_record = [[] for _ in range(self.factors["number_queues"])]
        transfer_record = [deque() for _ in range(self.factors["number_queues"])]
        IPA_record = []

        # Run simulation over time horizon.
        while clock < self.factors['t_end']:
            next_arrival = min(next_arrivals)
            next_completion = min(completion_times)
            clock = min(next_arrival, next_completion)
            # Accumulate the area under each station's queue-length path.
            for i in range(self.factors['number_queues']):
                time_sum_queue_length[i] += queues[i] * (clock - previous_clock)

            previous_clock = clock
            if next_arrival < next_completion: # next event is an arrival
                station = next_arrivals.index(next_arrival)
                queues[station] += 1
                next_arrivals[station] += arrival_rng[station].expovariate(self.factors["arrival_alphas"][station])

                time_entered[station].append(clock)
                if queues[station] == 1:
                    completion_times[station] = clock + time_rng[station].expovariate(self.factors["service_mus"][station])
                    waiting_record[station].append(clock - time_entered[station].popleft())
            else: # next event is a departure
                station = completion_times.index(next_completion)
                queues[station] -= 1
                if queues[station] > 0:
                    completion_times[station] = clock + time_rng[station].expovariate(self.factors["service_mus"][station])
                    waiting_record[station].append(clock - time_entered[station].popleft())
                else:
                    completion_times[station] = math.inf
                # Schedule where the customer will go next.
                prob = transition_rng[station].random()

                if prob < np.cumsum(self.factors['routing_matrix'][station])[-1]: # customer stays in system
                    next_station = np.argmax(np.cumsum(self.factors['routing_matrix'][station]) > prob)
                    queues[next_station] += 1
                    time_entered[next_station].append(clock)
                    if queues[next_station] == 1:
                        completion_times[next_station] = clock + time_rng[next_station].expovariate(self.factors["service_mus"][next_station])
                        waiting_record[next_station].append(clock - time_entered[next_station].popleft())
        # End of simulation.
        # Calculate the IPA gradient.
        # IPA_gradient = []
        # for j in range(self.factors['number_queues']):
        #     IPA_gradient.append(self.get_IPA(IPA_record, service_times, waiting_times, self.factors['number_queues'], j, self.factors['service_mus'][j]))

        # Calculate average queue length (time average over the horizon).
        average_queue_length = [time_sum_queue_length[i]/clock for i in range(self.factors["number_queues"])]
        # Analytic d/d(mu_i) of the stationary mean queue length lambda_i/(mu_i - lambda_i).
        gradient = [-lambdas[i]/(self.factors["service_mus"][i] - lambdas[i])**(2) for i in range(self.factors['number_queues'])]
        # lagrange_obj = sum(lambdas[i]/(self.factors["service_mus"][i] - lambdas[i]) for i in range(self.factors['number_queues'])) + 0.5*sum(self.factors['service_mus'])
        lagrange_obj = sum(average_queue_length) + 0.5*sum(self.factors['service_mus'])
        lagrange_grad = [-lambdas[i]/(self.factors["service_mus"][i] - lambdas[i])**(2) + 1 for i in range(self.factors['number_queues'])]

        responses = {"total_jobs": sum(average_queue_length)}
        # responses = {"average_queue_length": average_queue_length, 'lagrange_obj': lagrange_obj, "expected_queue_length" :expected_queue_length,
        #              "total_jobs": sum(average_queue_length)}
        gradients = {response_key: {factor_key: np.nan for factor_key in self.specifications} for response_key in responses}

        # gradients['average_queue_length']['service_mus'] = tuple(gradient)
        gradients['total_jobs']['service_mus'] = tuple(gradient)

        return responses, gradients


"""
Summary
-------
Minimize the expected total number of jobs in the system at a time
"""

class OpenJacksonMinQueue(Problem):
    """
    Class to Open Jackson simulation-optimization problems.
+ + Attributes + ---------- + name : string + name of problem + dim : int + number of decision variables + n_objectives : int + number of objectives + n_stochastic_constraints : int + number of stochastic constraints + service_rates_budget: int + budget for total service rates sum + minmax : tuple of int (+/- 1) + indicator of maximization (+1) or minimization (-1) for each objective + constraint_type : string + description of constraints types: + "unconstrained", "box", "deterministic", "stochastic" + variable_type : string + description of variable types: + "discrete", "continuous", "mixed" + lower_bounds : tuple + lower bound for each decision variable + upper_bounds : tuple + upper bound for each decision variable + gradient_available : bool + indicates if gradient of objective function is available + optimal_value : tuple + optimal objective function value + optimal_solution : tuple + optimal solution + model : Model object + associated simulation model that generates replications + model_default_factors : dict + default values for overriding model-level default factors + model_fixed_factors : dict + combination of overriden model-level factors and defaults + model_decision_factors : set of str + set of keys for factors that are decision variables + rng_list : list of mrg32k3a.mrg32k3a.MRG32k3a objects + list of RNGs used to generate a random initial solution + or a random problem instance + factors : dict + changeable factors of the problem + initial_solution : tuple + default initial solution from which solvers start + budget : int > 0 + max number of replications (fn evals) for a solver to take + specifications : dict + details of each factor (for GUI, data validation, and defaults) + + Arguments + --------- + name : str + user-specified name for problem + fixed_factors : dict + dictionary of user-specified problem factors + model_fixed factors : dict + subset of user-specified non-decision factors to pass through to the model + + See also + -------- + 
base.Problem + """ + def __init__(self, name="OPENJACKSON-1", fixed_factors=None, model_fixed_factors=None, random = False, random_rng = None): + if fixed_factors is None: + fixed_factors = {} + if model_fixed_factors is None: + model_fixed_factors = {} + self.name = name + self.n_objectives = 1 + self.n_stochastic_constraints = 0 + self.minmax = (-1,) + self.constraint_type = "deterministic" + self.variable_type = "continuous" + self.gradient_available = True + self.model_default_factors = {} + self.model_decision_factors = {"service_mus"} + self.factors = fixed_factors + self.random = random + self.n_rngs = 1 + + self.specifications = { + "initial_solution": { + "description": "initial solution", + "datatype": tuple, + "default": (11,11,11,11,11) + }, + "budget": { + "description": "max # of replications for a solver to take", + "datatype": int, + "default": 1000 + }, + "service_rates_budget" :{ + "description": "budget for total service rates sum", + "datatype": int, + "default": 100 # ask later: access model factors when setting default values for budget + }, + "gamma_mean":{ + "description": "scale of the mean of gamma distribution when generating service rates upper bound", + "datatype": float, + "default": 0.5 + }, + "gamma_scale":{ + "description": "shape of gamma distribution when generating service rates upper bound", + "datatype": tuple, + "default": 5 + } + + } + self.check_factor_list = { + "initial_solution": self.check_initial_solution, + "budget": self.check_budget, + "service_rates_budget": self.check_service_rates_budget + } + super().__init__(fixed_factors, model_fixed_factors) + self.model = OpenJackson(self.model_fixed_factors, random) + self.Ci = np.array([1 for _ in range(self.model.factors["number_queues"])]) + self.di = np.array([self.factors['service_rates_budget']]) + self.Ce = None + self.de = None + self.dim = self.model.factors["number_queues"] + self.lower_bounds = tuple(0 for _ in range(self.model.factors["number_queues"])) + 
self.upper_bounds = tuple(self.factors['service_rates_budget'] for _ in range(self.model.factors["number_queues"])) + # Instantiate model with fixed factors and overwritten defaults. + self.optimal_value = None # Change if f is changed. + self.optimal_solution = None # Change if f is changed. + if random and random_rng: + self.model.attach_rng(random_rng) + + # lambdas = self.model.calc_lambdas() + # r = self.factors["service_rates_budget"]/sum(lambdas) + # self.factors['initial_solution'] = tuple([r*lambda_i for lambda_i in lambdas]) + + def attach_rngs(self, random_rng): + self.random_rng = random_rng + lambdas = self.model.calc_lambdas() + + # generate service rates upper bound as the sum of lambdas plus a gamma random variable with parameter as an input + mean = self.factors["gamma_mean"] * sum(lambdas) + scale = self.factors["gamma_scale"] + gamma = random_rng[0].gammavariate(mean/scale, scale) + self.factors["service_rates_budget"] = sum(lambdas) + gamma + + lambdas = self.model.calc_lambdas() + r = self.factors["service_rates_budget"]/sum(lambdas) + self.factors['initial_solution'] = tuple([r*lambda_i for lambda_i in lambdas]) + + return + + def check_service_rates_budget(self): + routing_matrix = np.asarray(self.model.factors["routing_matrix"]) + lambdas = np.linalg.inv(np.identity(self.model.factors['number_queues']) - routing_matrix.T) @ self.model.factors["arrival_alphas"] + if sum(self.factors["service_rates_budget"]) < sum(lambdas) : + return False + return True + + def vector_to_factor_dict(self, vector): + """ + Convert a vector of variables to a dictionary with factor keys + + Arguments + --------- + vector : tuple + vector of values associated with decision variables + + Returns + ------- + factor_dict : dictionary + dictionary with factor keys and associated values + """ + factor_dict = { + "service_mus": vector[:] + } + return factor_dict + + def factor_dict_to_vector(self, factor_dict): + """ + Convert a dictionary with factor keys to a vector + 
of variables. + + Arguments + --------- + factor_dict : dictionary + dictionary with factor keys and associated values + + Returns + ------- + vector : tuple + vector of values associated with decision variables + """ + vector = (factor_dict["service_mus"],) + return vector + + def response_dict_to_objectives(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of objectives. + + Arguments + --------- + response_dict : dictionary + dictionary with response keys and associated values + + Returns + ------- + objectives : tuple + vector of objectives + """ + if type(response_dict['total_jobs']) == tuple: + objectives = (response_dict['total_jobs'][0],) + else: + objectives = (response_dict['total_jobs'],) + return objectives + + def response_dict_to_stoch_constraints(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of left-hand sides of stochastic constraints: E[Y] <= 0 + + Arguments + --------- + response_dict : dictionary + dictionary with response keys and associated values + + Returns + ------- + stoch_constraints : tuple + vector of LHSs of stochastic constraint + """ + stoch_constraints = None + return stoch_constraints + + def deterministic_objectives_and_gradients(self, x): + """ + Compute deterministic components of objectives for a solution `x`. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_objectives : tuple + vector of deterministic components of objectives + det_objectives_gradients : tuple + vector of gradients of deterministic components of objectives + """ + det_objectives = (0,) + det_objectives_gradients = ((0,) * self.dim,) + return det_objectives, det_objectives_gradients + + def deterministic_stochastic_constraints_and_gradients(self, x): + """ + Compute deterministic components of stochastic constraints + for a solution `x`. 
+ + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_stoch_constraints : tuple + vector of deterministic components of stochastic constraints + det_stoch_constraints_gradients : tuple + vector of gradients of deterministic components of + stochastic constraints + """ + det_stoch_constraints = None + det_stoch_constraints_gradients = None + return det_stoch_constraints, det_stoch_constraints_gradients + + def check_deterministic_constraints(self, x): + """ + Check if a solution `x` satisfies the problem's deterministic + constraints. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + satisfies : bool + indicates if solution `x` satisfies the deterministic constraints. + """ + # Superclass method will check box constraints. + # Can add other constraints here. + routing_matrix = np.asarray(self.model.factors["routing_matrix"]) + lambdas = np.linalg.inv(np.identity(self.model.factors['number_queues']) - routing_matrix.T) @ self.model.factors["arrival_alphas"] + box_feasible = all(x[i] > lambdas[i] for i in range(self.model.factors['number_queues'])) + upper_feasible = (sum(x) <= self.factors['service_rates_budget']) + return super().check_deterministic_constraints(x) * box_feasible * upper_feasible + + def get_random_solution(self, rand_sol_rng): + """ + Generate a random solution for starting or restarting solvers. 
+ + Arguments + --------- + rand_sol_rng : mrg32k3a.mrg32k3a.MRG32k3a object + random-number generator used to sample a new random solution + + Returns + ------- + x : vector of decision variables + """ + if (self.model.factors["steady_state_initialization"]==True): + x = [0]*self.model.factors["number_queues"] + lambdas = self.model.calc_lambdas() + sum_alphas = sum(self.model.factors["arrival_alphas"]) + for i in range(self.model.factors["number_queues"]): + x[i] = lambdas[i] + rand_sol_rng.uniform(0,1) * sum_alphas + else: + x = rand_sol_rng.continuous_random_vector_from_simplex(n_elements=self.model.factors["number_queues"], + summation=self.factors["service_rates_budget"], + exact_sum=False + ) + return x + +""" +Summary(.) +------- +Minimize the expected total number of jobs in the system at a time +""" + +class OpenJacksonMinQueueLagrange(Problem): + """ + Class to Open Jackson simulation-optimization problems. + + Attributes + ---------- + name : string + name of problem + dim : int + number of decision variables + n_objectives : int + number of objectives + n_stochastic_constraints : int + number of stochastic constraints + service_rates_budget: int + budget for total service rates sum + minmax : tuple of int (+/- 1) + indicator of maximization (+1) or minimization (-1) for each objective + constraint_type : string + description of constraints types: + "unconstrained", "box", "deterministic", "stochastic" + variable_type : string + description of variable types: + "discrete", "continuous", "mixed" + lower_bounds : tuple + lower bound for each decision variable + upper_bounds : tuple + upper bound for each decision variable + gradient_available : bool + indicates if gradient of objective function is available + optimal_value : tuple + optimal objective function value + optimal_solution : tuple + optimal solution + model : Model object + associated simulation model that generates replications + model_default_factors : dict + default values for overriding 
model-level default factors + model_fixed_factors : dict + combination of overriden model-level factors and defaults + model_decision_factors : set of str + set of keys for factors that are decision variables + rng_list : list of mrg32k3a.mrg32k3a.MRG32k3a objects + list of RNGs used to generate a random initial solution + or a random problem instance + factors : dict + changeable factors of the problem + initial_solution : tuple + default initial solution from which solvers start + budget : int > 0 + max number of replications (fn evals) for a solver to take + specifications : dict + details of each factor (for GUI, data validation, and defaults) + + Arguments + --------- + name : str + user-specified name for problem + fixed_factors : dict + dictionary of user-specified problem factors + model_fixed factors : dict + subset of user-specified non-decision factors to pass through to the model + + See also + -------- + base.Problem + """ + def __init__(self, name="OPENJACKSON-2", fixed_factors=None, model_fixed_factors=None, random = False, random_rng = None): + if fixed_factors is None: + fixed_factors = {} + if model_fixed_factors is None: + model_fixed_factors = {} + self.name = name + self.n_objectives = 1 + self.n_stochastic_constraints = 0 + self.minmax = (-1,) + self.constraint_type = "deterministic" + self.variable_type = "continuous" + self.gradient_available = True + self.model_default_factors = {} + self.model_decision_factors = {"service_mus"} + self.factors = fixed_factors + self.random = random + self.n_rngs = 1 + + self.specifications = { + "initial_solution": { + "description": "initial solution", + "datatype": tuple, + "default": (15,15,15,15,15) + }, + "budget": { + "description": "max # of replications for a solver to take", + "datatype": int, + "default": 500 + }, + "service_rates_factor" :{ + "description": "weight of the service rates in the objective function", + "datatype": int, + "default": 0.5 + } + } + self.check_factor_list = { + 
"initial_solution": self.check_initial_solution, + "budget": self.check_budget, + "service_rates_factor": self.check_service_rates_factor + } + super().__init__(fixed_factors, model_fixed_factors) + self.model = OpenJackson(self.model_fixed_factors, random) + self.dim = self.model.factors["number_queues"] + lambdas = self.model.calc_lambdas() + self.lower_bounds = tuple(lambdas) + self.upper_bounds = (np.inf,) * self.dim + # Instantiate model with fixed factors and overwritten defaults. + self.optimal_value = None # Change if f is changed. + self.optimal_solution = None # Change if f is changed. + if random and random_rng: + self.model.attach_rng(random_rng) + + self.factors['initial_solution'] = tuple([1.1*lambda_i for lambda_i in lambdas]) + + def attach_rngs(self, random_rng): + self.random_rng = random_rng + self.factors["service_rates_factor"] = random_rng[0].uniform(0,5) + + return + + def check_service_rates_factor(self): + + return self.factors['service_rates_factor'] >= 0 + + def vector_to_factor_dict(self, vector): + """ + Convert a vector of variables to a dictionary with factor keys + + Arguments + --------- + vector : tuple + vector of values associated with decision variables + + Returns + ------- + factor_dict : dictionary + dictionary with factor keys and associated values + """ + factor_dict = { + "service_mus": vector[:] + } + return factor_dict + + def factor_dict_to_vector(self, factor_dict): + """ + Convert a dictionary with factor keys to a vector + of variables. + + Arguments + --------- + factor_dict : dictionary + dictionary with factor keys and associated values + + Returns + ------- + vector : tuple + vector of values associated with decision variables + """ + vector = (factor_dict["service_mus"],) + return vector + + def response_dict_to_objectives(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of objectives. 
+ + Arguments + --------- + response_dict : dictionary + dictionary with response keys and associated values + + Returns + ------- + objectives : tuple + vector of objectives + """ + if type(response_dict['lagrange_obj']) == tuple: + objectives = (response_dict['lagrange_obj'][0],) + else: + objectives = (response_dict['lagrange_obj'],) + return objectives + + def response_dict_to_stoch_constraints(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of left-hand sides of stochastic constraints: E[Y] <= 0 + + Arguments + --------- + response_dict : dictionary + dictionary with response keys and associated values + + Returns + ------- + stoch_constraints : tuple + vector of LHSs of stochastic constraint + """ + stoch_constraints = None + return stoch_constraints + + def deterministic_objectives_and_gradients(self, x): + """ + Compute deterministic components of objectives for a solution `x`. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_objectives : tuple + vector of deterministic components of objectives + det_objectives_gradients : tuple + vector of gradients of deterministic components of objectives + """ + det_objectives = (0,) + det_objectives_gradients = ((0,) * self.dim,) + return det_objectives, det_objectives_gradients + + def deterministic_stochastic_constraints_and_gradients(self, x): + """ + Compute deterministic components of stochastic constraints + for a solution `x`. 
+ + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_stoch_constraints : tuple + vector of deterministic components of stochastic constraints + det_stoch_constraints_gradients : tuple + vector of gradients of deterministic components of + stochastic constraints + """ + det_stoch_constraints = tuple([0]*self.dim) + det_stoch_constraints_gradients = (0,) + return det_stoch_constraints, det_stoch_constraints_gradients + + def check_deterministic_constraints(self, x): + """ + Check if a solution `x` satisfies the problem's deterministic + constraints. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + satisfies : bool + indicates if solution `x` satisfies the deterministic constraints. + """ + # Superclass method will check box constraints. + # Can add other constraints here. + routing_matrix = np.asarray(self.model.factors["routing_matrix"]) + lambdas = np.linalg.inv(np.identity(self.model.factors['number_queues']) - routing_matrix.T) @ self.model.factors["arrival_alphas"] + box_feasible = all(x[i] > lambdas[i] for i in range(self.model.factors['number_queues'])) + return super().check_deterministic_constraints(x) * box_feasible + + def get_random_solution(self, rand_sol_rng): + """ + Generate a random solution for starting or restarting solvers. 
+ + Arguments + --------- + rand_sol_rng : mrg32k3a.mrg32k3a.MRG32k3a object + random-number generator used to sample a new random solution + + Returns + ------- + x : vector of decision variables + """ + if (self.model.factors["steady_state_initialization"]==True): + x = [0]*self.model.factors["number_queues"] + lambdas = self.model.calc_lambdas() + sum_alphas = sum(self.model.factors["arrival_alphas"]) + for i in range(self.model.factors["number_queues"]): + x[i] = lambdas[i] + rand_sol_rng.uniform(0,1) * sum_alphas + else: + x = rand_sol_rng.continuous_random_vector_from_simplex(n_elements=self.model.factors["number_queues"], + summation=self.factors["service_rates_budget"], + exact_sum=False + ) + return x \ No newline at end of file From e18fef9811d70d26b149d1ced843e857e130838f Mon Sep 17 00:00:00 2001 From: Litong Liu <46491025+liulitong-Jessie@users.noreply.github.com> Date: Wed, 28 Feb 2024 09:45:11 +0800 Subject: [PATCH 19/21] Add files via upload --- simopt/models/cascade.py | 648 +++++++++++++++++++++++++++++++++++++++ simopt/models/network.py | 23 +- simopt/models/san.py | 476 +++++++++++++++++++++++++++- simopt/models/smf.py | 488 +++++++++++++++++++++++++++++ simopt/models/smfcvx.py | 510 ++++++++++++++++++++++++++++++ 5 files changed, 2132 insertions(+), 13 deletions(-) create mode 100644 simopt/models/cascade.py create mode 100644 simopt/models/smf.py create mode 100644 simopt/models/smfcvx.py diff --git a/simopt/models/cascade.py b/simopt/models/cascade.py new file mode 100644 index 000000000..c91d2aeed --- /dev/null +++ b/simopt/models/cascade.py @@ -0,0 +1,648 @@ +""" +Summary +------- +Simulate a progressive cascade process in an infinite time horizon. +`here `_. + +""" +import numpy as np +import networkx as nx +import cvxpy as cp + +from ..base import Model, Problem + + +class Cascade(Model): + """ + Simulate a progressive cascade process in an infinite time horizon. 
+ + Attributes + ---------- + name : str + name of model + n_rngs : int + number of random-number generators used to run a simulation replication + n_responses : int + number of responses (performance measures) + factors : dict + changeable factors of the simulation model + specifications : dict + details of each factor (for GUI, data validation, and defaults) + check_factor_list : dict + switch case for checking factor simulatability + + Arguments + ---------- + fixed_factors : dict + fixed_factors of the simulation model + + See also + -------- + base.Model + """ + def __init__(self, fixed_factors=None): + if fixed_factors is None: + fixed_factors = {} + self.name = "CASCADE" + self.n_rngs = 2 + self.n_responses = 1 + self.factors = fixed_factors + self.G = nx.read_graphml('/Users/liulitong/Desktop/simopt-1/DAG.graphml') + self.num_nodes = len(self.G) + self.specifications = { + "num_subgraph": { + "description": "number of subgraphs to generate", + "datatype": int, + "default": 10 + }, + "init_prob": { + "description": "probability of initiating the nodes", + "datatype": np.ndarray, + "default": 0.1 * np.ones(self.num_nodes) + } + } + + self.check_factor_list = { + "num_subgraph": self.check_num_subgraph, + "init_prob": self.check_init_prob, + } + # Set factors of the simulation model + super().__init__(fixed_factors) + + # Check for simulatable factors + def check_num_subgraph(self): + return self.factors["num_subgraph"] > 0 + + def check_init_prob(self): + return np.all(self.factors["init_prob"] >= 0) + + def check_simulatable_factors(self): + return True + + def replicate(self, rng_list): + """ + Simulate a single replication for the current model factors. + + Arguments + --------- + rng_list : [list] [mrg32k3a.mrg32k3a.MRG32k3a] + rngs for model to use when simulating a replication + + Returns + ------- + responses : dict + performance measures of interest + "mean_num_activated" = Mean number of activated nodes + """ + # Designate random number generators. 
+ seed_rng = rng_list[0] + activate_rng = rng_list[1] + + nodes = list(self.G.nodes) + num_lst = [] + for _ in range(self.factors["num_subgraph"]): + # Create seed nodes. + seeds = [nodes[j] for j in range(self.num_nodes) if seed_rng.uniform(0, 1) < self.factors["init_prob"][j]] + # Set all nodes as not activated. + activated = set() + # Add the seed nodes to the activated set. + activated.update(set(seeds)) + # Initialize the newly activated nodes list with the seed nodes. + newly_activated = set(seeds) + + # Run the model until there are no more newly activated nodes. + while len(newly_activated) != 0: + temp_activated = set() + for v in newly_activated: + # Check for each successor if it gets activated. + for w in self.G.successors(v): + if w not in activated: + u = activate_rng.uniform(0, 1) + if u < self.G[v][w]["weight"]: + temp_activated.add(w) + # Add newly activated nodes to the activated set. + newly_activated = temp_activated + activated.update(newly_activated) + + + num_activated = len(activated) + num_lst.append(num_activated) + + + # Calculate responses from simulation data. + responses = {"mean_num_activated": np.mean(num_lst) + } + gradients = {response_key: {factor_key: np.nan for factor_key in self.specifications} for response_key in responses} + return responses, gradients + + +""" +Summary +------- +Maximize the expected number of activated nodes. +""" + +class CascadeMax(Problem): + """ + Class to make network cascade simulation-optimization problems. 
+ + Attributes + ---------- + name : str + name of problem + dim : int + number of decision variables + n_objectives : int + number of objectives + n_stochastic_constraints : int + number of stochastic constraints + minmax : tuple of int (+/- 1) + indicator of maximization (+1) or minimization (-1) for each objective + constraint_type : str + description of constraints types: + "unconstrained", "box", "deterministic", "stochastic" + variable_type : str + description of variable types: + "discrete", "continuous", "mixed" + gradient_available : bool + indicates if gradient of objective function is available + optimal_value : float + optimal objective function value + optimal_solution : tuple + optimal solution + model : base.Model + associated simulation model that generates replications + model_default_factors : dict + default values for overriding model-level default factors + model_fixed_factors : dict + combination of overriden model-level factors and defaults + model_decision_factors : set of str + set of keys for factors that are decision variables + rng_list : [list] [mrg32k3a.mrg32k3a.MRG32k3a] + list of RNGs used to generate a random initial solution + or a random problem instance + factors : dict + changeable factors of the problem + initial_solution : tuple + default initial solution from which solvers start + budget : int > 0 + max number of replications (fn evals) for a solver to take + specifications : dict + details of each factor (for GUI, data validation, and defaults) + + Arguments + --------- + name : str + user-specified name of problem + fixed_factors : dict + dictionary of user-specified problem factors + model_fixed factors : dict + subset of user-specified non-decision factors to pass through to the model + + See also + -------- + base.Problem + """ + def __init__(self, name="CASCADE-1", fixed_factors=None, model_fixed_factors=None): + if fixed_factors is None: + fixed_factors = {} + if model_fixed_factors is None: + model_fixed_factors = {} + 
self.name = name + self.n_objectives = 1 + self.n_stochastic_constraints = 0 + self.minmax = (1,) + self.constraint_type = "deterministic" + self.variable_type = "continuous" + self.gradient_available = False + self.optimal_value = None + self.optimal_solution = None + self.G = nx.read_graphml('/Users/liulitong/Desktop/simopt-1/DAG.graphml') + self.model_default_factors = {} + self.model_decision_factors = {"init_prob"} + self.factors = fixed_factors + self.specifications = { + "initial_solution": { + "description": "initial solution", + "datatype": tuple, + "default": tuple(0.001 * np.ones(len(self.G))) + }, + "budget": { + "description": "max # of replications for a solver to take", + "datatype": int, + "default": 10000 + }, + "B": { + "description": "budget for the activation costs", + "datatype": int, + "default": 200 + } + } + self.check_factor_list = { + "initial_solution": self.check_initial_solution, + "budget": self.check_budget + } + super().__init__(fixed_factors, model_fixed_factors) + # Instantiate model with fixed factors and overwritten defaults. + self.model = Cascade(self.model_fixed_factors) + self.dim = len(self.model.G) + self.lower_bounds = (0,) * self.dim + self.upper_bounds = (1,) * self.dim + self.Ci = np.array([self.model.G.nodes[node]["cost"] for node in self.model.G.nodes()]) + self.Ce = None + self.di = np.array([self.factors["B"]]) + self.de = None + + def vector_to_factor_dict(self, vector): + """ + Convert a vector of variables to a dictionary with factor keys + + Arguments + --------- + vector : tuple + vector of values associated with decision variables + + Returns + ------- + factor_dict : dict + dictionary with factor keys and associated values + """ + factor_dict = { + "init_prob": vector[:] + } + return factor_dict + + def factor_dict_to_vector(self, factor_dict): + """ + Convert a dictionary with factor keys to a vector + of variables. 
+ + Arguments + --------- + factor_dict : dict + dictionary with factor keys and associated values + + Returns + ------- + vector : tuple + vector of values associated with decision variables + """ + vector = (factor_dict["init_prob"],) + return vector + + def response_dict_to_objectives(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of objectives. + + Arguments + --------- + response_dict : dict + dictionary with response keys and associated values + + Returns + ------- + objectives : tuple + vector of objectives + """ + objectives = (response_dict["mean_num_activated"],) + return objectives + + def response_dict_to_stoch_constraints(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of left-hand sides of stochastic constraints: E[Y] <= 0 + + Arguments + --------- + response_dict : dict + dictionary with response keys and associated values + + Returns + ------- + stoch_constraints : tuple + vector of LHSs of stochastic constraint + """ + stoch_constraints = None + return stoch_constraints + + def deterministic_objectives_and_gradients(self, x): + """ + Compute deterministic components of objectives for a solution `x`. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_objectives : tuple + vector of deterministic components of objectives + det_objectives_gradients : tuple + vector of gradients of deterministic components of objectives + """ + det_objectives = (0,) + det_objectives_gradients = ((0,),) + return det_objectives, det_objectives_gradients + + def deterministic_stochastic_constraints_and_gradients(self, x): + """ + Compute deterministic components of stochastic constraints + for a solution `x`. 
+ + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_stoch_constraints : tuple + vector of deterministic components of stochastic constraints + det_stoch_constraints_gradients : tuple + vector of gradients of deterministic components of + stochastic constraints + """ + det_stoch_constraints = None + det_stoch_constraints_gradients = None + return det_stoch_constraints, det_stoch_constraints_gradients + + def check_deterministic_constraints(self, x): + """ + Check if a solution `x` satisfies the problem's deterministic + constraints. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + satisfies : bool + indicates if solution `x` satisfies the deterministic constraints. + """ + return np.dot(self.Ci, x) <= self.factors["B"] + + def get_random_solution(self, rand_sol_rng): + """ + Generate a random solution for starting or restarting solvers. + + Arguments + --------- + rand_sol_rng : mrg32k3a.mrg32k3a.MRG32k3a + random-number generator used to sample a new random solution + + Returns + ------- + x : tuple + vector of decision variables + """ + + # Upper bound and lower bound. + lower_bound = np.array(self.lower_bounds) + upper_bound = np.array(self.upper_bounds) + # Input inequality and equlaity constraint matrix and vector. + # Cix <= di + # Cex = de + Ci = self.Ci + di = self.di + Ce = self.Ce + de = self.de + + # Remove redundant upper/lower bounds. + ub_inf_idx = np.where(~np.isinf(upper_bound))[0] + lb_inf_idx = np.where(~np.isinf(lower_bound))[0] + + # Form a constraint coefficient matrix where all the equality constraints are put on top and + # all the bound constraints in the bottom and a constraint coefficient vector. 
+ if (Ce is not None) and (de is not None) and (Ci is not None) and (di is not None): + C = np.vstack((Ce, Ci)) + d = np.vstack((de.T, di.T)) + elif (Ce is not None) and (de is not None): + C = Ce + d = de.T + elif (Ci is not None) and (di is not None): + C = Ci + d = di.T + else: + C = np.empty([1, self.dim]) + d = np.empty([1, 1]) + + if len(ub_inf_idx) > 0: + C = np.vstack((C, np.identity(upper_bound.shape[0]))) + d = np.vstack((d, upper_bound[np.newaxis].T)) + if len(lb_inf_idx) > 0: + C = np.vstack((C, -np.identity(lower_bound.shape[0]))) + d = np.vstack((d, -lower_bound[np.newaxis].T)) + + # Hit and Run + start_pt = self.find_feasible_initial(None, C, None, d) + tol = 1e-6 + + x = start_pt + # Generate the markov chain for sufficiently long. + for _ in range(20): + # Generate a random direction to travel. + direction = np.array([rand_sol_rng.uniform(0, 1) for _ in range(self.dim)]) + direction = direction / np.linalg.norm(direction) + + dir = direction + ra = d.flatten() - C @ x + ra_d = C @ dir + # Initialize maximum step size. + s_star = np.inf + # Perform ratio test. + for i in range(len(ra)): + if ra_d[i] - tol > 0: + s = ra[i]/ra_d[i] + if s < s_star: + s_star = s + + dir = -direction + ra = d.flatten() - C @ x + ra_d = C @ dir + # Initialize maximum step size. + s_star2 = np.inf + # Perform ratio test. + for i in range(len(ra)): + if ra_d[i] - tol > 0: + s = ra[i]/ra_d[i] + if s < s_star2: + s_star2 = s + + # Generate random point between lambdas. + lam = rand_sol_rng.uniform(-1 * min(1, s_star2), min(1, s_star)) + + # Compute the new point. + x += lam * direction + + x= tuple(x) + return x + + + def get_multiple_random_solution(self, rand_sol_rng, n_samples): + """ + Generate multiple random solutions for starting or restarting solvers. 
+ + Arguments + --------- + rand_sol_rng : mrg32k3a.mrg32k3a.MRG32k3a + random-number generator used to sample a new random solution + + n_samples: int + number of random solutions to generate + + Returns + ------- + xs : list[tuple] + list of vectors of decision variables + """ + + # Upper bound and lower bound. + lower_bound = np.array(self.lower_bounds) + upper_bound = np.array(self.upper_bounds) + # Input inequality and equlaity constraint matrix and vector. + # Cix <= di + # Cex = de + Ci = self.Ci + di = self.di + Ce = self.Ce + de = self.de + + # Remove redundant upper/lower bounds. + ub_inf_idx = np.where(~np.isinf(upper_bound))[0] + lb_inf_idx = np.where(~np.isinf(lower_bound))[0] + + # Form a constraint coefficient matrix where all the equality constraints are put on top and + # all the bound constraints in the bottom and a constraint coefficient vector. + if (Ce is not None) and (de is not None) and (Ci is not None) and (di is not None): + C = np.vstack((Ce, Ci)) + d = np.vstack((de.T, di.T)) + elif (Ce is not None) and (de is not None): + C = Ce + d = de.T + elif (Ci is not None) and (di is not None): + C = Ci + d = di.T + else: + C = np.empty([1, self.dim]) + d = np.empty([1, 1]) + + if len(ub_inf_idx) > 0: + C = np.vstack((C, np.identity(upper_bound.shape[0]))) + d = np.vstack((d, upper_bound[np.newaxis].T)) + if len(lb_inf_idx) > 0: + C = np.vstack((C, -np.identity(lower_bound.shape[0]))) + d = np.vstack((d, -lower_bound[np.newaxis].T)) + + # Hit and Run + start_pt = self.find_feasible_initial(None, self.Ci, None, self.di) + xs = [] + x = start_pt + tol = 1e-6 + + # Generate the markov chain for sufficiently long. + for _ in range(20 + n_samples): + # Generate a random direction to travel. + direction = np.array([rand_sol_rng.uniform(0, 1) for _ in range(self.dim)]) + direction = direction / np.linalg.norm(direction) + + dir = direction + ra = d.flatten() - C @ x + ra_d = C @ dir + # Initialize maximum step size. 
+ s_star = np.inf + # Perform ratio test. + for i in range(len(ra)): + if ra_d[i] - tol > 0: + s = ra[i]/ra_d[i] + if s < s_star: + s_star = s + + dir = -direction + ra = d.flatten() - C @ x + ra_d = C @ dir + # Initialize maximum step size. + s_star2 = np.inf + # Perform ratio test. + for i in range(len(ra)): + if ra_d[i] - tol > 0: + s = ra[i]/ra_d[i] + if s < s_star2: + s_star2 = s + + # Generate random point between lambdas. + lam = rand_sol_rng.uniform(-1 * min(1, s_star2), min(1, s_star)) + + # Compute the new point. + x += lam * direction + + xs.append(tuple(x)) + + return xs[: -n_samples] + + + def find_feasible_initial(self, Ae, Ai, be, bi): + ''' + Find an initial feasible solution (if not user-provided) + by solving phase one simplex. + + Arguments + --------- + problem : Problem object + simulation-optimization problem to solve + C: ndarray + constraint coefficient matrix + d: ndarray + constraint coefficient vector + + Returns + ------- + x0 : ndarray + an initial feasible solution + tol: float + Floating point comparison tolerance + ''' + upper_bound = np.array(self.upper_bounds) + lower_bound = np.array(self.lower_bounds) + + # Define decision variables. + x = cp.Variable(self.dim) + + # Define constraints. + constraints = [] + + if (Ae is not None) and (be is not None): + constraints.append(Ae @ x == be.ravel()) + if (Ai is not None) and (bi is not None): + constraints.append(Ai @ x <= bi.ravel()) + + # Removing redundant bound constraints. + ub_inf_idx = np.where(~np.isinf(upper_bound))[0] + if len(ub_inf_idx) > 0: + for i in ub_inf_idx: + constraints.append(x[i] <= upper_bound[i]) + lb_inf_idx = np.where(~np.isinf(lower_bound)) + if len(lb_inf_idx) > 0: + for i in lb_inf_idx: + constraints.append(x[i] >= lower_bound[i]) + + # Define objective function. + obj = cp.Minimize(0) + + # Create problem. + model = cp.Problem(obj, constraints) + + # Solve problem. + model.solve(solver = cp.SCIPY) + + # Check for optimality. 
+ if model.status not in [cp.OPTIMAL, cp.OPTIMAL_INACCURATE] : + raise ValueError("Could not find feasible x0") + x0 = x.value + + return x0 + + diff --git a/simopt/models/network.py b/simopt/models/network.py index f2f2f8582..448aa968c 100644 --- a/simopt/models/network.py +++ b/simopt/models/network.py @@ -49,12 +49,13 @@ def __init__(self, fixed_factors=None): "process_prob": { "description": "probability that a message will go through a particular network i", "datatype": list, - "default": [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1] + # "default": [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1] + "default": [1/10, 1/10, 1/10, 1/10, 1/10, 1/10, 1/10, 1/10, 1/10, 1/10] }, "cost_process": { "description": "message processing cost of network i", "datatype": list, - "default": [1, 1 / 2, 1 / 3, 1 / 4, 1 / 5, 1 / 6, 1 / 7, 1 / 8, 1 / 9, 1 / 10] + "default": [1, 1 / 2, 1 / 3, 1 / 4, 1 / 5, 1 / 6, 1 / 7, 1 / 8, 1 / 9, 1 / 10] # Random }, "cost_time": { "description": "cost for the length of time a message spends in a network i per each unit of time", @@ -64,22 +65,22 @@ def __init__(self, fixed_factors=None): "mode_transit_time": { "description": "mode time of transit for network i following a triangular distribution", "datatype": list, - "default": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + "default": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] # Random }, "lower_limits_transit_time": { "description": "lower limits for the triangular distribution for the transit time", "datatype": list, - "default": [0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5] + "default": [0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5] # Random }, "upper_limits_transit_time": { "description": "upper limits for the triangular distribution for the transit time", "datatype": list, - "default": [1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5] + "default": [1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5] # Random }, "arrival_rate": { "description": "arrival rate of messages following a Poisson 
process", "datatype": float, - "default": 1.0 + "default": 1.0 # Random }, "n_messages": { "description": "number of messages that arrives and needs to be routed", @@ -183,7 +184,9 @@ def replicate(self, rng_list): # Generate all interarrival, network routes, and service times before the simulation run. arrival_times = [arrival_rng.expovariate(self.factors["arrival_rate"]) for _ in range(total_arrivals)] - network_routes = network_rng.choices(range(self.factors["n_networks"]), weights=self.factors["process_prob"], k=total_arrivals) + network_routes = network_rng.choices(range(self.factors["n_networks"]), weights=self.factors['process_prob'], k=total_arrivals) + # print(self.factors['process_prob']) + # print(len(range(self.factors['n_networks'])), len(list(self.factors['process_prob'])), np.sum(self.factors['process_prob'])) service_times = [transit_rng.triangular(low=self.factors["lower_limits_transit_time"][network_routes[i]], high=self.factors["upper_limits_transit_time"][network_routes[i]], mode=self.factors["mode_transit_time"][network_routes[i]]) @@ -268,7 +271,7 @@ class NetworkMinTotalCost(Problem): upper bound for each decision variable gradient_available : bool indicates if gradient of objective function is available - optimal_value : tuple + optimal_value : float optimal objective function value optimal_solution : tuple optimal solution @@ -340,6 +343,10 @@ def __init__(self, name="NETWORK-1", fixed_factors=None, model_fixed_factors=Non self.dim = self.model.factors["n_networks"] self.lower_bounds = tuple([0 for _ in range(self.model.factors["n_networks"])]) self.upper_bounds = tuple([1 for _ in range(self.model.factors["n_networks"])]) + self.Ci = None + self.Ce = np.array([1 for _ in range(self.model.factors["n_networks"])]) #None + self.di = None + self.de = np.array([1]) #None def vector_to_factor_dict(self, vector): """ diff --git a/simopt/models/san.py b/simopt/models/san.py index 8d219cd3f..7e476594c 100644 --- a/simopt/models/san.py +++ 
b/simopt/models/san.py @@ -6,6 +6,7 @@ `here `_. """ import numpy as np +import cvxpy as cp from ..base import Model, Problem @@ -15,7 +16,7 @@ class SAN(Model): A model that simulates a stochastic activity network problem with tasks that have exponentially distributed durations, and the selected means come with a cost. - +· Attributes ---------- name : string @@ -129,7 +130,7 @@ def replicate(self, rng_list): graph_in[a[1]].add(a[0]) graph_out[a[0]].add(a[1]) indegrees = [len(graph_in[n]) for n in range(1, self.factors["num_nodes"] + 1)] - # outdegrees = [len(graph_out[n]) for n in range(1, self.factors["num_nodes"]+1)] + queue = [] topo_order = [] for n in range(self.factors["num_nodes"]): @@ -212,6 +213,14 @@ class SANLongestPath(Problem): lower bound for each decision variable upper_bounds : tuple upper bound for each decision variable + Ci : ndarray (or None) + Coefficient matrix for linear inequality constraints of the form Ci@x <= di + Ce : ndarray (or None) + Coefficient matrix for linear equality constraints of the form Ce@x = de + di : ndarray (or None) + Constraint vector for linear inequality constraints of the form Ci@x <= di + de : ndarray (or None) + Constraint vector for linear equality constraints of the form Ce@x = de gradient_available : bool indicates if gradient of objective function is available optimal_value : tuple @@ -282,7 +291,7 @@ def __init__(self, name="SAN-1", fixed_factors=None, model_fixed_factors=None): "arc_costs": { "description": "Cost associated to each arc.", "datatype": tuple, - "default": (1,) * 13 + "default": (1, ) * 13 } } self.check_factor_list = { @@ -294,8 +303,13 @@ def __init__(self, name="SAN-1", fixed_factors=None, model_fixed_factors=None): # Instantiate model with fixed factors and over-riden defaults. 
self.model = SAN(self.model_fixed_factors) self.dim = len(self.model.factors["arcs"]) - self.lower_bounds = (1e-2,) * self.dim - self.upper_bounds = (np.inf,) * self.dim + self.lower_bounds = (1e-2, ) * self.dim + # self.upper_bounds = (np.inf, ) * self.dim + self.upper_bounds = (1000, ) * self.dim + self.Ci = None + self.Ce = None + self.di = None + self.de = None def check_arc_costs(self): positive = True @@ -448,3 +462,455 @@ def get_random_solution(self, rand_sol_rng): """ x = tuple([rand_sol_rng.lognormalvariate(lq=0.1, uq=10) for _ in range(self.dim)]) return x + + +""" +Summary +------- +Minimize the duration of the longest path from a to i subject to a lower bound in sum of arc_means. +""" + + +class SANLongestPathConstr(Problem): + """ + Base class to implement simulation-optimization problems. + + Attributes + ---------- + name : string + name of problem + dim : int + number of decision variables + n_objectives : int + number of objectives + n_stochastic_constraints : int + number of stochastic constraints + minmax : tuple of int (+/- 1) + indicator of maximization (+1) or minimization (-1) for each objective + constraint_type : string + description of constraints types: + "unconstrained", "box", "deterministic", "stochastic" + variable_type : string + description of variable types: + "discrete", "continuous", "mixed" + lower_bounds : tuple + lower bound for each decision variable + upper_bounds : tuple + upper bound for each decision variable + Ci : ndarray (or None) + Coefficient matrix for linear inequality constraints of the form Ci@x <= di + Ce : ndarray (or None) + Coefficient matrix for linear equality constraints of the form Ce@x = de + di : ndarray (or None) + Constraint vector for linear inequality constraints of the form Ci@x <= di + de : ndarray (or None) + Constraint vector for linear equality constraints of the form Ce@x = de + gradient_available : bool + indicates if gradient of objective function is available + optimal_value : tuple + 
optimal objective function value + optimal_solution : tuple + optimal solution + model : Model object + associated simulation model that generates replications + model_default_factors : dict + default values for overriding model-level default factors + model_fixed_factors : dict + combination of overriden model-level factors and defaults + model_decision_factors : set of str + set of keys for factors that are decision variables + rng_list : list of mrg32k3a.mrg32k3a.MRG32k3a objects + list of RNGs used to generate a random initial solution + or a random problem instance + factors : dict + changeable factors of the problem + initial_solution : list + default initial solution from which solvers start + budget : int > 0 + max number of replications (fn evals) for a solver to take + specifications : dict + details of each factor (for GUI, data validation, and defaults) + + Arguments + --------- + name : str + user-specified name for problem + fixed_factors : dict + dictionary of user-specified problem factors + model_fixed factors : dict + subset of user-specified non-decision factors to pass through to the model + + See also + -------- + base.Problem + """ + def __init__(self, name="SAN-2", fixed_factors=None, model_fixed_factors=None): + if fixed_factors is None: + fixed_factors = {} + if model_fixed_factors is None: + model_fixed_factors = {} + self.name = name + self.n_objectives = 1 + self.n_stochastic_constraints = 0 + self.minmax = (-1,) + self.constraint_type = "box" + self.variable_type = "continuous" + self.gradient_available = True + self.optimal_value = None + self.optimal_solution = None + self.model_default_factors = {} + self.model_decision_factors = {"arc_means"} + self.factors = fixed_factors + self.specifications = { + "initial_solution": { + "description": "initial solution", + "datatype": tuple, + "default": (20,) * 13 + }, + "budget": { + "description": "max # of replications for a solver to take", + "datatype": int, + "default": 10000 + }, + 
"arc_costs": { + "description": "Cost associated to each arc.", + "datatype": tuple, + "default": (1,) * 13 + }, + "sum_lb": { + "description": "Lower bound for the sum of arc means", + "datatype": float, + "default": 100.0 + } + } + self.check_factor_list = { + "initial_solution": self.check_initial_solution, + "budget": self.check_budget, + "arc_costs": self.check_arc_costs + } + super().__init__(fixed_factors, model_fixed_factors) + # Instantiate model with fixed factors and over-riden defaults. + self.model = SAN(self.model_fixed_factors) + self.dim = len(self.model.factors["arcs"]) + self.lower_bounds = (1e-2,) * self.dim + self.upper_bounds = (100000,) * self.dim #np.inf, + self.Ci = -1 * np.ones(13) + self.Ce = None + self.di = -1 * np.array([self.factors["sum_lb"]]) + self.de = None + + def check_arc_costs(self): + positive = True + for x in list(self.factors["arc_costs"]): + positive = positive & x > 0 + return (len(self.factors["arc_costs"]) != self.model.factors["num_arcs"]) & positive + + def vector_to_factor_dict(self, vector): + """ + Convert a vector of variables to a dictionary with factor keys + + Arguments + --------- + vector : tuple + vector of values associated with decision variables + + Returns + ------- + factor_dict : dictionary + dictionary with factor keys and associated values + """ + factor_dict = { + "arc_means": vector[:] + } + return factor_dict + + def factor_dict_to_vector(self, factor_dict): + """ + Convert a dictionary with factor keys to a vector + of variables. + + Arguments + --------- + factor_dict : dictionary + dictionary with factor keys and associated values + + Returns + ------- + vector : tuple + vector of values associated with decision variables + """ + vector = tuple(factor_dict["arc_means"]) + return vector + + def response_dict_to_objectives(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of objectives. 
+ + Arguments + --------- + response_dict : dictionary + dictionary with response keys and associated values + + Returns + ------- + objectives : tuple + vector of objectives + """ + objectives = (response_dict["longest_path_length"],) + return objectives + + def response_dict_to_stoch_constraints(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of left-hand sides of stochastic constraints: E[Y] <= 0 + + Arguments + --------- + response_dict : dictionary + dictionary with response keys and associated values + + Returns + ------- + stoch_constraints : tuple + vector of LHSs of stochastic constraint + """ + stoch_constraints = None + return stoch_constraints + + def deterministic_stochastic_constraints_and_gradients(self, x): + """ + Compute deterministic components of stochastic constraints for a solution `x`. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_stoch_constraints : tuple + vector of deterministic components of stochastic constraints + det_stoch_constraints_gradients : tuple + vector of gradients of deterministic components of stochastic constraints + """ + det_stoch_constraints = None + det_stoch_constraints_gradients = ((0,) * self.dim,) # tuple of tuples – of sizes self.dim by self.dim, full of zeros + return det_stoch_constraints, det_stoch_constraints_gradients + + def deterministic_objectives_and_gradients(self, x): + """ + Compute deterministic components of objectives for a solution `x`. 
+ + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_objectives : tuple + vector of deterministic components of objectives + det_objectives_gradients : tuple + vector of gradients of deterministic components of objectives + """ + det_objectives = (0,) + det_objectives_gradients = ((0,) * self.dim,) + return det_objectives, det_objectives_gradients + + def check_deterministic_constraints(self, x): + """ + Check if a solution `x` satisfies the problem's deterministic constraints. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + satisfies : bool + indicates if solution `x` satisfies the deterministic constraints. + """ + return np.all(np.array(x) >= 0) + + # def get_random_solution(self, rand_sol_rng): + # """ + # Generate a random solution for starting or restarting solvers. + + # Arguments + # --------- + # rand_sol_rng : mrg32k3a.mrg32k3a.MRG32k3a object + # random-number generator used to sample a new random solution + + # Returns + # ------- + # x : tuple + # vector of decision variables + # """ + # while True: + # x = [rand_sol_rng.lognormalvariate(lq = 0.1, uq = 10) for _ in range(self.dim)] + # if np.sum(x) >= self.factors['sum_lb']: + # break + # x= tuple(x) + # return x + + def find_feasible_initial(self, Ae, Ai, be, bi): + ''' + Find an initial feasible solution (if not user-provided) + by solving phase one simplex. + + Arguments + --------- + problem : Problem object + simulation-optimization problem to solve + C: ndarray + constraint coefficient matrix + d: ndarray + constraint coefficient vector + + Returns + ------- + x0 : ndarray + an initial feasible solution + tol: float + Floating point comparison tolerance + ''' + upper_bound = np.array(self.upper_bounds) + lower_bound = np.array(self.lower_bounds) + + # Define decision variables. + x = cp.Variable(self.dim) + + # Define constraints. 
+ constraints = [] + + if (Ae is not None) and (be is not None): + constraints.append(Ae @ x == be.ravel()) + if (Ai is not None) and (bi is not None): + constraints.append(Ai @ x <= bi.ravel()) + + # Removing redundant bound constraints. + ub_inf_idx = np.where(~np.isinf(upper_bound))[0] + if len(ub_inf_idx) > 0: + for i in ub_inf_idx: + constraints.append(x[i] <= upper_bound[i]) + lb_inf_idx = np.where(~np.isinf(lower_bound)) + if len(lb_inf_idx) > 0: + for i in lb_inf_idx: + constraints.append(x[i] >= lower_bound[i]) + + # Define objective function. + obj = cp.Minimize(0) + + # Create problem. + model = cp.Problem(obj, constraints) + + # Solve problem. + model.solve(solver = cp.SCIPY) + + # Check for optimality. + if model.status not in [cp.OPTIMAL, cp.OPTIMAL_INACCURATE] : + raise ValueError("Could not find feasible x0") + x0 = x.value + + return x0 + + def get_random_solution(self, rand_sol_rng): + """ + Generate a random solution for starting or restarting solvers. + + Arguments + --------- + rand_sol_rng : mrg32k3a.mrg32k3a.MRG32k3a + random-number generator used to sample a new random solution + + Returns + ------- + x : tuple + vector of decision variables + """ + + # Upper bound and lower bound. + lower_bound = np.array(self.lower_bounds) + upper_bound = np.array(self.upper_bounds) + # Input inequality and equlaity constraint matrix and vector. + # Cix <= di + # Cex = de + Ci = self.Ci + di = self.di + Ce = self.Ce + de = self.de + + # Remove redundant upper/lower bounds. + ub_inf_idx = np.where(~np.isinf(upper_bound))[0] + lb_inf_idx = np.where(~np.isinf(lower_bound))[0] + + # Form a constraint coefficient matrix where all the equality constraints are put on top and + # all the bound constraints in the bottom and a constraint coefficient vector. 
+ if (Ce is not None) and (de is not None) and (Ci is not None) and (di is not None): + C = np.vstack((Ce, Ci)) + d = np.vstack((de.T, di.T)) + elif (Ce is not None) and (de is not None): + C = Ce + d = de.T + elif (Ci is not None) and (di is not None): + C = Ci + d = di.T + else: + C = np.empty([1, self.dim]) + d = np.empty([1, 1]) + + if len(ub_inf_idx) > 0: + C = np.vstack((C, np.identity(upper_bound.shape[0]))) + d = np.vstack((d, upper_bound[np.newaxis].T)) + if len(lb_inf_idx) > 0: + C = np.vstack((C, -np.identity(lower_bound.shape[0]))) + d = np.vstack((d, -lower_bound[np.newaxis].T)) + + # Hit and Run + start_pt = self.find_feasible_initial(None, C, None, d) + tol = 1e-6 + + x = start_pt + # Generate the markov chain for sufficiently long. + for _ in range(20): + # Generate a random direction to travel. + direction = np.array([rand_sol_rng.uniform(0, 1) for _ in range(self.dim)]) + direction = direction / np.linalg.norm(direction) + + dir = direction + ra = d.flatten() - C @ x + ra_d = C @ dir + # Initialize maximum step size. + s_star = np.inf + # Perform ratio test. + for i in range(len(ra)): + if ra_d[i] - tol > 0: + s = ra[i]/ra_d[i] + if s < s_star: + s_star = s + + dir = -direction + ra = d.flatten() - C @ x + ra_d = C @ dir + # Initialize maximum step size. + s_star2 = np.inf + # Perform ratio test. + for i in range(len(ra)): + if ra_d[i] - tol > 0: + s = ra[i]/ra_d[i] + if s < s_star2: + s_star2 = s + + # Generate random point between lambdas. + # lam = rand_sol_rng.uniform(-1 * s_star2, s_star) + lam = rand_sol_rng.uniform(-1 * min(50, s_star2), min(50, s_star)) + + # Compute the new point. + x += lam * direction + + print('sol: ', x) + + x= tuple(x) + return x diff --git a/simopt/models/smf.py b/simopt/models/smf.py new file mode 100644 index 000000000..22cc9f472 --- /dev/null +++ b/simopt/models/smf.py @@ -0,0 +1,488 @@ +""" +Summary +------- +Simulate duration of a stochastic Max-Flow network (SMF). 
+A detailed description of the model/problem can be found +`here `_. +""" + +import numpy as np +from ortools.graph.python import max_flow +from ..base import Model, Problem + + +class SMF(Model): + """ + A model that simulates a stochastic Max-Flow problem with + capacities deducted with multivariate distributed noise distributed durations + + Attributes + ---------- + name : string + name of model + n_rngs : int + number of random-number generators used to run a simulation replication + n_responses : int + number of responses (performance measures) + factors : dict + changeable factors of the simulation model + specifications : dict + details of each factor (for GUI and data validation) + check_factor_list : dict + switch case for checking factor simulatability + + Arguments + --------- + fixed_factors : nested dict + fixed factors of the simulation model + + See also + -------- + base.Model + """ + def __init__(self, fixed_factors=None): + if fixed_factors is None: + fixed_factors = {} + self.name = "SMF" + self.n_rngs = 1 + self.n_random = 1 + self.n_responses = 1 + cov_fac = np.zeros((20, 20)) + np.fill_diagonal(cov_fac, 4) + cov_fac = cov_fac.tolist() + self.specifications = { + "num_nodes": { + "description": "number of nodes, 0 being the source, highest being the sink", + "datatype": int, + "default": 10 + }, + "source_index": { + "description": "source node index", + "datatype": int, + "default": 0 + }, + "sink_index": { + "description": "sink node index", + "datatype": int, + "default": 9 + }, + "arcs": { + "description": "list of arcs", + "datatype": list, + "default": [(0, 1), (0, 2), (0, 3), (1, 2), (1, 4), (2, 4), (4, 2), (3, 2), (2, 5), (4, 5), (3, 6), (3, 7), (6, 2), (6, 5), (6, 7), (5, 8), (6, 8), (6, 9), (7, 9), (8, 9)] + }, + "assigned_capacities": { + "description": "Assigned capacity of each arc", + "datatype": list, + "default": [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5] + }, + "mean_noise": { + "description": "The mean noise 
in reduction of arc capacities", + "datatype": list, + "default": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + }, + "cov_noise": { + "description": "Covariance matrix of noise", + "datatype": list, + "default": cov_fac + } + + } + self.check_factor_list = { + "num_nodes": self.check_num_nodes, + "arcs": self.check_arcs, + "assigned_capacities": self.check_capacities, + "mean_noise": self.check_mean, + "cov_noise": self.check_cov, + "source_index": self.check_s, + "sink_index": self.check_t + } + # Set factors of the simulation model. + super().__init__(fixed_factors) + + def check_num_nodes(self): + return self.factors["num_nodes"] > 0 + + def dfs(self, graph, start, visited=None): + if visited is None: + visited = set() + visited.add(start) + for next in graph[start] - visited: + self.dfs(graph, next, visited) + return visited + + def check_arcs(self): + if len(self.factors["arcs"]) <= 0: + return False + # Check source is connected to the sink. + graph = {node: set() for node in range(0, self.factors["num_nodes"])} + for a in self.factors["arcs"]: + graph[a[0]].add(a[1]) + visited = self.dfs(graph, self.factors["source_index"]) + if self.factors["source_index"] in visited and self.factors["sink_index"] in visited: + return True + return False + + def check_capacities(self): + positive = True + for x in list(self.factors["assigned_capacities"]): + positive = positive & (x > 0) + return (len(self.factors["assigned_capacities"]) == len(self.factors["arcs"])) & positive + + def check_mean(self): + return len(self.factors["mean_noise"]) == len(self.factors["arcs"]) + + def check_cov(self): + return np.array(self.factors["cov_noise"]).shape == (len(self.factors["arcs"]), len(self.factors["arcs"])) + + def check_s(self): + return self.factors["source_index"] >= 0 and self.factors["source_index"] <= self.factors["num_nodes"] + + def check_t(self): + return self.factors["sink_index"] >= 0 and self.factors["sink_index"] <= self.factors["num_nodes"] + + 
def replicate(self, rng_list): + """ + Simulate a single replication for the current model factors. + + Arguments + --------- + rng_list : list of mrg32k3a.mrg32k3a.MRG32k3a + rngs for model to use when simulating a replication + + Returns + ------- + responses : dict + performance measures of interest + "longest_path_length" = length/duration of longest path + gradients : dict of dicts + gradient estimates for each response + """ + # Designate separate random number generators. + solver = max_flow.SimpleMaxFlow() + exp_rng = rng_list[0] + # From input graph generate start end end nodes. + start_nodes = [] + end_nodes = [] + for i, j in self.factors["arcs"]: + start_nodes.append(i) + end_nodes.append(j) + # Generate actual capacity. + for i in range(len(self.factors["arcs"])): + noise = exp_rng.mvnormalvariate(self.factors["mean_noise"], np.array(self.factors["cov_noise"])) + capacities = [] + for i in range(len(noise)): + capacities.append(max(1000 * (self.factors["assigned_capacities"][i] - noise[i]), 0)) + # Add arcs in bulk. + solver.add_arcs_with_capacity(start_nodes, end_nodes, capacities) + status = solver.solve(self.factors["source_index"], self.factors["sink_index"]) + if status != solver.OPTIMAL: + print('There was an issue with the max flow input.') + print(f'Status: {status}') + exit(1) + + # Construct gradient vector (=1 if has a outflow from min-cut nodes). 
+ gradient = np.zeros(len(self.factors["arcs"])) + grad_arclist = [] + min_cut_nodes = solver.get_source_side_min_cut() + for i in min_cut_nodes: + for j in range(self.factors['num_nodes']): + if j not in min_cut_nodes: + grad_arc = (i, j) + if (i, j) in self.factors['arcs']: + grad_arclist.append(grad_arc) + for arc in grad_arclist: + gradient[self.factors['arcs'].index(arc)] = 1 + + responses = {"Max Flow": solver.optimal_flow() / 1000} + gradients = {response_key: {factor_key: np.nan for factor_key in self.specifications} for response_key in responses} + gradients["Max Flow"]["assigned_capacities"] = gradient + return responses, gradients + + +""" +Summary +------- +Maximize the expected max flow from the source node s to the sink node t. +""" + + +class SMF_Max(Problem): + """ + Base class to implement simulation-optimization problems. + + Attributes + ---------- + name : string + name of problem + dim : int + number of decision variables + n_objectives : int + number of objectives + n_stochastic_constraints : int + number of stochastic constraints + minmax : tuple of int (+/- 1) + indicator of maximization (+1) or minimization (-1) for each objective + constraint_type : string + description of constraints types: + "unconstrained", "box", "deterministic", "stochastic" + variable_type : string + description of variable types: + "discrete", "continuous", "mixed" + lower_bounds : tuple + lower bound for each decision variable + upper_bounds : tuple + upper bound for each decision variable + Ci : ndarray (or None) + Coefficient matrix for linear inequality constraints of the form Ci@x <= di + Ce : ndarray (or None) + Coefficient matrix for linear equality constraints of the form Ce@x = de + di : ndarray (or None) + Constraint vector for linear inequality constraints of the form Ci@x <= di + de : ndarray (or None) + Constraint vector for linear equality constraints of the form Ce@x = de + gradient_available : bool + indicates if gradient of objective function is 
available + optimal_value : tuple + optimal objective function value + optimal_solution : tuple + optimal solution + model : Model object + associated simulation model that generates replications + model_default_factors : dict + default values for overriding model-level default factors + model_fixed_factors : dict + combination of overriden model-level factors and defaults + model_decision_factors : set of str + set of keys for factors that are decision variables + rng_list : list of mrg32k3a.mrg32k3a.MRG32k3a objects + list of RNGs used to generate a random initial solution + or a random problem instance + factors : dict + changeable factors of the problem + initial_solution : list + default initial solution from which solvers start + budget : int > 0 + max number of replications (fn evals) for a solver to take + specifications : dict + details of each factor (for GUI, data validation, and defaults) + + Arguments + --------- + name : str + user-specified name for problem + fixed_factors : dict + dictionary of user-specified problem factors + model_fixed factors : dict + subset of user-specified non-decision factors to pass through to the model + + See also + -------- + base.Problem + """ + def __init__(self, name="SMF-1", fixed_factors=None, model_fixed_factors=None, random=False): + if fixed_factors is None: + fixed_factors = {} + if model_fixed_factors is None: + model_fixed_factors = {} + self.name = name + self.n_objectives = 1 + self.n_stochastic_constraints = 0 + self.minmax = (1, ) + self.constraint_type = "deterministic" + self.variable_type = "continuous" + self.gradient_available = True + self.optimal_value = None + self.optimal_solution = None + self.random = random + self.model_default_factors = {} + self.model_decision_factors = {"assigned_capacities"} + self.factors = fixed_factors + self.specifications = { + "initial_solution": { + "description": "initial solution", + "datatype": tuple, + "default": (1, ) * 20 + }, + "budget": { + "description": 
"max # of replications for a solver to take", + "datatype": int, + "default": 100000 + }, + "cap": { + "description": "total set-capacity to be allocated to arcs.", + "datatype": int, + "default": 100 + } + } + self.check_factor_list = { + "initial_solution": self.check_initial_solution, + "budget": self.check_budget, + "cap": self.check_cap + } + super().__init__(fixed_factors, model_fixed_factors) + # Instantiate model with fixed factors and over-riden defaults. + self.model = SMF(self.model_fixed_factors) + self.dim = len(self.model.factors["arcs"]) + self.lower_bounds = (0, ) * self.dim + # self.upper_bounds = (np.inf, ) * self.dim + self.upper_bounds = (self.factors["cap"], ) * self.dim + self.Ci = np.ones(20) + self.Ce = None + self.di = np.array([self.factors["cap"]]) + self.de = None + + def check_cap(self): + return self.factors["cap"] >= 0 + + def attach_rngs(self, random_rng): + self.random_rng = random_rng + self.model.attach_rng(random_rng) + return random_rng + + def vector_to_factor_dict(self, vector): + """ + Convert a vector of variables to a dictionary with factor keys + + Arguments + --------- + vector : tuple + vector of values associated with decision variables + + Returns + ------- + factor_dict : dictionary + dictionary with factor keys and associated values + """ + factor_dict = { + "assigned_capacities": vector[:] + } + return factor_dict + + def factor_dict_to_vector(self, factor_dict): + """ + Convert a dictionary with factor keys to a vector + of variables. + + Arguments + --------- + factor_dict : dictionary + dictionary with factor keys and associated values + + Returns + ------- + vector : tuple + vector of values associated with decision variables + """ + vector = tuple(factor_dict["assigned_capacities"]) + return vector + + def response_dict_to_objectives(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of objectives. 
+ + Arguments + --------- + response_dict : dictionary + dictionary with response keys and associated values + + Returns + ------- + objectives : tuple + vector of objectives + """ + objectives = (response_dict["Max Flow"], ) + return objectives + + def response_dict_to_stoch_constraints(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of left-hand sides of stochastic constraints: E[Y] <= 0 + + Arguments + --------- + response_dict : dictionary + dictionary with response keys and associated values + + Returns + ------- + stoch_constraints : tuple + vector of LHSs of stochastic constraint + """ + stoch_constraints = None + return stoch_constraints + + def deterministic_stochastic_constraints_and_gradients(self, x): + """ + Compute deterministic components of stochastic constraints for a solution `x`. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_stoch_constraints : tuple + vector of deterministic components of stochastic constraints + det_stoch_constraints_gradients : tuple + vector of gradients of deterministic components of stochastic constraints + """ + det_stoch_constraints = None + det_stoch_constraints_gradients = None + return det_stoch_constraints, det_stoch_constraints_gradients + + def deterministic_objectives_and_gradients(self, x): + """ + Compute deterministic components of objectives for a solution `x`. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_objectives : tuple + vector of deterministic components of objectives + det_objectives_gradients : tuple + vector of gradients of deterministic components of objectives + """ + det_objectives = (0, ) + det_objectives_gradients = ((0, ) * self.dim,) + return det_objectives, det_objectives_gradients + + def check_deterministic_constraints(self, x): + """ + Check if a solution `x` satisfies the problem's deterministic constraints. 
+ + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + satisfies : bool + indicates if solution `x` satisfies the deterministic constraints. + """ + + return sum(self.factors["assigned_capacities"]) <= self.factors["cap"] + + def get_random_solution(self, rand_sol_rng): + """ + Generate a random solution for starting or restarting solvers. + + Arguments + --------- + rand_sol_rng : mrg32k3a.mrg32k3a.MRG32k3a object + random-number generator used to sample a new random solution + + Returns + ------- + x : tuple + vector of decision variables + """ + x = rand_sol_rng.continuous_random_vector_from_simplex(len(self.model.factors["arcs"]), self.factors["cap"], False) + return x diff --git a/simopt/models/smfcvx.py b/simopt/models/smfcvx.py new file mode 100644 index 000000000..27daa23f0 --- /dev/null +++ b/simopt/models/smfcvx.py @@ -0,0 +1,510 @@ +""" +Summary +------- +Simulate duration of a stochastic Max-Flow network (SMF). +A detailed description of the model/problem can be found +`here `_. 
+""" + +import numpy as np +from ortools.graph.python import max_flow +from ..base import Model, Problem + + +class SMFCVX0(Model): + """ + A model that simulates a stochastic Max-Flow problem with + capacities deducted with multivariate distributed noise distributed durations + + Attributes + ---------- + name : string + name of model + n_rngs : int + number of random-number generators used to run a simulation replication + n_responses : int + number of responses (performance measures) + factors : dict + changeable factors of the simulation model + specifications : dict + details of each factor (for GUI and data validation) + check_factor_list : dict + switch case for checking factor simulatability + + Arguments + --------- + fixed_factors : nested dict + fixed factors of the simulation model + + See also + -------- + base.Model + """ + def __init__(self, fixed_factors=None): + if fixed_factors is None: + fixed_factors = {} + self.name = "SMFCVX" + self.n_rngs = 1 + self.n_responses = 1 + cov_fac = np.zeros((20, 20)) + np.fill_diagonal(cov_fac, 4) + cov_fac = cov_fac.tolist() + self.specifications = { + "num_nodes": { + "description": "number of nodes, 0 being the source, highest being the sink", + "datatype": int, + "default": 10 + }, + "source_index": { + "description": "source node index", + "datatype": int, + "default": 0 + }, + "sink_index": { + "description": "sink node index", + "datatype": int, + "default": 9 + }, + "arcs": { + "description": "list of arcs", + "datatype": list, + "default": [(0, 1), (0, 2), (0, 3), (1, 2), (1, 4), (2, 4), (4, 2), (3, 2), (2, 5), (4, 5), (3, 6), (3, 7), (6, 2), (6, 5), (6, 7), (5, 8), (6, 8), (6, 9), (7, 9), (8, 9)] + }, + "assigned_capacities": { + "description": "Assigned capacity of each arc", + "datatype": list, + "default": [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5] + }, + # "capacity_bound":{ + # "description": "upper bound capacity function", + # "datatype": 'function', + # "default": 
self.default_upper_fn + # }, + "mean_noise": { + "description": "The mean noise in reduction of arc capacities", + "datatype": list, + "default": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + }, + "cov_noise": { + "description": "Covariance matrix of noise", + "datatype": list, + "default": cov_fac + } + + } + self.check_factor_list = { + "num_nodes": self.check_num_nodes, + "arcs": self.check_arcs, + "assigned_capacities": self.check_capacities, + "mean_noise": self.check_mean, + "cov_noise": self.check_cov, + "source_index": self.check_s, + "sink_index": self.check_t + } + # Set factors of the simulation model. + super().__init__(fixed_factors) + self.num_arcs = len(self.factors["arcs"]) + + def check_num_nodes(self): + return self.factors["num_nodes"] > 0 + + def default_upper_fn(self,rng,k = 5,lamb = 1): + #return the upper bound of the capacities for a single edge x + #the multiplier X (xX) is an Erlang ~ (k,lamb) + + capacities = [] + for i in range(self.num_arcs): + capacities.append(1000*self.factors["assigned_capacities"][i]*sum([rng.expovariate(lamb) for j in range(k)])) + return capacities + + def pos_part_capacity(self): + #generate capacity of the form [x - noise]^{+} + for i in range(self.num_arcs): + noise = exp_rng.mvnormalvariate(self.factors["mean_noise"], np.array(self.factors["cov_noise"])) + capacities = [] + for i in range(self.num_arcs): + capacities.append(max(1000 * (self.factors["assigned_capacities"][i] - noise[i]), 0)) + + return capacities + + + def dfs(self, graph, start, visited=None): + if visited is None: + visited = set() + visited.add(start) + for next in graph[start] - visited: + self.dfs(graph, next, visited) + return visited + + def check_arcs(self): + if len(self.factors["arcs"]) <= 0: + return False + # Check source is connected to the sink. 
+ graph = {node: set() for node in range(0, self.factors["num_nodes"])} + for a in self.factors["arcs"]: + graph[a[0]].add(a[1]) + visited = self.dfs(graph, self.factors["source_index"]) + if self.factors["source_index"] in visited and self.factors["sink_index"] in visited: + return True + return False + + def check_capacities(self): + positive = True + for x in list(self.factors["assigned_capacities"]): + positive = positive & (x > 0) + return (len(self.factors["assigned_capacities"]) == len(self.factors["arcs"])) & positive + + def check_mean(self): + return len(self.factors["mean_noise"]) == len(self.factors["arcs"]) + + def check_cov(self): + return np.array(self.factors["cov_noise"]).shape == (len(self.factors["arcs"]), len(self.factors["arcs"])) + + def check_s(self): + return self.factors["source_index"] >= 0 and self.factors["source_index"] <= self.factors["num_nodes"] + + def check_t(self): + return self.factors["sink_index"] >= 0 and self.factors["sink_index"] <= self.factors["num_nodes"] + + def replicate(self, rng_list): + """ + Simulate a single replication for the current model factors. + + Arguments + --------- + rng_list : list of mrg32k3a.mrg32k3a.MRG32k3a + rngs for model to use when simulating a replication + + Returns + ------- + responses : dict + performance measures of interest + "longest_path_length" = length/duration of longest path + gradients : dict of dicts + gradient estimates for each response + """ + # Designate separate random number generators. + solver = max_flow.SimpleMaxFlow() + exp_rng = rng_list[0] + # From input graph generate start end end nodes. + start_nodes = [] + end_nodes = [] + for i, j in self.factors["arcs"]: + start_nodes.append(i) + end_nodes.append(j) + # Generate actual capacity. 
+ #for i in range(len(self.factors["arcs"])): + # noise = exp_rng.mvnormalvariate(self.factors["mean_noise"], np.array(self.factors["cov_noise"])) + #capacities = [] + #for i in range(len(noise)): + #capacities.append(max(1000 * (self.factors["assigned_capacities"][i] - noise[i]), 0)) + # capacities.append(self.default_upper_fn(self.factors["assigned_capacities"][i],exp_rng)) + capacities = self.default_upper_fn(exp_rng) + #print("capacities: ", capacities) + + # Add arcs in bulk. + solver.add_arcs_with_capacity(start_nodes, end_nodes, capacities) + status = solver.solve(self.factors["source_index"], self.factors["sink_index"]) + if status != solver.OPTIMAL: + print('There was an issue with the max flow input.') + print(f'Status: {status}') + exit(1) + + # Construct gradient vector (=1 if has a outflow from min-cut nodes). + gradient = np.zeros(len(self.factors["arcs"])) + grad_arclist = [] + min_cut_nodes = solver.get_source_side_min_cut() + for i in min_cut_nodes: + for j in range(self.factors['num_nodes']): + if j not in min_cut_nodes: + grad_arc = (i, j) + if (i, j) in self.factors['arcs']: + grad_arclist.append(grad_arc) + for arc in grad_arclist: + gradient[self.factors['arcs'].index(arc)] = 1 + + responses = {"Max Flow": solver.optimal_flow() / 1000} + gradients = {response_key: {factor_key: np.nan for factor_key in self.specifications} for response_key in responses} + gradients["Max Flow"]["assigned_capacities"] = gradient + return responses, gradients + + +""" +Summary +------- +Maximize the expected max flow from the source node s to the sink node t. +""" + + +class SMFCVX_Max0(Problem): + """ + Base class to implement simulation-optimization problems. 
+ + Attributes + ---------- + name : string + name of problem + dim : int + number of decision variables + n_objectives : int + number of objectives + n_stochastic_constraints : int + number of stochastic constraints + minmax : tuple of int (+/- 1) + indicator of maximization (+1) or minimization (-1) for each objective + constraint_type : string + description of constraints types: + "unconstrained", "box", "deterministic", "stochastic" + variable_type : string + description of variable types: + "discrete", "continuous", "mixed" + lower_bounds : tuple + lower bound for each decision variable + upper_bounds : tuple + upper bound for each decision variable + Ci : ndarray (or None) + Coefficient matrix for linear inequality constraints of the form Ci@x <= di + Ce : ndarray (or None) + Coefficient matrix for linear equality constraints of the form Ce@x = de + di : ndarray (or None) + Constraint vector for linear inequality constraints of the form Ci@x <= di + de : ndarray (or None) + Constraint vector for linear equality constraints of the form Ce@x = de + gradient_available : bool + indicates if gradient of objective function is available + optimal_value : tuple + optimal objective function value + optimal_solution : tuple + optimal solution + model : Model object + associated simulation model that generates replications + model_default_factors : dict + default values for overriding model-level default factors + model_fixed_factors : dict + combination of overriden model-level factors and defaults + model_decision_factors : set of str + set of keys for factors that are decision variables + rng_list : list of mrg32k3a.mrg32k3a.MRG32k3a objects + list of RNGs used to generate a random initial solution + or a random problem instance + factors : dict + changeable factors of the problem + initial_solution : list + default initial solution from which solvers start + budget : int > 0 + max number of replications (fn evals) for a solver to take + specifications : dict + 
details of each factor (for GUI, data validation, and defaults) + + Arguments + --------- + name : str + user-specified name for problem + fixed_factors : dict + dictionary of user-specified problem factors + model_fixed factors : dict + subset of user-specified non-decision factors to pass through to the model + + See also + -------- + base.Problem + """ + def __init__(self, name="SMFCVX-1", fixed_factors=None, model_fixed_factors=None): + if fixed_factors is None: + fixed_factors = {} + if model_fixed_factors is None: + model_fixed_factors = {} + self.name = name + self.n_objectives = 1 + self.n_stochastic_constraints = 0 + self.minmax = (1, ) + self.constraint_type = "deterministic" + self.variable_type = "continuous" + self.gradient_available = True + self.optimal_value = None + self.optimal_solution = None + self.model_default_factors = {} + self.model_decision_factors = {"assigned_capacities"} + self.factors = fixed_factors + self.specifications = { + "initial_solution": { + "description": "initial solution", + "datatype": tuple, + "default": (1, ) * 20 + }, + "budget": { + "description": "max # of replications for a solver to take", + "datatype": int, + "default": 30000 + }, + "cap": { + "description": "total set-capacity to be allocated to arcs.", + "datatype": int, + "default": 100 + } + } + self.check_factor_list = { + "initial_solution": self.check_initial_solution, + "budget": self.check_budget, + "cap": self.check_cap + } + super().__init__(fixed_factors, model_fixed_factors) + # Instantiate model with fixed factors and over-riden defaults. 
+ self.model = SMFCVX0(self.model_fixed_factors) + self.dim = len(self.model.factors["arcs"]) + self.lower_bounds = (0, ) * self.dim + self.upper_bounds = (1000000, ) * self.dim #np.inf + self.Ci = np.array([20*[1]])#np.ones(20) + self.Ce = None + self.di = np.array([self.factors["cap"]]) + self.de = None + + def check_cap(self): + return self.factors["cap"] >= 0 + + def vector_to_factor_dict(self, vector): + """ + Convert a vector of variables to a dictionary with factor keys + + Arguments + --------- + vector : tuple + vector of values associated with decision variables + + Returns + ------- + factor_dict : dictionary + dictionary with factor keys and associated values + """ + factor_dict = { + "assigned_capacities": vector[:] + } + return factor_dict + + def factor_dict_to_vector(self, factor_dict): + """ + Convert a dictionary with factor keys to a vector + of variables. + + Arguments + --------- + factor_dict : dictionary + dictionary with factor keys and associated values + + Returns + ------- + vector : tuple + vector of values associated with decision variables + """ + vector = tuple(factor_dict["assigned_capacities"]) + return vector + + def response_dict_to_objectives(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of objectives. 
+ + Arguments + --------- + response_dict : dictionary + dictionary with response keys and associated values + + Returns + ------- + objectives : tuple + vector of objectives + """ + objectives = (response_dict["Max Flow"], ) + return objectives + + def response_dict_to_stoch_constraints(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of left-hand sides of stochastic constraints: E[Y] <= 0 + + Arguments + --------- + response_dict : dictionary + dictionary with response keys and associated values + + Returns + ------- + stoch_constraints : tuple + vector of LHSs of stochastic constraint + """ + stoch_constraints = None + return stoch_constraints + + def deterministic_stochastic_constraints_and_gradients(self, x): + """ + Compute deterministic components of stochastic constraints for a solution `x`. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_stoch_constraints : tuple + vector of deterministic components of stochastic constraints + det_stoch_constraints_gradients : tuple + vector of gradients of deterministic components of stochastic constraints + """ + det_stoch_constraints = None + det_stoch_constraints_gradients = None + return det_stoch_constraints, det_stoch_constraints_gradients + + def deterministic_objectives_and_gradients(self, x): + """ + Compute deterministic components of objectives for a solution `x`. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_objectives : tuple + vector of deterministic components of objectives + det_objectives_gradients : tuple + vector of gradients of deterministic components of objectives + """ + det_objectives = (0, ) + det_objectives_gradients = ((0, ) * self.dim,) + return det_objectives, det_objectives_gradients + + def check_deterministic_constraints(self, x): + """ + Check if a solution `x` satisfies the problem's deterministic constraints. 
+ + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + satisfies : bool + indicates if solution `x` satisfies the deterministic constraints. + """ + + return sum(self.factors["assigned_capacities"]) <= self.factors["cap"] + + def get_random_solution(self, rand_sol_rng): + """ + Generate a random solution for starting or restarting solvers. + + Arguments + --------- + rand_sol_rng : mrg32k3a.mrg32k3a.MRG32k3a object + random-number generator used to sample a new random solution + + Returns + ------- + x : tuple + vector of decision variables + """ + x = rand_sol_rng.continuous_random_vector_from_simplex(len(self.model.factors["arcs"]), self.factors["cap"], False) + return x From e4f7723e51b892dad29fafe7fa8abc99187fe7b7 Mon Sep 17 00:00:00 2001 From: Litong Liu <46491025+liulitong-Jessie@users.noreply.github.com> Date: Wed, 28 Feb 2024 09:53:27 +0800 Subject: [PATCH 20/21] Add files via upload --- simopt/solvers/Boom_FrankWolfe.py | 1547 +++++++++++++++++++++++++++++ simopt/solvers/Boom_ProxGD.py | 904 +++++++++++++++++ simopt/solvers/pgdss.py | 576 +++++++++++ 3 files changed, 3027 insertions(+) create mode 100644 simopt/solvers/Boom_FrankWolfe.py create mode 100644 simopt/solvers/Boom_ProxGD.py create mode 100644 simopt/solvers/pgdss.py diff --git a/simopt/solvers/Boom_FrankWolfe.py b/simopt/solvers/Boom_FrankWolfe.py new file mode 100644 index 000000000..be0511566 --- /dev/null +++ b/simopt/solvers/Boom_FrankWolfe.py @@ -0,0 +1,1547 @@ +import numpy as np +import cvxpy as cp +import gurobipy +import matplotlib.pyplot as plt +#import cdd + + +import warnings +warnings.filterwarnings("ignore") + +from ..base import Solver +#env = gurobipy.Env() +#env.setParam('FeasibilityTol', 1e-9) +#env.setParam('MIPGap',0) + + +class BoomFrankWolfe(Solver): + """ + """ + + def __init__(self, name="Boom-FW", fixed_factors={}): + self.name = name + self.objective_type = "single" + self.constraint_type = "deterministic" + self.variable_type = 
"continuous" + self.gradient_needed = False + self.specifications = { + "crn_across_solns": { + "description": "use CRN across solutions?", + "datatype": bool, + "default": True + }, + "r": { + "description": "number of replications taken at each solution", + "datatype": int, + "default": 30 + }, + "h": { + "description": "difference in finite difference gradient", + "datatype": float, + "default": 0.1 + }, + "step_f": { + "description": "step size function", + "datatype": "function", + "default": self.default_step_f + }, + "theta": { + "description": "constant in the line search condition", + "datatype": int, + "default": 0.2 + }, + "max_iters": { + "description": "maximum iterations", + "datatype": int, + "default": 300 + }, + "LSmethod":{ + "description": "methods for line search algorithm", + "datatype":str, + "default":self.backtrackLineSearch + }, + "line_search_max_iters": { + "description": "maximum iterations for line search", + "datatype": int, + "default": 20 + }, + "ratio": { + "description": "decay ratio in line search", + "datatype": float, + "default": 0.8 + }, + "curve_const": { + "description": "constant in curvature wolfe conditions, usually greater than theta", + "datatype": float, + "default": 0.3 + }, + "zoom_init_ratio": { + "description": "ratio of the max step size in Zoom lien search", + "datatype": float, + "default": 0.2 + }, + "zoom_inc_ratio": { + "description": "increment ratio in Zoom lien search", + "datatype": float, + "default": 1.1 + }, + "atom_vectors":{ + "description": "atom vectors for away/pairwise frank-wolfe", + "datatype": "matrix", + "default": None + }, + "max_gamma":{ + "description": "max distance to the next iteration", + "datatype": float, + "default": 1 + }, + "backtrack":{ + "description": "an indicator whether we do the backtrack", + "datatype": bool, + "default": 0 + }, + "algorithm":{ + "description": "type of FW algorithm", + "datatype": str, + "default": "normal" + #away, pairwise + } + + } + 
self.check_factor_list = { + "crn_across_solns": self.check_crn_across_solns, + "r": self.check_r, + "max_iters": self.check_alpha_max, + } + super().__init__(fixed_factors) + + def check_r(self): + return self.factors["r"] > 0 + + def check_alpha_max(self): + + return self.factors["alha_max"] > 0 + + def check_max_iters(self): + + return self.factors['max_iters'] > 0 + + def default_step_f(self,k): + """ + take in the current iteration k + """ + + return 1/(k+1) + + def is_feasible(self, x, Ci,di,Ce,de,lower, upper, tol = 1e-8): + """ + Check whether a solution x is feasible to the problem. + + Arguments + --------- + x : tuple + a solution vector + problem : Problem object + simulation-optimization problem to solve + tol: float + Floating point comparison tolerance + """ + x = np.asarray(x) + res = True + + if(lower is not None): + res = res & np.all(x >= lower) + if(upper is not None): + res = res & np.all(x <= upper) + + if (Ci is not None) and (di is not None): + res = res & np.all(Ci @ x <= di + tol) + if (Ce is not None) and (de is not None): + res = res & (np.allclose(np.dot(Ce, x), de)) + return res + + def get_max_gamma_ratio_test(self, cur_x, d, Ce, Ci, de, di, lower, upper): + ''' + perform a ratio test to find the max step size + ''' + #step = cp.Variable() + #objective = cp.Maximize(step) + #constraints = [step >= 0] + #ratio test: (bi - ai^Tx)/(ai^Td) + ratio_val = [] + denom = [] + dim = len(cur_x) + + if(lower is not None): + #constraints += [(cur_x + step*d) >= lower] + #vals += [(lower[0] - cur_x[i])/(d[i]) for i in range(dim)] + ratio_val += list((cur_x - lower)/-d) + denom += list(-d) + if(upper is not None): + #constraints += [(cur_x + step*d) <= upper] + #vals += [(upper[0] - cur_x[i])/(d[i]) for i in range(dim)] + ratio_val += list((upper - cur_x)/d) + denom += list(d) + if((Ci is not None) and (di is not None)): + #constraints += [Ci@(cur_x + step*d) <= di] + ratio_val += list((di - Ci@cur_x)/(Ci@d)) + denom += list(Ci@d) + + #print("ratio: 
", ratio_val) + #print("denom: ", denom) + ratio_val = np.array(ratio_val) + denom = np.array(denom) + #print("denom: ", denom) + #print("ratio_val: ", ratio_val) + + return min(ratio_val[denom > 1e-6]) + #prob = cp.Problem(objective, constraints) + #prob.solve() + + def get_dir(self,g,Ce, Ci, de, di,lower, upper): + ''' + solve for the direction in each iteration + given a gradient vector g, find min_s{sg} + s.t. problem is feasible + ''' + + n = len(g) + s = cp.Variable(n) + + objective = cp.Minimize(s@g) + constraints = [] + + if(lower is not None): + constraints += [s >= lower] + if(upper is not None): + constraints += [s <= upper] + if((Ci is not None) and (di is not None)): + constraints += [Ci@s <= di] + if((Ce is not None) and (de is not None)): + constraints += [Ce@s == de] + + prob = cp.Problem(objective, constraints) + #prob.solve(solver=cp.GUROBI,env=env)#solver=cp.ECOS + prob.solve(solver=cp.SCIPY) + + return s.value + + def get_dir_unbd(self,g,Ce, Ci, de, di,lower, upper): + ''' + solve for the direction in each iteration + given a gradient vector g, find min_s{sg} + s.t. 
problem is feasible + ''' + + n = len(g) + s = cp.Variable(n) + + objective = cp.Minimize(s@g) + constraints = [] + + if(lower is not None): + constraints += [s >= lower] + if(upper is not None): + constraints += [s <= upper] + if((Ci is not None) and (di is not None)): + constraints += [Ci@s <= di] + if((Ce is not None) and (de is not None)): + constraints += [Ce@s == de] + + prob = cp.Problem(objective, constraints) + #prob.solve(solver=cp.GUROBI,env=env)#solver=cp.ECOS + prob.solve(solver=cp.GUROBI,InfUnbdInfo= 1) + + if('unbounded' in prob.status): + result = np.array([prob.solver_stats.extra_stats.getVars()[j].unbdray for j in range(n)]) + is_bounded = False + else: + result = s.value + is_bounded = True + + return result, is_bounded + + def get_FD_grad(self, x, problem, h, r): + """ + find a finite difference gradient from the problem at + the point x + """ + x = np.array(x) + d = len(x) + + if(d == 1): + #xsol = self.create_new_solution(tuple(x), problem) + x1 = x + h/2 + x2 = x - h/2 + + x1 = self.create_new_solution(tuple(x1), problem) + x2 = self.create_new_solution(tuple(x2), problem) + problem.simulate_up_to([x1], r) + problem.simulate_up_to([x2], r) + f1 = -1 * problem.minmax[0] * x1.objectives_mean + f2 = -1 * problem.minmax[0] * x2.objectives_mean + grad = (f1-f2)/h + else: + I = np.eye(d) + grad = 0 + + for i in range(d): + x1 = x + h*I[:,i]/2 + x2 = x - h*I[:,i]/2 + + x1 = self.create_new_solution(tuple(x1), problem) + x2 = self.create_new_solution(tuple(x2), problem) + problem.simulate_up_to([x1], r) + problem.simulate_up_to([x2], r) + + f1 = -1 * problem.minmax[0] * x1.objectives_mean + f2 = -1 * problem.minmax[0] * x2.objectives_mean + + grad += ((f1-f2)/h)*I[:,i] + + return grad, (2*d*r) + + #def min_quadratic(div0,f0,): + # """ + # find the (arg)minimum of the quadratic function from + # the given info q'(0), q(0), q(alpha) in the interval + # [a,b] where a < b + # """ + def get_gradient(self,problem,x,sol): + """ + getting the gradient of the 
function at point x where + sol is the solution data structure + """ + budget = 0 + #get the gradient of the new solution grad f(x + step*d) for curvature condition + if problem.gradient_available: + # Use IPA gradient if available. + gradient = -1 * problem.minmax[0] * sol.objectives_gradients_mean[0] + else: + gradient, budget_spent = self.get_FD_grad(x, problem, self.factors["h"], self.factors["r"]) + gradient = -1 * problem.minmax[0] * gradient + budget += budget_spent + + return gradient, budget + + def get_simulated_values(self,problem,x,value = 'both'): + """ + getting either sample path or gradient. The return "value" + can be specified to "val"|"gradient"|"both" + """ + r = self.factors["r"] + sol = self.create_new_solution(tuple(x), problem) + problem.simulate(sol, r) + budget = 0 + + #getting the function evaluation + if((value == "both") or (value == "val")): + budget += r + Fval = -1 * problem.minmax[0] * sol.objectives_mean + + if((value == "both") or (value == "gradient")): + if problem.gradient_available: + # Use IPA gradient if available. 
+ gradient = -1 * problem.minmax[0] * sol.objectives_gradients_mean[0] + else: + gradient, budget_spent = self.get_FD_grad(x, problem, self.factors["h"], self.factors["r"]) + budget += budget_spent + + if(value == "val"): + return Fval, budget + elif(value == "gradient"): + return gradient, budget + else: + return Fval, gradient, budget + + def full_min_quadratic(self, div_a,Fa,Fb,a,b,problem): + ''' + return the minimum point which is the + next step size usig the quadratic + interpolation with the information q(a), + q(b), q'(a) and q'(b) where a < b + ''' + #print("div: ",div_a) + #print("Fa,Fb: ", (Fa,Fb)) + #print("(a,b): ", (a,b)) + #numerator = (a**2 - b**2)*div_a - 2*a*(Fa - Fb) + #denominator = 2*((a-b)*div_a - (Fa - Fb)) + #result = numerator/denominator + A = div_a/(a - b) - (Fa - Fb)/((a-b)**2) + B = div_a - 2*A*a + result = -B/(2*A) + + if(-problem.minmax[0] == np.sign(A)): + #if A and problem have the same sign, i.e. min and A > 0 + if(result < a): + return a + elif(result > b): + return b + else: + return result + else: + if(problem.minmax[0] > 0): + #maximization but A > 0 + return [a,b][np.argmax([Fa,Fb])] + + else: + #minization but A < 0 + return [a,b][np.argmin([Fa,Fb])] + + def quadratic_interpolate(self,x1,x2,div_x1,div_x2,Fx1,Fx2,problem): + ''' + interpolate the quadratic polynomial using given points + and return the lowest (arg)point + ''' + + if(x2 > x1): + #we use div_x1,x1,x2 + #return min_quadratic(div_x1,Fx1,Fx2,x2) + return self.full_min_quadratic(div_x1,Fx1,Fx2,x1,x2,problem) + else: + #we use div_x2,x2,x1 + #return min_quadratic(div_x2,Fx2,Fx1,x1) + return self.full_min_quadratic(div_x2,Fx2,Fx1,x2,x1,problem) + + def backtrackLineSearch(self,cur_sol,grad,d,max_step,problem,expended_budget): + """ + carry out line search on the function F where we + min F(x + alpha*d) s.t. 
        alpha >=0

        cur_sol: starting point
        d: direction
        grad: gradient at the point cur_sol
        max_step: literally max step
        ratio: decay ratio if fails

        Returns (step_size, added_budget): the accepted step and the number
        of simulation replications spent inside the search.
        """

        r = self.factors["r"]
        ratio = self.factors["ratio"]
        max_iter = self.factors["line_search_max_iters"]

        cur_iter = 0
        step_size = max_step
        added_budget = 0

        cur_x = cur_sol.x
        # Sign-adjusted objective at the incumbent (minimization form).
        curF = -1 * problem.minmax[0] * cur_sol.objectives_mean
        while True:
            if(cur_iter >= max_iter):
                break;

            new_x = cur_x + step_size*d
            new_sol = self.create_new_solution(tuple(new_x), problem)
            problem.simulate(new_sol, r)
            added_budget += r

            newF = -1 * problem.minmax[0] * new_sol.objectives_mean

            # Armijo sufficient-decrease test; accept the current step.
            if(newF < curF + self.factors['theta'] * step_size * np.dot(grad, d)):
                break;

            # Otherwise shrink geometrically and retry.
            step_size = step_size*ratio
            cur_iter += 1
        return step_size, added_budget

    def interpolateLineSearch(self,cur_sol,grad,d,max_step,problem,expended_budget):
        """
        carry out interpolation line search on the function F where we
        min F(x + alpha*d) s.t. alpha >=0 where phi(a) = F(x + ad)

        Instead of a fixed decay, the next trial step is the minimizer of a
        quadratic model fitted from phi(0), phi'(0) and phi(step).
        Returns (step_size, added_budget).
        """
        r = self.factors["r"]
        ratio = self.factors["ratio"]
        max_iter = self.factors["line_search_max_iters"]
        # Sign used to hand *raw* (un-negated) values to full_min_quadratic.
        sign = -problem.minmax[0]

        cur_iter = 0
        step_size = max_step
        added_budget = 0

        cur_x = cur_sol.x
        curF = -1 * problem.minmax[0] * cur_sol.objectives_mean
        if(max_step == 0):
            return max_step, added_budget

        while True:
            if(cur_iter >= max_iter):
                break;
            new_x = cur_x + step_size*d
            new_sol = self.create_new_solution(tuple(new_x), problem)
            problem.simulate(new_sol, r)
            added_budget += r

            newF = -1 * problem.minmax[0] * new_sol.objectives_mean

            # sufficient decrease (Armijo)
            if(newF < curF + self.factors['theta'] * step_size * np.dot(grad, d)):
                break;

            # quadratic interpolation using phi(0), phi'(0), phi(step)
            new_step_size = self.full_min_quadratic(sign*grad.dot(d),sign*curF,sign*newF,0,step_size,problem)
            if(abs(new_step_size) >= 1e-4):
                # if we can make some progress
                step_size = min(new_step_size,max_step)
            else:
                # if we did not make progress, re-fit using the point at
                # {step*ratio} instead
                temp_x = cur_x + (step_size*ratio)*d
                temp_sol = self.create_new_solution(tuple(temp_x), problem)
                problem.simulate(temp_sol, r)
                added_budget += r
                newF = -1 * problem.minmax[0] * temp_sol.objectives_mean
                new_step_size = self.full_min_quadratic(sign*grad.dot(d),sign*curF,sign*newF,0,step_size*ratio,problem)
                # check if it's in the interval
                if(new_step_size <= 0): # outside interval (too small)
                    step_size = 0
                    break;
                elif(new_step_size > step_size*ratio): # outside interval (too big)
                    step_size = step_size*ratio
                else:
                    step_size = new_step_size

            cur_iter += 1
        return step_size, added_budget

    def zoomLineSearch(self,cur_sol,grad,d,max_step,problem,expended_budget):
        """
        carry out interpolation line search on the function F where we
        min F(x + alpha*d) s.t. 
        alpha >=0 where phi(a) = F(x + ad)

        NOTE: in this method, we increase the step size (Wolfe-style
        bracketing: grow the trial step until sufficient decrease fails or
        the slope turns non-negative, then hand the bracket to zoomSearch).
        Returns (step_size, added_budget).
        """
        if(max_step == 0):
            return 0,0
        r = self.factors["r"]
        ratio = self.factors["ratio"]
        max_iter = self.factors["line_search_max_iters"]
        sign = -problem.minmax[0]

        cur_iter = 0
        cur_step_size = max_step*self.factors["zoom_init_ratio"]
        last_step_size = 0
        last_grad = grad
        added_budget = 0

        cur_x = cur_sol.x
        curF = -1 * problem.minmax[0] * cur_sol.objectives_mean
        lastF = curF
        nextF, next_grad, budget_spent = self.get_simulated_values(problem,cur_x + cur_step_size*d,value = 'both')
        added_budget += budget_spent

        # Track trial steps that at least got evaluated, so the best one can
        # be returned if the search exhausts its iterations.
        returned_steps = [0]
        returned_vals = [curF]

        while True:
            if(cur_iter >= max_iter):
                break;

            # sufficient decrease fails: zoom into {last_step, cur_step}
            if((nextF >= curF + self.factors['theta'] * cur_step_size * np.dot(grad, d))):
                return self.zoomSearch(last_step_size,cur_step_size,lastF,nextF,
                                    last_grad.dot(d),next_grad.dot(d),problem,
                                    cur_x,curF,grad,d,added_budget,cur_iter)

            # curvature condition satisfied: accept the current step
            if((abs(next_grad.dot(d)) <= self.factors['curve_const']*abs(grad.dot(d)))):
                step_size = cur_step_size
                break;
            # slope turned non-negative: minimum is bracketed, zoom
            if((next_grad.dot(d)) >= 0):
                return self.zoomSearch(cur_step_size,last_step_size,nextF,lastF,
                                    next_grad.dot(d),last_grad.dot(d),problem,
                                    cur_x,curF,grad,d,added_budget,cur_iter)

            returned_steps.append(cur_step_size)
            returned_vals.append(nextF)

            # Grow the trial step, capped at max_step.
            last_step_size = cur_step_size
            cur_step_size = min(max_step,cur_step_size*self.factors["zoom_inc_ratio"])

            if(cur_step_size >= max_step):
                break;

            lastF = nextF
            last_grad = next_grad
            nextF, next_grad, budget_spent = self.get_simulated_values(problem,cur_x + cur_step_size*d,value = 'both')
            added_budget += budget_spent

            cur_iter += 1
        if(cur_iter == self.factors["line_search_max_iters"] or (cur_step_size >= max_step)):
            # if we used all iterations (or hit max_step), return the trial
            # step with the best observed value
            return returned_steps[np.argmin(returned_vals)] ,added_budget
        else:
            return cur_step_size, added_budget

    def zoomSearch(self,step_lo, step_hi, Flo, Fhi, div_lo, div_hi, problem,cur_x,curF, grad,d,added_budget,cur_iter):
        """
        carry out the zoom search into the interval {step_lo, step_hi}
        *two of these are not ordered* — step_lo is the better endpoint,
        step_hi the worse, as in the standard Wolfe zoom phase.
        Returns (step, added_budget).
        """
        max_iter = self.factors["line_search_max_iters"]
        r = self.factors["r"]
        sign = -1*problem.minmax[0]

        while(True):
            if(cur_iter >= max_iter):
                break;

            m1 = min([step_lo,step_hi])
            m2 = max([step_lo,step_hi])
            # use the actual (raw-sign) values when interpolating
            new_step = self.quadratic_interpolate(step_lo,step_hi,sign*div_lo,sign*div_hi,sign*Flo,sign*Fhi,problem)
            # NOTE(review): left_dif/right_dif/left_val/right_val (and m1/m2)
            # are computed but never used below — leftover debug bookkeeping.
            if(step_lo < step_hi):
                left_dif = sign*div_lo;right_dif = sign*div_hi
                left_val = sign*Flo;right_val = sign*Fhi
            else:
                left_dif = sign*div_hi;right_dif = sign*div_lo
                left_val = sign*Fhi;right_val = sign*Flo

            # interval collapsed to an endpoint: stop refining
            if(abs(new_step - step_lo) < 1e-4 or abs(new_step - step_hi) < 1e-4):
                return new_step, added_budget

            newF, new_grad, budget_spent = self.get_simulated_values(problem,cur_x + new_step*d,value = 'both')
            added_budget += budget_spent

            if((newF >= curF + self.factors['theta'] * new_step * np.dot(grad, d))):
                # insufficient decrease: shrink the far end
                step_hi = new_step
                # NOTE(review): budget_spent from this re-evaluation is not
                # added to added_budget on this branch — confirm intent.
                Fhi, div_hi, budget_spent = self.get_simulated_values(problem,cur_x + step_hi*d,value = 'both')
                div_hi = div_hi.dot(d)
            else:
                # curvature condition satisfied: accept
                if((abs(new_grad.dot(d)) <= self.factors['curve_const']*abs(grad.dot(d)))):
                    return new_step, added_budget
                if((new_grad.dot(d))*(step_hi - step_lo) >= 0):
                    step_hi = step_lo

                    Fhi, div_hi, budget_spent = self.get_simulated_values(problem,cur_x + step_hi*d,value = 'both')
                    div_hi = div_hi.dot(d)
                    added_budget += budget_spent

                step_lo = new_step

                Flo, div_lo, budget_spent = self.get_simulated_values(problem,cur_x + step_lo*d,value = 'both')
                div_lo = div_lo.dot(d)
                added_budget += budget_spent

            cur_iter += 1

        return new_step, added_budget

    def find_feasible_initial(self, problem, Ce, Ci, de, di,lower, upper, tol = 1e-8):
        '''
        Find an initial feasible solution (if not user-provided)
        by solving phase one simplex.

        Arguments
        ---------
        problem : Problem object
            simulation-optimization problem to solve
        C: ndarray
            constraint coefficient matrix
        d: ndarray
            constraint coefficient vector
        tol: float
            floating point comparison tolerance

        Returns
        -------
        x0 : ndarray
            an initial feasible solution
        '''
        # Define decision variables.
        x = cp.Variable(problem.dim)

        # Define constraints.
        constraints = []

        if(lower is not None):
            constraints += [x >= lower]
        if(upper is not None):
            constraints += [x <= upper]
        if (Ce is not None) and (de is not None):
            constraints += [Ce @ x == de]
        if (Ci is not None) and (di is not None):
            constraints += [Ci @ x <= di]

        # Feasibility problem: any point satisfying the constraints will do.
        obj = cp.Minimize(0)

        # Create problem.
        model = cp.Problem(obj, constraints)

        # Solve problem.
        model.solve()

        # Check for optimality. 
        if model.status not in [cp.OPTIMAL, cp.OPTIMAL_INACCURATE] :
            raise ValueError("Could not find feasible x0")
        x0 = x.value
        # NOTE(review): this call passes (x0, problem, tol) while other call
        # sites in this class use is_feasible(x, Ci, di, Ce, de, lower, upper)
        # — the two signatures cannot both match one definition; verify
        # against the is_feasible implementation.
        if not self.is_feasible(x0, problem, tol):
            raise ValueError("Could not find feasible x0")

        return x0

    def get_atom_vectors(self,Ci,di):
        """
        get vertices of a polytope defined by the
        constraints Ci <= di

        Converts the H-representation (b | -A) into cdd's generator
        (V-)representation and strips the leading homogenizing column.
        """
        a,b = Ci.shape
        mat = np.concatenate((di.reshape(a,1),-Ci),axis = 1)
        mat = cdd.Matrix(mat,linear=False,number_type='float')

        # NOTE(review): P is built but never used (duplicate of poly below).
        P = cdd.Polyhedron(mat)
        poly = cdd.Polyhedron(mat)
        ext = poly.get_generators()

        return np.array(ext)[:,1:]

    def get_random_vertex(self,Ci,di,lower,upper):
        """
        Return a vertex of the polytope {x : Ci x <= di, lower <= x <= upper}
        by maximizing sum(x) over it (an LP optimum lies at a vertex).
        """
        num_var = Ci.shape[1]
        x = cp.Variable(num_var)
        objective = cp.Maximize(cp.sum(x))
        constraints = [Ci@x <= di]
        if(lower is not None):
            constraints += [x >= lower]
        if(upper is not None):
            constraints += [x <= upper]

        problem = cp.Problem(objective, constraints)
        problem.solve(solver=cp.SCIPY)
        return x.value

    def get_alpha_vec(self,x0,atom_vectors):
        """
        get the coefficients of convex combination of the x0

        Solves a least-squares problem with an L1 penalty to favor a sparse
        set of weights over the atom (vertex) vectors.
        """

        m,n = atom_vectors.shape
        x = cp.Variable(m)

        objective = cp.Minimize(cp.norm(atom_vectors.T @ x - x0) + cp.norm(x,1))
        constraints = [x >= 0,
                       x <= 1]

        prob = cp.Problem(objective, constraints)
        prob.solve()

        return x.value

    def solve(self, problem):
        """
        Dispatch to the configured Frank-Wolfe variant, after installing the
        configured line-search routine in self.factors["LSfn"].
        """
        # NOTE(review): max_iters is read but not used in this method.
        max_iters = self.factors['max_iters']
        ls_type = self.factors['LSmethod']
        if(ls_type == 'backtracking'):
            self.factors["LSfn"] = self.backtrackLineSearch
        elif(ls_type == 'interpolation'):
            self.factors["LSfn"] = self.interpolateLineSearch
        else:
            self.factors["LSfn"] = self.zoomLineSearch

        if(self.factors["algorithm"] == "normal"):
            return self.normal_FrankWolfe(problem)

        elif(self.factors["algorithm"] == "away"):
            return self.away_FrankWolfe(problem)
        elif(self.factors["algorithm"] == "normal_unbd"):
            return self.normal_FrankWolfe_unbd(problem)
        else:
            return self.pairwise_FrankWolfe(problem)

    def normal_FrankWolfe(self, problem):
        """
        Classic Frank-Wolfe: at each iteration move toward the LP minimizer
        of the linearized objective over the feasible region.
        Returns (recommended_solns, intermediate_budgets).
        """
        recommended_solns = []
        intermediate_budgets = []
        expended_budget = 0
        dim = problem.dim

        Ci = problem.Ci
        di = problem.di
        Ce = problem.Ce
        de = problem.de

        lower = np.array(problem.lower_bounds)
        upper = np.array(problem.upper_bounds)

        r = self.factors["r"]
        ratio = self.factors["ratio"]
        LSmax_iter = self.factors["line_search_max_iters"]

        # Start with the initial solution.
        new_solution = self.create_new_solution(problem.factors["initial_solution"], problem)
        new_x = new_solution.x

        # Repair an infeasible starting point via the phase-one LP.
        if(not self.is_feasible(new_x, Ci,di,Ce,de,lower,upper)):
            new_x = self.find_feasible_initial(problem, Ce, Ci, de, di,lower,upper)
            new_solution = self.create_new_solution(tuple(new_x), problem)

        # Use r simulated observations to estimate the objective value.
        problem.simulate(new_solution, r)
        expended_budget += r
        best_solution = new_solution
        recommended_solns.append(new_solution)
        intermediate_budgets.append(expended_budget)

        k = 0

        while expended_budget < problem.factors["budget"]:
            cur_x = new_solution.x

            # getting the gradient
            if problem.gradient_available:
                # Use IPA gradient if available.
                grad = -1 * problem.minmax[0] * new_solution.objectives_gradients_mean[0]
            else:
                # Use finite difference to estimate gradient if IPA gradient is not available.
                grad, budget_spent = self.get_FD_grad(cur_x, problem, self.factors["h"], self.factors["r"])
                expended_budget += budget_spent

            # Solve the linearized subproblem for the FW vertex v.
            v = self.get_dir(grad,Ce, Ci, de, di,lower,upper)
            direction = (v-cur_x)
            # Full step keeps the iterate inside the (convex) feasible region.
            max_gamma = 1

            if(self.factors["backtrack"]):
                gamma, added_budget = self.factors['LSfn'](new_solution,grad,direction,max_gamma,problem,expended_budget)
                expended_budget += added_budget
            else:
                # Deterministic step-size schedule.
                gamma = min(self.factors["step_f"](k),max_gamma)

            new_x = np.array(cur_x) + gamma*direction
            candidate_solution = self.create_new_solution(tuple(new_x), problem)
            # Use r simulated observations to estimate the objective value. 
+ problem.simulate(candidate_solution, r) + expended_budget += r + + new_solution = candidate_solution + recommended_solns.append(candidate_solution) + intermediate_budgets.append(expended_budget) + + k += 1 + #print("obj: ",candidate_solution.objectives_mean) + #print("-----------------------------") + return recommended_solns, intermediate_budgets + + def normal_FrankWolfe_unbd(self, problem): + #print("Starting Frank Wolfe") + recommended_solns = [] + intermediate_budgets = [] + expended_budget = 0 + dim = problem.dim + + Ci = problem.Ci + di = problem.di + Ce = problem.Ce + de = problem.de + + lower = np.array(problem.lower_bounds) + upper = np.array(problem.upper_bounds) + + r = self.factors["r"] + ratio = self.factors["ratio"] + LSmax_iter = self.factors["line_search_max_iters"] + + # Start with the initial solution. + new_solution = self.create_new_solution(problem.factors["initial_solution"], problem) + new_x = new_solution.x + + if(not self.is_feasible(new_x, Ci,di,Ce,de,lower,upper)): + new_x = self.find_feasible_initial(problem, Ce, Ci, de, di,lower,upper) + new_solution = self.create_new_solution(tuple(new_x), problem) + + # Use r simulated observations to estimate the objective value. + problem.simulate(new_solution, r) + expended_budget += r + best_solution = new_solution + recommended_solns.append(new_solution) + intermediate_budgets.append(expended_budget) + + k = 0 + + while expended_budget < problem.factors["budget"]: + cur_x = new_solution.x + + #getting the gradient + if problem.gradient_available: + # Use IPA gradient if available. + grad = -1 * problem.minmax[0] * new_solution.objectives_gradients_mean[0] + else: + # Use finite difference to estimate gradient if IPA gradient is not available. 
+ #grad, budget_spent = self.finite_diff(new_solution, problem, r, stepsize = alpha) + grad, budget_spent = self.get_FD_grad(cur_x, problem, self.factors["h"], self.factors["r"]) + expended_budget += budget_spent + + v, is_bounded = self.get_dir_unbd(grad,Ce, Ci, de, di,lower,upper) + max_gamma = 1 + + if(is_bounded):#go to a vertex + direction = v - cur_x + else:#go to the open space + direction = v + #print("dir: ", direction) + + if(self.factors["backtrack"]): + #gamma, added_budget = self.LineSearch(new_solution,grad,direction,self.factors["max_gamma"],problem,expended_budget) + gamma, added_budget = self.LineSearch(new_solution,grad,direction,max_gamma,problem,expended_budget) + expended_budget += added_budget + else: + #gamma = min(self.factors["step_f"](k),self.factors["max_gamma"]) + gamma = min(self.factors["step_f"](k),max_gamma) + + #print("gamma: ", gamma) + new_x = np.array(cur_x) + gamma*direction + #print("new x: ",new_x) + candidate_solution = self.create_new_solution(tuple(new_x), problem) + # Use r simulated observations to estimate the objective value. + problem.simulate(candidate_solution, r) + expended_budget += r + + new_solution = candidate_solution + recommended_solns.append(candidate_solution) + intermediate_budgets.append(expended_budget) + + k += 1 + #print("obj: ",candidate_solution.objectives_mean) + #print("-----------------------------") + return recommended_solns, intermediate_budgets + + def away_FrankWolfe(self, problem): + + recommended_solns = [] + intermediate_budgets = [] + expended_budget = 0 + last_step = [] + last_gamma = [] + + Ci = problem.Ci + di = problem.di + Ce = problem.Ce + de = problem.de + + lower = np.array(problem.lower_bounds) + upper = np.array(problem.upper_bounds) + + scale_factor = self.factors["ratio"] + LSmax_iter = self.factors["line_search_max_iters"] + r = self.factors["r"] + + # Start with the initial solution. 
+ new_solution = self.create_new_solution(problem.factors["initial_solution"], problem) + new_x = new_solution.x + + if(not self.is_feasible(new_x, Ci,di,Ce,de,lower,upper)): + #new_x = self.find_feasible_initial(problem, Ce, Ci, de, di,lower,upper) + new_x = self.get_random_vertex(Ci,di,lower,upper) + new_solution = self.create_new_solution(tuple(new_x), problem) + + #new_x = self.get_random_vertex(Ci,di,lower,upper) + new_solution = self.create_new_solution(tuple(new_x), problem) + atom_vectors = np.array([new_x]) + problem.simulate(new_solution, r) + + #initializing active set and all alpha coefficients, contains only one vector here + #active_vectors = {0:[]} + active_vectors = [np.array(new_x)] + alphas = {tuple(new_x):1} + #store the "active" infinite search direction + active_dirs = [] + betas = {} + + # Use r simulated observations to estimate the objective value. + problem.simulate(new_solution, r) + expended_budget += r + best_solution = new_solution + recommended_solns.append(new_solution) + intermediate_budgets.append(expended_budget) + + k = 0 + + while expended_budget < problem.factors["budget"]: + cur_x = new_solution.x + + if problem.gradient_available: + # Use IPA gradient if available. + grad = -1 * problem.minmax[0] * new_solution.objectives_gradients_mean[0] + else: + # Use finite difference to estimate gradient if IPA gradient is not available. + #grad, budget_spent = self.finite_diff(new_solution, problem, r, stepsize = alpha) + grad, budget_spent = self.get_FD_grad(cur_x, problem, self.factors["h"], self.factors["r"]) + expended_budget += budget_spent + # A while loop to prevent zero gradient. + #while np.all((grad == 0)): + # if expended_budget > problem.factors["budget"]: + # break + # grad, budget_spent = self.finite_diff(new_solution, problem, r) + # expended_budget += budget_spent + # Update r after each iteration. 
+ # r = int(self.factors["lambda"] * r) + + #print("grad: ", grad) + #print("active set: ") + #the list dot product values [grad_f.a for a in atom] + #s = self.get_dir(grad,Ce, Ci, de, di,lower, upper) + s, is_bounded = self.get_dir_unbd(grad,Ce, Ci, de, di,lower, upper) + + #list of dot product of [grad_f.v for v in active set] + #gv = np.array([grad.dot(a) for a in active_vectors[k]]) + if(len(active_vectors) > 0): + gv = np.array([grad.dot(a) for a in active_vectors]) + #v = active_vectors[k][np.argmax(gv)] + v = active_vectors[np.argmax(gv)] + d_away = cur_x - v + else: + d_away = np.zeros(problem.dim) + v = None + + #compute the directions of normal Frank-Wolfe + if(is_bounded): + d_FW = s - cur_x + else:#go to the open space + d_FW = s + s = d_FW + + #d_FW = d_FW/np.linalg.norm(d_FW) + #d_FW = d_away/np.linalg.norm(d_away) + #print("dFW: ",d_FW) + #print("d_away: ",d_away) + #print("forward step direction: ",s) + #print("is bounded: ",is_bounded) + #print("away step direction: ", v) + #print("current point: ",cur_x) + #print("v: ",v) + + #there is no way to move further since we finished early + if((d_FW == 0).all() and (d_away == 0).all()): + direction = d_FW #by default since it has no effect anyway + gamma = 0 + + elif((-grad.dot(d_FW) >= -grad.dot(d_away)) or (d_away == 0).all() or (not is_bounded)): + #FW step + #print("foward") + #ind.append('FW') + direction = d_FW + #print("dir: ", direction) + #max_gamma = 1 + #max_gamma = self.get_max_gamma_ratio_test(cur_x, direction, Ce, Ci, de, di, lower, upper) + #max_gamma = max_gamma*self.factors["max_gamma"] + #print("gamma: ", gamma) + #if(self.factors["backtrack"]): + #gamma, added_budget = self.LineSearch(new_solution,grad,direction,max_gamma,problem,expended_budget) + # gamma, added_budget = self.factors['LSfn'](new_solution,grad,direction,max_gamma,problem,expended_budget) + # expended_budget += added_budget + #else: + # gamma = min(self.factors["step_f"](k),max_gamma) + + if(is_bounded): + 
#print("bounded") + max_gamma = 1 + if(self.factors["backtrack"]): + #gamma, added_budget = self.LineSearch(new_solution,grad,direction,max_gamma,problem,expended_budget) + gamma, added_budget = self.factors['LSfn'](new_solution,grad,direction,max_gamma,problem,expended_budget) + expended_budget += added_budget + else: + gamma = min(self.factors["step_f"](k),max_gamma) + #update the active set + if(gamma < 1): + add = 0 + #check whether we have added this vertex s before + for vec in active_vectors: + if((vec != s).any()): + add = 1 #if could not find it, we must add it + else: + add = 0 + break; + if(add): #adding the new vertex s + active_vectors.append(s) + alphas[tuple(s)] = 0 + else: + #go the vertex s + active_vectors = [s] + alphas = {tuple(s):0} + + #print("active set change in forward: ", active_vectors) + #for atom in active_vectors[k]: + for atom in active_vectors: + if((atom == s).all()): + alphas[tuple(atom)] = (1-gamma)*alphas[tuple(atom)] + gamma + else: + alphas[tuple(atom)] = (1-gamma)*alphas[tuple(atom)] + + for dirs in active_dirs: + betas[tuple(dirs)] = (1-gamma)*betas[tuple(dirs)] + last_step.append("bounded") + else: + #print("unbounded") + #if we have consecutive extreme search + if(k > 0 and last_step[-1] == 'unbounded'): + gamma = last_gamma[-1]/self.factors["ratio"] + else: + max_gamma = 1 + #gamma = 1 + #print("max step: ",max_gamma) + if(self.factors["backtrack"]): + #gamma, added_budget = self.LineSearch(new_solution,grad,direction,max_gamma,problem,expended_budget) + gamma, added_budget = self.factors['LSfn'](new_solution,grad,direction,max_gamma,problem,expended_budget) + expended_budget += added_budget + else: + gamma = min(self.factors["step_f"](k),max_gamma) + #searching in the infinite search direction + #add a new inf direction if not found yet + if(len(active_dirs) == 0): + #It's the first time we add the search direction + active_dirs.append(s) + betas[tuple(s)] = gamma + else: #we added some extreme direction before + diffs = 
np.array([sum(abs(vec - s)) for vec in active_dirs]) + if((diffs > 1e-6).all()):#s is a new inf direction + active_dirs.append(s) + betas[tuple(s)] = gamma + else: + betas[tuple(s)] += gamma + last_step.append("unbounded") + + else: + #away step + #print("away") + #ind.append('away') + direction = d_away #xt - v + #print("dir: ", direction) + #direction = d_away/np.linalg.norm(d_away) + #gamma = gamma_f(k) + #max_gamma = alphas[tuple(v)]/(1 - alphas[tuple(v)]) + gamma_star = self.get_max_gamma_ratio_test(cur_x, direction, Ce, Ci, de, di, lower, upper) + #print("gamma_star: ", gamma_star) + #gamma_star = gamma_star*self.factors["max_gamma"] + #print("the alpha in ratio: ",alphas[tuple(v)]) + #direction = direction*gamma_star #d' = gamma_star*d + #max_dist = 1 + #max_dist = min(1,alphas[tuple(v)]/(gamma_star*(1 - alphas[tuple(v)]))) + max_dist = min(gamma_star,alphas[tuple(v)]/((1 - alphas[tuple(v)]))) + #max_gamma = alphas[v]/(1 - alphas[v]) + #print("max_dist: ", max_dist) + #active_vectors[k+1] = active_vectors[k] + + if(self.factors["backtrack"]): + #gamma, added_budget = self.LineSearch(new_solution,grad,direction,max_dist,problem,expended_budget) + gamma, added_budget = self.factors['LSfn'](new_solution,grad,direction,max_dist,problem,expended_budget) + expended_budget += added_budget + else: + gamma = min(self.factors["step_f"](k),self.factors["max_gamma"]) + + #if gamma_max, then update St \ {vt} + if(gamma == 1 or gamma <= scale_factor**LSmax_iter): + #print("dropping: ", v) + #active_vectors[k+1] = [] + #for vec in active_vectors[k]: + new_active = [] + for vec in active_vectors: + if((vec != v).any()): + #if((np.linalg.norm(vec - v)) > 1e-4): + #active_vectors[k+1].append(vec) + new_active.append(vec) + active_vectors = new_active + removed_atom = alphas.pop(tuple(v)) + + for atom in active_vectors: + if((atom == v).all()): + #alphas[tuple(atom)] = (1+gamma)*alphas[tuple(atom)] - gamma + alphas[tuple(atom)] = (1+gamma*gamma_star)*alphas[tuple(atom)] - 
gamma*gamma_star + else: + alphas[tuple(atom)] = (1+gamma*gamma_star)*alphas[tuple(atom)] + last_step.append("away") + #print("alphas: ", alphas) + #print("Displaying Alphas:") + #for key,val in alphas.items(): + # print(key) + # print(val) + # print('**************') + + + #print("max_gamma: ", max_gamma) + #print("gamma: ", gamma) + #print("dir: ", tuple(direction)) + last_gamma.append(gamma) + new_x = cur_x + gamma*direction + #print("new_x: ",tuple(new_x)) + + candidate_solution = self.create_new_solution(tuple(new_x), problem) + # Use r simulated observations to estimate the objective value. + problem.simulate(candidate_solution, r) + #print("obj: ",candidate_solution.objectives_mean) + expended_budget += r + + new_solution = candidate_solution + recommended_solns.append(candidate_solution) + intermediate_budgets.append(expended_budget) + #print("obj: ",candidate_solution.objectives_mean) + + k += 1 + #print("--------------") + return recommended_solns, intermediate_budgets + + def pairwise_FrankWolfe(self, problem): + + recommended_solns = [] + intermediate_budgets = [] + expended_budget = 0 + + Ci = problem.Ci + di = problem.di + Ce = problem.Ce + de = problem.de + + r = self.factors["r"] + + lower = np.array(problem.lower_bounds) + upper = np.array(problem.upper_bounds) + + # Start with the initial solution. + #new_solution = self.create_new_solution(problem.factors["initial_solution"], problem) + #new_x = new_solution.x + + #if(not self.is_feasible(new_x, problem)): + # new_x = self.find_feasible_initial(problem, Ce, Ci, de, di) + # new_solution = self.create_new_solution(tuple(new_x), problem) + + # Start with the initial solution. 
+ #new_solution = self.create_new_solution(problem.factors["initial_solution"], problem) + #new_x = new_solution.x + + #if(not self.is_feasible(new_x, Ci,di,Ce,de,lower,upper)): + #new_x = self.find_feasible_initial(problem, Ce, Ci, de, di,lower,upper) + # new_x = self.get_random_vertex(Ci,di,lower,upper) + # new_solution = self.create_new_solution(tuple(new_x), problem) + + new_x = self.get_random_vertex(Ci,di,lower,upper) + new_solution = self.create_new_solution(tuple(new_x), problem) + #initiailizing a dictionary of atom vectors and their coefficients + #atom_vectors = self.factors["atom_vectors"] + #if(self.factors["atom_vectors"] is None): + # atom_vectors = self.get_atom_vectors(Ci,di) + # num_atoms = atom_vectors.shape[0] + # alpha_vec = np.zeros(num_atoms) + # alpha_vec[0] = 1 + + # new_x = atom_vectors[0] + # new_solution = self.create_new_solution(tuple(new_x), problem) + #else: + # atom_vectors = self.factors["atom_vectors"] + # num_atoms = atom_vectors.shape[0] + # alpha_vec = self.get_alpha_vec(new_x,atom_vectors) + + #initiailizing a dictionary of atom vectors and their coefficients + #atom_vectors = self.factors["atom_vectors"] + #atom_vectors = self.get_atom_vectors(Ci,di) + #num_atoms = atom_vectors.shape[0] + #active_vectors = {0:[]} + #alphas = {tuple(v):0 for v in atom_vectors} + + atom_vectors = np.array([new_x]) + active_vectors = [np.array(new_x)] + #alphas = {tuple(v):0 for v in atom_vectors} + alphas = {tuple(new_x):1} + + #new_x = atom_vectors[0] + #new_solution = self.create_new_solution(tuple(new_x), problem) + + #for i in range(num_atoms): + # alphas[tuple(atom_vectors[i])] = alpha_vec[i] + # if(alpha_vec[i] > 0): + # active_vectors[0].append(atom_vectors[i]) + + # Use r simulated observations to estimate the objective value. 
+ problem.simulate(new_solution, r) + expended_budget += r + best_solution = new_solution + recommended_solns.append(new_solution) + intermediate_budgets.append(expended_budget) + + k = 0 + + while expended_budget < problem.factors["budget"]: + cur_x = new_solution.x + + #print("cur_x: ", cur_x) + + if problem.gradient_available: + # Use IPA gradient if available. + grad = -1 * problem.minmax[0] * new_solution.objectives_gradients_mean[0] + else: + # Use finite difference to estimate gradient if IPA gradient is not available. + #grad, budget_spent = self.finite_diff(new_solution, problem, r, stepsize = alpha) + grad, budget_spent = self.get_FD_grad(cur_x, problem, self.factors["h"], self.factors["r"]) + expended_budget += budget_spent + # A while loop to prevent zero gradient. + #while np.all((grad == 0)): + # if expended_budget > problem.factors["budget"]: + # break + # grad, budget_spent = self.finite_diff(new_solution, problem, r) + # expended_budget += budget_spent + # Update r after each iteration. 
+ # r = int(self.factors["lambda"] * r) + + s = self.get_dir(grad,Ce, Ci, de, di,lower, upper) + + #compute the directions + if(len(active_vectors) > 0): + gv = np.array([grad.dot(a) for a in active_vectors]) + #v = active_vectors[k][np.argmax(gv)] + v = active_vectors[np.argmax(gv)] + d_pw = s-v + else: + d_pw = np.zeros(problem.dim) + #direction = s - v + d_FW = s - cur_x + + #print("s-v: ", s-v) + #print("foward direction: ", s) + #print("pairwise direction: ", s-v) + #print("grad :", grad) + #print("current point: ",cur_x) + #print("dir: ", direction) + #print("v: ", v) + #max_gamma = min(alphas[tuple(v)]*np.linalg.norm(s-v),self.factors["max_gamma"]) + #max_gamma = self.get_max_gamma_ratio_test(cur_x, direction, Ce, Ci, de, di, lower, upper) + #away vector v = 0 + if((-grad.dot(d_FW) >= -grad.dot(d_pw)) or (d_pw == 0).all()): + #print('Forward') + direction = d_FW + #print("direcition: ", direction) + max_gamma = 1 + + if(self.factors["backtrack"]): + #gamma = LineSearch(F=F,x=cur_x,d=d_away,max_step=max_gamma/2) + gamma, added_budget = self.LineSearch(new_solution,grad,direction,max_gamma,problem,expended_budget) + expended_budget += added_budget + else: + #gamma = self.factors["step_f"](k) + gamma = min(self.factors["step_f"](k),max_gamma) + + #update the active set + if(gamma < 1): + for vec in active_vectors: + if((s != vec).any()): + add = 1 + else: + add = 0 + break; + if(add): + #active_vectors[k+1].append(s) + active_vectors.append(s) + alphas[tuple(s)] = 0 + else: + active_vectors = [s] + alphas = {tuple(s):0} + + #updating weights/coefficients + for atom in active_vectors: + if((atom == s).all()): + alphas[tuple(atom)] = (1-gamma)*alphas[tuple(atom)] + gamma + else: + alphas[tuple(atom)] = (1-gamma)*alphas[tuple(atom)] + + else: + #print("pairwise") + direction = d_pw + #print("direcition: ", direction) + max_gamma = alphas[tuple(v)] + #print("max_step: ", max_gamma) + if(self.factors["backtrack"]): + #gamma = 
LineSearch(F=F,x=cur_x,d=d_away,max_step=max_gamma/2) + gamma, added_budget = self.LineSearch(new_solution,grad,direction,max_gamma,problem,expended_budget) + expended_budget += added_budget + else: + #gamma = self.factors["step_f"](k) + gamma = min(self.factors["step_f"](k),max_gamma) + #print("active set in pairwise: ", active_vectors) + #found a new vertex not in the past vertices + for vec in active_vectors: + #different/a new vertex + if((s != vec).any()): + #if(sum(abs(s-vec)/(problem.dim*(vec+1e-10))) > 1e-2): + add = 1 + else: + add = 0 + break; + if(add): + active_vectors.append(s) + alphas[tuple(s)] = 0 + #print("active set in pairwise: ", active_vectors) + alphas[tuple(s)] = alphas[tuple(s)] + gamma + alphas[tuple(v)] = alphas[tuple(v)] - gamma + + #print("Displaying Alphas:") + #for key,val in alphas.items(): + # print(key) + # print(val) + # print('**************') + + + #print("max_gamma: ", max_gamma) + #print("gamma: ", gamma) + new_x = cur_x + gamma*direction + #print("next x: ",new_x) + candidate_solution = self.create_new_solution(tuple(new_x), problem) + # Use r simulated observations to estimate the objective value. 
+ problem.simulate(candidate_solution, r) + expended_budget += r + + + new_solution = candidate_solution + recommended_solns.append(candidate_solution) + intermediate_budgets.append(expended_budget) + + k += 1 + #print("obj: ",candidate_solution.objectives_mean) + #print("------------------") + #print("------------------") + + return recommended_solns, intermediate_budgets + \ No newline at end of file diff --git a/simopt/solvers/Boom_ProxGD.py b/simopt/solvers/Boom_ProxGD.py new file mode 100644 index 000000000..2eda3a7e6 --- /dev/null +++ b/simopt/solvers/Boom_ProxGD.py @@ -0,0 +1,904 @@ +#https://github.com/bodono/apgpy +import numpy as np +import cvxpy as cp +import matplotlib.pyplot as plt +#from apgwrapper import NumpyWrapper +#from functools import partial + +import warnings +warnings.filterwarnings("ignore") + +from ..base import Solver + +class BoomProxGD(Solver): + """ + + """ + def __init__(self, name="Boom-PGD", fixed_factors={"max_iters": 300, "backtrack": 1, "curve_const": 0.3, "LSmethod": 'zoom', "algorithm": 'away'}): + self.name = name + self.objective_type = "single" + self.constraint_type = "deterministic" + self.variable_type = "continuous" + self.gradient_needed = False + self.specifications = { + "crn_across_solns": { + "description": "use CRN across solutions?", + "datatype": bool, + "default": True + }, + "LSmethod": { + "description": "method", + "datatype": 'zoom', + "default": 'zoom' + }, + "r": { + "description": "number of replications taken at each solution", + "datatype": int, + "default": 30 + }, + "h": { + "description": "difference in finite difference gradient", + "datatype": float, + "default": 0.1 + }, + "step_f": { + "description": "step size function", + "datatype": "function", + "default": self.default_step_f + }, + "max_step_size": { + "description": "maximum possible step size", + "datatype": float, + "default": 10 + }, + "max_iters": { + "description": "maximum iterations", + "datatype": int, + "default": 300 + }, + 
"theta": { + "description": "constant in the line search condition", + "datatype": int, + "default": 0.2 + }, + "line_search_max_iters": { + "description": "maximum iterations for line search", + "datatype": int, + "default": 20 + }, + "ratio": { + "description": "decay ratio in line search", + "datatype": float, + "default": 0.8 + }, + "curve_const": { + "description": "constant in curvature wolfe conditions, usually greater than theta", + "datatype": float, + "default": 0.3 + }, + "zoom_init_ratio": { + "description": "ratio of the max step size in Zoom lien search", + "datatype": float, + "default": 0.2 + }, + "zoom_inc_ratio": { + "description": "increment ratio in Zoom lien search", + "datatype": float, + "default": 1.1 + }, + "max_gamma":{ + "description": "max distance possible", + "datatype": float, + "default": 10 + }, + "backtrack":{ + "description": "an indicator whether we do the backtrack", + "datatype": bool, + "default": 0 + }, + "proj_thres":{ + "description": "proportion of the max iters to stop if have too many projections", + "datatype": float, + "default": 0.1 + }, + "algorithm":{ + "description": "type of FW algorithm", + "datatype": str, + "default": "normal" #away, pairwise + } + } + + self.check_factor_list = { + "crn_across_solns": self.check_crn_across_solns, + "r": self.check_r, + "max_iters": self.check_max_iters, + "proj_thres":self.check_proj_thres + } + super().__init__(fixed_factors) + + def default_step_f(self,k): + """ + take in the current iteration k + """ + + return 1/(k+1) + + def check_r(self): + + return self.factors['r'] > 0 + + def check_max_iters(self): + + return self.factors['max_iters'] > 0 + + def check_proj_thres(self): + + return self.factors["proj_thres"] > 0 and self.factors["proj_thres"] < 1 + + def proj(self,z,Ci,di,Ce,de,lower,upper): + ''' + project a point z onto the constraint + Ax <= b depending on the constraint type + ''' + n = len(z) + u = cp.Variable(n) + + objective = 
cp.Minimize(cp.square(cp.norm(u-z))) + constraints = [] + + if((lower is not None) and (lower > -np.inf).all()): + constraints += [u >= lower] + if((upper is not None) and (upper < np.inf).all()): + constraints += [u <= upper] + + if (Ci is not None) and (di is not None): + constraints += [Ci@u <= di] + if (Ce is not None) and (de is not None): + constraints += [Ce@u == de] + + #constraints = [A@u <= b] + prob = cp.Problem(objective, constraints) + prob.solve()#solver=cp.ECOS + + return u.value + + def is_feasible(self, x, Ci,di,Ce,de,lower,upper): + """ + Check whether a solution x is feasible to the problem. + + Arguments + --------- + x : tuple + a solution vector + problem : Problem object + simulation-optimization problem to solve + tol: float + Floating point comparison tolerance + """ + x = np.asarray(x) + res = True + + if(lower is not None): + res = res & np.all(x >= lower) + if(upper is not None): + res = res & np.all(x <= upper) + + if (Ci is not None) and (di is not None): + res = res & np.all(Ci @ x <= di) + if (Ce is not None) and (de is not None): + res = res & (np.allclose(np.dot(Ce, x), de)) + return res + + def get_max_gamma_ratio_test(self, cur_x, d, Ce, Ci, de, di, lower, upper): + ''' + perform a ratio test to find the max step size + ''' + #step = cp.Variable() + #objective = cp.Maximize(step) + #constraints = [step >= 0] + #ratio test: (bi - ai^Tx)/(ai^Td) + ratio_val = [] + denom = [] + dim = len(cur_x) + + if(lower is not None): + #constraints += [(cur_x + step*d) >= lower] + #vals += [(lower[0] - cur_x[i])/(d[i]) for i in range(dim)] + ratio_val += list((cur_x - lower)/-d) + denom += list(-d) + if(upper is not None): + #constraints += [(cur_x + step*d) <= upper] + #vals += [(upper[0] - cur_x[i])/(d[i]) for i in range(dim)] + ratio_val += list((upper - cur_x)/d) + denom += list(d) + if((Ci is not None) and (di is not None)): + #constraints += [Ci@(cur_x + step*d) <= di] + ratio_val += list((di - Ci@cur_x)/(Ci@d)) + denom += list(Ci@d) + + 
#print("ratio: ", ratio) + ratio_val = np.array(ratio_val) + denom = np.array(denom) + #print("denom: ", denom) + #print("ratio_val: ", ratio_val) + + #if(len(ratio_val[denom > 1e-6]) == 0): + + return min(ratio_val[denom > 1e-6]) + + def full_min_quadratic(self, div_a,Fa,Fb,a,b,problem): + ''' + return the minimum point which is the + next step size usig the quadratic + interpolation with the information q(a), + q(b), q'(a) and q'(b) where a < b + ''' + #print("div: ",div_a) + #print("Fa,Fb: ", (Fa,Fb)) + #print("(a,b): ", (a,b)) + #numerator = (a**2 - b**2)*div_a - 2*a*(Fa - Fb) + #denominator = 2*((a-b)*div_a - (Fa - Fb)) + #result = numerator/denominator + + #if(result < a): + # return a + #elif(result > b): + # return b + #else: + # return result + + #return numerator/denominator + A = div_a/(a - b) - (Fa - Fb)/((a-b)**2) + B = div_a - 2*A*a + result = -B/(2*A) + + if(-problem.minmax[0] == np.sign(A)): + #if A and problem have the same sign, i.e. min and A > 0 or max and A < 0 + if(result < a): + return a + elif(result > b): + return b + else: + return result + else: + if(problem.minmax[0] > 0): + #maximization but A > 0 + return [a,b][np.argmax([Fa,Fb])] + + else: + #minization but A < 0 + return [a,b][np.argmin([Fa,Fb])] + + def quadratic_interpolate(self,x1,x2,div_x1,div_x2,Fx1,Fx2,problem): + ''' + interpolate the quadratic polynomial using given points + and return the lowest (arg)point + ''' + + if(x2 > x1): + #we use div_x1,x1,x2 + #return min_quadratic(div_x1,Fx1,Fx2,x2) + return self.full_min_quadratic(div_x1,Fx1,Fx2,x1,x2,problem) + else: + #we use div_x2,x2,x1 + #return min_quadratic(div_x2,Fx2,Fx1,x1) + return self.full_min_quadratic(div_x2,Fx2,Fx1,x2,x1,problem) + + def get_simulated_values(self,problem,x,value = 'both'): + """ + getting either sample path or gradient. 
The return "value" + can be specified to "val"|"gradient"|"both" + """ + r = self.factors["r"] + sol = self.create_new_solution(tuple(x), problem) + problem.simulate(sol, r) + budget = 0 + + #getting the function evaluation + if((value == "both") or (value == "val")): + budget += r + Fval = -1 * problem.minmax[0] * sol.objectives_mean + + if((value == "both") or (value == "gradient")): + if problem.gradient_available: + # Use IPA gradient if available. + gradient = -1 * problem.minmax[0] * sol.objectives_gradients_mean[0] + else: + gradient, budget_spent = self.get_FD_grad(x, problem, self.factors["h"], self.factors["r"]) + budget += budget_spent + + if(value == "val"): + return Fval, budget + elif(value == "gradient"): + return gradient, budget + else: + return Fval, gradient, budget + + def LineSearch(self,cur_sol,grad,d,max_step,problem,expended_budget): + """ + carry out line search on the function F where we + min F(x + alpha*d) s.t. alpha >=0 + + cur_sol: starting point + d: direction + grad: gradient at the point cur_sol + max_step: literally max step + ratio: decay ratio if fails + """ + + r = self.factors["r"] + ratio = self.factors["ratio"] + max_iter = self.factors["line_search_max_iters"] + + cur_iter = 0 + step_size = max_step + added_budget = 0 + + cur_x = cur_sol.x + curF = -1 * problem.minmax[0] * cur_sol.objectives_mean + + while True: + if(expended_budget + added_budget > problem.factors["budget"] or cur_iter >= max_iter): + break + + new_x = cur_x + step_size*d + new_sol = self.create_new_solution(tuple(new_x), problem) + problem.simulate(new_sol, r) + added_budget += r + + newF = -1 * problem.minmax[0] * new_sol.objectives_mean + + if(newF < curF + self.factors['theta'] * step_size * np.dot(grad, d)): + #if(newF < curF): + break + step_size = step_size*ratio + cur_iter += 1 + #print("newF: ", newF) + #print("linear F: ", curF + self.factors['theta'] * step_size * np.dot(grad, d)) + #print("inner iter: ", cur_iter) + return step_size, 
added_budget + + def backtrackLineSearch(self,cur_sol,grad,d,max_step,problem,expended_budget): + """ + carry out line search on the function F where we + min F(x + alpha*d) s.t. alpha >=0 + + cur_sol: starting point + d: direction + grad: gradient at the point cur_sol + max_step: literally max step + ratio: decay ratio if fails + """ + #print("backtrack LS") + #print("max step: ", max_step) + r = self.factors["r"] + ratio = self.factors["ratio"] + max_iter = self.factors["line_search_max_iters"] + + cur_iter = 0 + step_size = max_step + added_budget = 0 + + cur_x = cur_sol.x + #print("cur_x: ", cur_x) + curF = -1 * problem.minmax[0] * cur_sol.objectives_mean + #print("Line Search...") + while True: + #if(expended_budget + added_budget > problem.factors["budget"] or cur_iter >= max_iter): + if(cur_iter >= max_iter): + break; + + new_x = cur_x + step_size*d + #print("next x: ",new_x) + new_sol = self.create_new_solution(tuple(new_x), problem) + problem.simulate(new_sol, r) + added_budget += r + + newF = -1 * problem.minmax[0] * new_sol.objectives_mean + + #newF, budget_spent = self.get_simulated_values(problem,cur_x + step_size*d,value = 'val') + #added_budget += budget_spent + #new_grad = -1 * problem.minmax[0] * new_sol.objectives_gradients_mean[0] + #print("newF: ",newF) + + if(newF < curF + self.factors['theta'] * step_size * np.dot(grad, d)): + break; + + step_size = step_size*ratio + cur_iter += 1 + #print("---------------") + return step_size, added_budget + + def interpolateLineSearch(self,cur_sol,grad,d,max_step,problem,expended_budget): + """ + carry out interpolation line search on the function F where we + min F(x + alpha*d) s.t. 
alpha >=0 where phi(a) = F(x + ad) + """ + #print("Interpolation LS") + r = self.factors["r"] + ratio = self.factors["ratio"] + max_iter = self.factors["line_search_max_iters"] + sign = -problem.minmax[0] + + cur_iter = 0 + step_size = max_step + added_budget = 0 + + cur_x = cur_sol.x + curF = -1 * problem.minmax[0] * cur_sol.objectives_mean + #print("max_step: ", max_step) + if(max_step == 0): + return max_step, added_budget + + while True: + #while(not suff dec and cur iter) + #while((newF >= curF + self.factors['theta'] * step_size * np.dot(grad, d)) and (cur_iter < max_iter)): + #if(expended_budget + added_budget > problem.factors["budget"] or cur_iter >= max_iter): + if(cur_iter >= max_iter): + break; + #print("cur step size: ", step_size) + new_x = cur_x + step_size*d + #print("LS new x: ",new_x) + new_sol = self.create_new_solution(tuple(new_x), problem) + problem.simulate(new_sol, r) + added_budget += r + + newF = -1 * problem.minmax[0] * new_sol.objectives_mean + + #xrange = np.arange(0,1,0.01) + #nn = len(xrange) + #fval = np.zeros(nn) + #for i in range(nn): + # temp_x = cur_x + xrange[i]*d + # temp_sol = self.create_new_solution(tuple(temp_x), problem) + # problem.simulate(temp_sol, r) + # fval[i] = -1 * problem.minmax[0] * temp_sol.objectives_mean + # fval[i] = temp_sol.objectives_mean + #plt.scatter(xrange,fval) + #line = -1*problem.minmax[0]*curF + self.factors["theta"]*xrange*(-1*problem.minmax[0]* grad.dot(d)) + #plt.scatter(xrange,line,color='red') + #plt.show() + + #sufficient decrease + if(newF < curF + self.factors['theta'] * step_size * np.dot(grad, d)): + break; + + #quadratic interpolation using phi(0), phi'(0), phi(step) + #new_step_size = ((-grad.dot(d))*(step_size**2))/(2*(newF-curF-step_size*(grad.dot(d)))) + new_step_size = self.full_min_quadratic(sign*grad.dot(d),sign*curF,sign*newF,0,step_size,problem) + #print("grad . 
d: ", grad.dot(d)) + #print("opt new step: ", new_step_size) + if(abs(new_step_size) >= 1e-4): + #if we can make some progress + #new_step_size = ((-grad.dot(d))*(step_size**2))/(2*(newF-curF-step_size*(grad.dot(d)))) + step_size = min(new_step_size,max_step) + #elif(new_step_size == 0): + # step_size = 0 + # break; + else: + #if we did not make progress, use the informaiton {step/2} + temp_x = cur_x + (step_size*ratio)*d + temp_sol = self.create_new_solution(tuple(temp_x), problem) + problem.simulate(temp_sol, r) + added_budget += r + newF = -1 * problem.minmax[0] * temp_sol.objectives_mean + #print("another newF: ", newF) + #new_step_size = ((-grad.dot(d))*((step_size/2)**2))/(2*(newF-curF-(step_size/2)*(grad.dot(d)))) + #new_step_size = ((-grad.dot(d))*((step_size*ratio)**2))/(2*(newF-curF-(step_size*ratio)*(grad.dot(d)))) + new_step_size = self.full_min_quadratic(sign*grad.dot(d),sign*curF,sign*newF,0,step_size*ratio,problem) + #check if it's in the interval + if(new_step_size <= 0): #outside interval (too small) + step_size = 0 + break; + elif(new_step_size > step_size*ratio): #outside interval (too big) + step_size = step_size*ratio + else: + step_size = new_step_size + + #print("new step: ", step_size) + cur_iter += 1 + #print("=============") + #print("determined step: ", step_size) + return step_size, added_budget + + def zoomLineSearch(self,cur_sol,grad,d,max_step,problem,expended_budget): + """ + carry out interpolation line search on the function F where we + min F(x + alpha*d) s.t. 
alpha >=0 where phi(a) = F(x + ad) + + NOTE: in this method, we increase the step size + """ + #print("ZOOM LS") + #print("max step: ",max_step) + if(max_step == 0): + return 0,0 + r = self.factors["r"] + ratio = self.factors["ratio"] + max_iter = self.factors["line_search_max_iters"] + + cur_iter = 0 + #step_size = max_step + cur_step_size = max_step*self.factors["zoom_init_ratio"] + last_step_size = 0 + last_grad = grad + added_budget = 0 + + #xrange = np.arange(0,max_step,0.01) + #nn = len(xrange) + #fval = np.zeros(nn) + #for i in range(nn): + # temp_x = cur_sol.x + xrange[i]*d + # temp_sol = self.create_new_solution(tuple(temp_x), problem) + # problem.simulate(temp_sol, r) + #fval[i] = -1 * problem.minmax[0] * temp_sol.objectives_mean + # fval[i] = temp_sol.objectives_mean + #plt.scatter(xrange,fval) + #plt.show() + + cur_x = cur_sol.x + curF = -1 * problem.minmax[0] * cur_sol.objectives_mean + lastF = curF + nextF, next_grad, budget_spent = self.get_simulated_values(problem,cur_x + cur_step_size*d,value = 'both') + added_budget += budget_spent + + #line = -curF + self.factors["theta"]*xrange*(-1*problem.minmax[0]* grad.dot(d)) + #plt.scatter(xrange,line,color='red') + #plt.show() + returned_steps = [] + returned_vals = [] + + while True: + #while(not suff dec and cur iter) + #if(expended_budget + added_budget > problem.factors["budget"] or cur_iter >= max_iter): + if(cur_iter >= max_iter): + break; + + #print("cur_grad: ", grad.dot(d)) + #print("next_grad: ", next_grad.dot(d)) + #sufficient decrease doesn't satisfy, zoom into an interval + if((nextF >= curF + self.factors['theta'] * cur_step_size * np.dot(grad, d))): + #zoom into the interval {last_step,cur_step} + #step_lo, step_hi, Flo, Fhi, div_lo, div_hi + #print("zooming, NO SF") + return self.zoomSearch(last_step_size,cur_step_size,lastF,nextF, + last_grad.dot(d),next_grad.dot(d),problem, + cur_x,curF,grad,d,added_budget,cur_iter) + + #check curvature, if satisfies then return + 
if((abs(next_grad.dot(d)) <= self.factors['curve_const']*abs(grad.dot(d)))): + #print("Satisfied - upper") + step_size = cur_step_size + break; + if((next_grad.dot(d)) >= 0): + #zoom + #print("zooming, sign") + return self.zoomSearch(cur_step_size,last_step_size,nextF,lastF, + next_grad.dot(d),last_grad.dot(d),problem, + cur_x,curF,grad,d,added_budget,cur_iter) + + returned_steps.append(cur_step_size) + returned_vals.append(nextF) + #print("new step: ", cur_step_size) + #print("sign*Fval: ",nextF) + + if(cur_step_size >= max_step): + break; + + last_step_size = cur_step_size + cur_step_size = min(max_step,cur_step_size*self.factors["zoom_inc_ratio"]) + + lastF = nextF + last_grad = next_grad + nextF, next_grad, budget_spent = self.get_simulated_values(problem,cur_x + cur_step_size*d,value = 'both') + added_budget += budget_spent + + cur_iter += 1 + #print("iter: ",cur_iter) + #print("added budget: ",added_budget) + #print("---------------") + #print("max step: ",max_step) + + if((cur_iter == max_iter) or (cur_step_size >= max_step)): + #return max_step*self.factors["zoom_init_ratio"], added_budget + #print("return from iteration or max step") + return returned_steps[np.argmin(returned_vals)] ,added_budget + #return max_step, added_budget + else: + return cur_step_size, added_budget + + def zoomSearch(self,step_lo, step_hi, Flo, Fhi, div_lo, div_hi, problem,cur_x,curF, grad,d,added_budget,cur_iter): + """ + carry out the zoom search into the interval {} + *two of these are not ordered* + """ + max_iter = self.factors["line_search_max_iters"] + r = self.factors["r"] + sign = -1*problem.minmax[0] + + while(True): + if(cur_iter >= max_iter): + break; + + #m1 = min([step_lo,step_hi]) + #m2 = max([step_lo,step_hi]) + #print("zooming:: (",str(m1) + "," + str(m2) + ")") + #use the actual value without the sign + new_step = self.quadratic_interpolate(step_lo,step_hi,sign*div_lo,sign*div_hi,sign*Flo,sign*Fhi,problem) + #print("left div: ", left_dif) + #print("right div: ", 
right_dif) + #print("left val: ", left_val) + #print("right val: ", right_val) + + #print("new step: ", new_step) + + #xrange = np.arange(m1,m2,(m2-m1)/20) + #nn = len(xrange) + #fval = np.zeros(nn) + #for i in range(nn): + # temp_x = cur_x + xrange[i]*d + # temp_sol = self.create_new_solution(tuple(temp_x), problem) + # problem.simulate(temp_sol, r) + #fval[i] = -1 * problem.minmax[0] * temp_sol.objectives_mean + # fval[i] = temp_sol.objectives_mean + #plt.scatter(xrange,fval) + #plt.show() + + if(abs(new_step - step_lo) < 1e-4 or abs(new_step - step_hi) < 1e-4): + return new_step, added_budget + + #new_grad = grad_f(cur_x + new_step*d).dot(d) + #newF = F(cur_x + new_step*d) + newF, new_grad, budget_spent = self.get_simulated_values(problem,cur_x + new_step*d,value = 'both') + added_budget += budget_spent + + #is_suff_decrese(nextF, curF, theta, cur_grad, cur_step_size, d) + #if(not is_suff_decrese(newF, curF, theta, grad.dot(d), new_step)): + if((newF >= curF + self.factors['theta'] * new_step * np.dot(grad, d))): + step_hi = new_step + #Fhi = F(cur_x + step_hi*d) + Fhi, div_hi, budget_spent = self.get_simulated_values(problem,cur_x + step_hi*d,value = 'both') + div_hi = div_hi.dot(d) + else: + #if(is_strong_curvature(new_grad, grad.dot(d), rho)): + #if(is_curvature(new_grad, grad.dot(d), rho)): + if((abs(new_grad.dot(d)) <= self.factors['curve_const']*abs(grad.dot(d)))): + return new_step, added_budget + if((new_grad.dot(d))*(step_hi - step_lo) >= 0): + step_hi = step_lo + + Fhi, div_hi, budget_spent = self.get_simulated_values(problem,cur_x + step_hi*d,value = 'both') + div_hi = div_hi.dot(d) + added_budget += budget_spent + + step_lo = new_step + #Flo = F(cur_x + step_lo*d) + #Fhi = F(cur_x + step_hi*d) + + Flo, div_lo, budget_spent = self.get_simulated_values(problem,cur_x + step_lo*d,value = 'both') + div_lo = div_lo.dot(d) + added_budget += budget_spent + #Fhi, budget_spent = self.get_simulated_values(problem,cur_x + step_hi*d,value = 'val') + #added_budget 
+= budget_spent + + cur_iter += 1 + + return new_step, added_budget + + def get_FD_grad(self, x, problem, h, r): + """ + find a finite difference gradient from the problem at + the point x + """ + x = np.array(x) + d = len(x) + + if(d == 1): + #xsol = self.create_new_solution(tuple(x), problem) + x1 = x + h/2 + x2 = x - h/2 + + x1 = self.create_new_solution(tuple(x1), problem) + problem.simulate(x1, r) + f1 = -1 * problem.minmax[0] * x1.objectives_mean + + x2 = self.create_new_solution(tuple(x2), problem) + problem.simulate(x2, r) + f2 = -1 * problem.minmax[0] * x2.objectives_mean + grad = (f1-f2)/h + else: + I = np.eye(d) + grad = 0 + + for i in range(d): + x1 = x + h*I[:,i]/2 + x2 = x - h*I[:,i]/2 + + x1 = self.create_new_solution(tuple(x1), problem) + problem.simulate_up_to([x1], r) + f1 = -1 * problem.minmax[0] * x1.objectives_mean + + x2 = self.create_new_solution(tuple(x2), problem) + problem.simulate_up_to([x2], r) + f2 = -1 * problem.minmax[0] * x2.objectives_mean + + grad += ((f1-f2)/h)*I[:,i] + + return grad, (2*d*r) + + def solve(self, problem): + + #print("Starting PGD") + + max_iters = self.factors['max_iters'] + proj_thres = self.factors['proj_thres'] + r = self.factors["r"] + max_gamma = self.factors["max_gamma"] + + ls_type = self.factors['LSmethod'] + #self.factors['problem'] = problem + #print(ls_type) + if(ls_type == 'backtracking'): + self.factors["LSfn"] = self.backtrackLineSearch + elif(ls_type == 'interpolation'): + self.factors["LSfn"] = self.interpolateLineSearch + else: + self.factors["LSfn"] = self.zoomLineSearch + + #t = 1 #first max step size + dim = problem.dim + + Ci = problem.Ci + di = problem.di + Ce = problem.Ce + de = problem.de + + lower = np.array(problem.lower_bounds) + upper = np.array(problem.upper_bounds) + + recommended_solns = [] + intermediate_budgets = [] + expended_budget = 0 + action = "normal" #storing whether we do the projection in each step + last_action = "normal" + + #store consecutive projections + consec_proj = 
0 + k = 0 + max_step = 1 #initial max step + last_normal_maxstep = 1 + last_proj_maxstep = 1 + + # Start with the initial solution. + new_solution = self.create_new_solution(problem.factors["initial_solution"], problem) + #cur_x = new_solution.x + + # Use r simulated observations to estimate the objective value. + problem.simulate(new_solution, r) + expended_budget += r + best_solution = new_solution + recommended_solns.append(new_solution) + intermediate_budgets.append(expended_budget) + + while expended_budget < problem.factors["budget"] and consec_proj < proj_thres*max_iters: + cur_x = new_solution.x + + proj_trace = int(proj_thres*max_iters) + + #computing the gradients + if problem.gradient_available: + # Use IPA gradient if available. + grad = -1 * problem.minmax[0] * new_solution.objectives_gradients_mean[0] + else: + # Use finite difference to estimate gradient if IPA gradient is not available. + #grad, budget_spent = self.finite_diff(new_solution, problem, r, stepsize = alpha) + grad, budget_spent = self.get_FD_grad(cur_x, problem, self.factors["h"], self.factors["r"]) + expended_budget += budget_spent + # A while loop to prevent zero gradient. + #while np.all((grad == 0)): + # if expended_budget > problem.factors["budget"]: + # break + #grad, budget_spent = self.finite_diff(new_solution, problem, r) + #grad, budget_spent = self.get_FD_grad(self, x, problem, h, r) + # grad, budget_spent = self.get_FD_grad(cur_x, problem, self.factors["h"], self.factors["r"]) + # expended_budget += budget_spent + # Update r after each iteration. 
+ #r = int(self.factors["lambda"] * r) + + #print("max_step: ",max_step) + direction = -grad/np.linalg.norm(grad) + temp_x = cur_x + max_step * direction + #print("cur x: ",cur_x) + #print("temp x: ",temp_x) + + #if the new iterate is feasible, then no need to project + if(not self.is_feasible(temp_x, Ci,di,Ce,de,lower,upper)): + action = "project" + proj_x = self.proj(temp_x,Ci,di,Ce,de,lower,upper) + #print("proj x: ",proj_x) + direction = proj_x - cur_x #change direction to the projected point + max_step = 1 + + #if(last_action == "project"): + #consecutive projection: should increase max proj step + # max_step = min(self.factors["max_step_size"],last_proj_maxstep/self.factors["ratio"]) + #else: + #last step is normal + #max step is to go to the boundary + # max_step = 1 + consec_proj += 1 + else: + action = "normal" + #decrease consecutive projection if we don't have the projection + if(consec_proj > 0): + #consec_proj -= 1 + consec_proj = 0 + + #max_step_feas = self.get_max_gamma_ratio_test(cur_x, direction, Ce, Ci, de, di, lower, upper) + #print("max step: ", max_step) + #step sizes + if(self.factors["backtrack"]): + #t, added_budget = self.LineSearch(new_solution,grad,direction,self.factors["max_gamma"],problem,expended_budget) + #t, added_budget = self.LineSearch(new_solution,grad,direction,t,problem,expended_budget) + t, added_budget = self.factors['LSfn'](new_solution,grad,direction,max_step,problem,expended_budget) + expended_budget += added_budget + else: + #t = min(self.factors["step_f"](k),self.factors["max_gamma"]) + t = self.factors["step_f"](k)#*direction#np.linalg.norm(grad) + + if(action == "normal"): + #store the last max step size of the normal iteration + last_normal_maxstep = max_step + if(t == max_step): + #if we reach max step, then next iteration should move further + #max_step = min(max_gamma,max_step/self.factors["ratio"]) + max_step = min(self.factors["max_step_size"],max_step/self.factors["ratio"]) + #t = min(t,max_step_feas) + else: 
+ max_step = max(1,max_step*self.factors["ratio"]) + else: + #we have the projection, next max step is the max step from + #the iteration before projection + #store the max step in the projection iteration + #last_proj_maxstep = max_step + if(t == max_step): + #print("full projection") + #max_step = min(self.factors["max_step_size"],last_proj_maxstep/self.factors["ratio"]) + last_proj_maxstep = min(self.factors["max_step_size"],last_proj_maxstep/self.factors["ratio"]) + max_step = last_proj_maxstep + else: + #use this for the next iteration, assume to be normal + last_proj_maxstep = max(1,last_proj_maxstep*self.factors["ratio"]) + max_step = last_normal_maxstep + #print("act: ", action) + #if(t == max_step): + # max_step = min(max_gamma,max_step/self.factors["ratio"]) + #print("grad: ", grad) + #print("step: ", t) + #t = self.factors['step_f'](k) + #new_x = cur_x - t * grad + new_x = cur_x + t * direction + last_action = action + #update the max step for the next iteration + #t = min(self.factors["max_step_size"],t/self.factors["ratio"]) + #print("new_x before proj: ", new_x) + + #print("new_x after proj: ", new_x) + #print("new x: ",new_x) + + candidate_solution = self.create_new_solution(tuple(new_x), problem) + # Use r simulated observations to estimate the objective value. 
+ problem.simulate(candidate_solution, r) + expended_budget += r + + new_solution = candidate_solution + recommended_solns.append(candidate_solution) + intermediate_budgets.append(expended_budget) + + #print("current budget: ",expended_budget) + #print("========================") + + k += 1 + #print("obj: ",candidate_solution.objectives_mean) + #print("------------------------------------") + return recommended_solns, intermediate_budgets + \ No newline at end of file diff --git a/simopt/solvers/pgdss.py b/simopt/solvers/pgdss.py new file mode 100644 index 000000000..2c985c9f1 --- /dev/null +++ b/simopt/solvers/pgdss.py @@ -0,0 +1,576 @@ +""" +Summary +------- +PGD-SS: A projected gradient descent algorithm with adaptive step search +for problems with linear constraints, i.e., Ce@x = de, Ci@x <= di. +A detailed description of the solver can be found `here `_. +""" +import numpy as np +import cvxpy as cp +import warnings +warnings.filterwarnings("ignore") + +from ..base import Solver + + +class PGDSS(Solver): + """ + The PGD solver with adaptive step search. 
+ + Attributes + ---------- + name : string + name of solver + objective_type : string + description of objective types: + "single" or "multi" + constraint_type : string + description of constraints types: + "unconstrained", "box", "deterministic", "stochastic" + variable_type : string + description of variable types: + "discrete", "continuous", "mixed" + gradient_needed : bool + indicates if gradient of objective function is needed + factors : dict + changeable factors (i.e., parameters) of the solver + specifications : dict + details of each factor (for GUI, data validation, and defaults) + rng_list : list of rng.MRG32k3a objects + list of RNGs used for the solver's internal purposes + + Arguments + --------- + name : str + user-specified name for solver + fixed_factors : dict + fixed_factors of the solver + + See also + -------- + base.Solver + """ + def __init__(self, name="PGD-SS", fixed_factors={}): + self.name = name + self.objective_type = "single" + self.constraint_type = "deterministic" + self.variable_type = "continuous" + self.gradient_needed = False + self.specifications = { + "crn_across_solns": { + "description": "use CRN across solutions?", + "datatype": bool, + "default": True + }, + "r": { + "description": "number of replications taken at each solution", + "datatype": int, + "default": 30 + }, + "theta": { + "description": "constant in the Armijo condition", + "datatype": int, + "default": 0.2 + }, + "gamma": { + "description": "constant for shrinking the step size", + "datatype": int, + "default": 0.8 + }, + "alpha_max": { + "description": "maximum step size", + "datatype": int, + "default": 10 + }, + "alpha_0": { + "description": "initial step size", + "datatype": int, + "default": 1 + }, + "epsilon_f": { + "description": "additive constant in the Armijo condition", + "datatype": int, + "default": 1e-3 # In the paper, this value is estimated for every epoch but a value > 0 is justified in practice. 
+ }, + "lambda": { + "description": "magnifying factor for r inside the finite difference function", + "datatype": int, + "default": 2 + }, + "tol": { + "description": "floating point comparison tolerance", + "datatype": float, + "default": 1e-7 + }, + "finite_diff_step": { + "description": "step size for finite difference", + "datatype": float, + "default": 1e-5 + } + + } + self.check_factor_list = { + "crn_across_solns": self.check_crn_across_solns, + "r": self.check_r, + "theta": self.check_theta, + "gamma": self.check_gamma, + "alpha_max": self.check_alpha_max, + "alpha_0": self.check_alpha_0, + "epsilon_f": self.check_epsilon_f, + "lambda": self.check_lambda, + "tol": self.check_tol, + "finite_diff_step": self.check_finite_diff_step + } + super().__init__(fixed_factors) + + def check_r(self): + return self.factors["r"] > 0 + + def check_theta(self): + return self.factors["theta"] > 0 & self.factors["theta"] < 1 + + def check_gamma(self): + return self.factors["gamma"] > 0 & self.factors["gamma"] < 1 + + def check_alpha_max(self): + return self.factors["alpha_max"] > 0 + + def check_alpha_0(self): + return self.factors["alpha_0"] > 0 + + def check_epsilon_f(self): + return self.factors["epsilon_f"] > 0 + + def check_tol(self): + return self.factors["tol"] > 0 + + def check_lambda(self): + return self.factors["lambda"] > 0 + + def check_finite_diff_step(self): + return self.factors["finite_diff_step"] > 0 + + def solve(self, problem): + """ + Run a single macroreplication of a solver on a problem. 
+ + Arguments + --------- + problem : Problem object + simulation-optimization problem to solve + crn_across_solns : bool + indicates if CRN are used when simulating different solutions + + Returns + ------- + recommended_solns : list of Solution objects + list of solutions recommended throughout the budget + intermediate_budgets : list of ints + list of intermediate budgets when recommended solutions changes + """ + recommended_solns = [] + intermediate_budgets = [] + expended_budget = 0 + + # Default values. + r = self.factors["r"] + tol = self.factors["tol"] + theta = self.factors["theta"] + gamma = self.factors["gamma"] + alpha_max = self.factors["alpha_max"] + alpha_0 = self.factors["alpha_0"] + epsilon_f = self.factors["epsilon_f"] + + # Upper bound and lower bound. + lower_bound = np.array(problem.lower_bounds) + upper_bound = np.array(problem.upper_bounds) + + # Initialize stepsize. + alpha = alpha_0 + + # Input inequality and equality constraint matrix and vector. + # Cix <= di + # Cex = de + Ci = problem.Ci + di = problem.di + Ce = problem.Ce + de = problem.de + + # Checker for whether the problem is unconstrained. + unconstr_flag = (Ce is None) & (Ci is None) & (di is None) & (de is None) & (all(np.isinf(lower_bound))) & (all(np.isinf(upper_bound))) + + # Start with the initial solution. + new_solution = self.create_new_solution(problem.factors["initial_solution"], problem) + new_x = new_solution.x + + # If the initial solution is not feasible, generate one using phase one simplex. + if (not unconstr_flag) & (not self._feasible(new_x, problem, tol)): + new_x = self.find_feasible_initial(problem, Ce, Ci, de, di, tol) + new_solution = self.create_new_solution(tuple(new_x), problem) + + # Use r simulated observations to estimate the objective value. 
+ problem.simulate(new_solution, r) + expended_budget += r + best_solution = new_solution + recommended_solns.append(new_solution) + intermediate_budgets.append(expended_budget) + + while expended_budget < problem.factors["budget"]: + new_x = new_solution.x + # Check variable bounds. + # forward = np.isclose(new_x, lower_bound, atol = tol).astype(int) + # backward = np.isclose(new_x, upper_bound, atol = tol).astype(int) + # # BdsCheck: 1 stands for forward, -1 stands for backward, 0 means central diff. + # BdsCheck = np.subtract(forward, backward) + + if problem.gradient_available: + # Use IPA gradient if available. + grad = -1 * problem.minmax[0] * new_solution.objectives_gradients_mean[0] + else: + # Use finite difference to estimate gradient if IPA gradient is not available. + grad, budget_spent = self.finite_diff(new_solution, problem, r, stepsize=alpha) + expended_budget += budget_spent + # A while loop to prevent zero gradient. + while np.all((grad == 0)): + if expended_budget > problem.factors["budget"]: + break + grad, budget_spent = self.finite_diff(new_solution, problem, r) + expended_budget += budget_spent + # Update r after each iteration. + r = int(self.factors["lambda"] * r) + + # Get search direction by taking negative normalized gradient. + dir = -grad / np.linalg.norm(grad) + + # Get a temp solution. + temp_x = new_x + alpha * dir + + if unconstr_flag or self._feasible(temp_x, problem, tol): + candidate_solution = self.create_new_solution(tuple(temp_x), problem) + else: + # If not feasible, project temp_x back to the feasible set. + proj_x = self.project_grad(problem, temp_x, Ce, Ci, de, di) + candidate_solution = self.create_new_solution(tuple(proj_x), problem) + # Get new search direction based on projection. + dir = proj_x - new_x + + # Use r simulated observations to estimate the objective value. + problem.simulate(candidate_solution, r) + expended_budget += r + + # Check the modified Armijo condition for sufficient decrease. 
+ if (-1 * problem.minmax[0] * candidate_solution.objectives_mean) <= ( + -1 * problem.minmax[0] * new_solution.objectives_mean + alpha * theta * np.dot(grad, dir) + 2 * epsilon_f): + # Successful step + new_solution = candidate_solution + # Enlarge step size. + alpha = min(alpha_max, alpha / gamma) + else: + # Unsuccessful step - reduce step size. + alpha = gamma * alpha + # Append new solution. + if (problem.minmax[0] * new_solution.objectives_mean > problem.minmax[0] * best_solution.objectives_mean): + best_solution = new_solution + recommended_solns.append(new_solution) + intermediate_budgets.append(expended_budget) + + return recommended_solns, intermediate_budgets + + + def finite_diff(self, new_solution, problem, r, stepsize = 1e-5, tol = 1e-7): + ''' + Finite difference for approximating objective gradient at new_solution. + + Arguments + --------- + new_solution : Solution object + a solution to the problem + problem : Problem object + simulation-optimization problem to solve + r : int + number of replications taken at each solution + stepsize: float + step size for finite differences + + Returns + ------- + grad : ndarray + the estimated objective gradient at new_solution + budget_spent : int + budget spent in finite difference + ''' + Ci = problem.Ci + di = problem.di + Ce = problem.Ce + de = problem.de + + # Upper bound and lower bound. + lower_bound = np.array(problem.lower_bounds) + upper_bound = np.array(problem.upper_bounds) + + # Remove redundant upper/lower bounds. + ub_inf_idx = np.where(~np.isinf(upper_bound))[0] + lb_inf_idx = np.where(~np.isinf(lower_bound))[0] + + # Form a constraint coefficient matrix where all the equality constraints are put on top and + # all the bound constraints in the bottom and a constraint coefficient vector. 
+ if (Ce is not None) and (de is not None) and (Ci is not None) and (di is not None): + C = np.vstack((Ce, Ci)) + d = np.vstack((de.T, di.T)) + elif (Ce is not None) and (de is not None): + C = Ce + d = de.T + elif (Ci is not None) and (di is not None): + C = Ci + d = di.T + else: + C = np.empty([1, problem.dim]) + d = np.empty([1, 1]) + + if len(ub_inf_idx) > 0: + C = np.vstack((C, np.identity(upper_bound.shape[0]))) + d = np.vstack((d, upper_bound[np.newaxis].T)) + if len(lb_inf_idx) > 0: + C = np.vstack((C, -np.identity(lower_bound.shape[0]))) + d = np.vstack((d, -lower_bound[np.newaxis].T)) + + BdsCheck = np.zeros(problem.dim) + fn = -1 * problem.minmax[0] * new_solution.objectives_mean + new_x = new_solution.x + # Store values for each dimension. + FnPlusMinus = np.zeros((problem.dim, 3)) + grad = np.zeros(problem.dim) + + for i in range(problem.dim): + # Initialization. + x1 = list(new_x) + x2 = list(new_x) + # Forward stepsize. + steph1 = stepsize + # Backward stepsize. + steph2 = stepsize + + dir1 = np.zeros(problem.dim) + dir1[i] = 1 + dir2 = np.zeros(problem.dim) + dir2[i] = -1 + + ra = d.flatten() - C @ new_x + ra_d = C @ dir1 + # Initialize maximum step size. + temp_steph1 = np.inf + # Perform ratio test. + for j in range(len(ra)): + if ra_d[j] - tol > 0: + s = ra[j]/ra_d[j] + if s < steph1: + temp_steph1 = s + steph1 = min(temp_steph1, steph1) + + ra_d = C @ dir2 + # Initialize maximum step size. + temp_steph2 = np.inf + # Perform ratio test. + for j in range(len(ra)): + if ra_d[j] - tol > 0: + s = ra[j]/ra_d[j] + if s < steph2: + temp_steph2 = s + steph2 = min(temp_steph2, steph2) + + if (steph1 != 0) & (steph2 != 0): + BdsCheck[i] = 0 + elif steph1 == 0: + BdsCheck[i] = -1 + else: + BdsCheck[i] = 1 + + # Decide stepsize. + # Central diff. + if BdsCheck[i] == 0: + FnPlusMinus[i, 2] = min(steph1, steph2) + x1[i] = x1[i] + FnPlusMinus[i, 2] + x2[i] = x2[i] - FnPlusMinus[i, 2] + # Forward diff. 
+ elif BdsCheck[i] == 1: + FnPlusMinus[i, 2] = steph1 + x1[i] = x1[i] + FnPlusMinus[i, 2] + # Backward diff. + else: + FnPlusMinus[i, 2] = steph2 + x2[i] = x2[i] - FnPlusMinus[i, 2] + + x1_solution = self.create_new_solution(tuple(x1), problem) + if BdsCheck[i] != -1: + problem.simulate_up_to([x1_solution], r) + fn1 = -1 * problem.minmax[0] * x1_solution.objectives_mean + # First column is f(x+h,y). + FnPlusMinus[i, 0] = fn1 + x2_solution = self.create_new_solution(tuple(x2), problem) + if BdsCheck[i] != 1: + problem.simulate_up_to([x2_solution], r) + fn2 = -1 * problem.minmax[0] * x2_solution.objectives_mean + # Second column is f(x-h,y). + FnPlusMinus[i, 1] = fn2 + + # Calculate gradient. + if BdsCheck[i] == 0: + grad[i] = (fn1 - fn2) / (2 * FnPlusMinus[i, 2]) + elif BdsCheck[i] == 1: + grad[i] = (fn1 - fn) / FnPlusMinus[i, 2] + elif BdsCheck[i] == -1: + grad[i] = (fn - fn2) / FnPlusMinus[i, 2] + budget_spent = (2 * problem.dim - np.sum(BdsCheck != 0)) * r + return grad, budget_spent + + def _feasible(self, x, problem, tol): + """ + Check whether a solution x is feasible to the problem. + + Arguments + --------- + x : tuple + a solution vector + problem : Problem object + simulation-optimization problem to solve + tol: float + Floating point comparison tolerance + """ + x = np.asarray(x) + lb = np.asarray(problem.lower_bounds) + ub = np.asarray(problem.upper_bounds) + res = True + if (problem.Ci is not None) and (problem.di is not None): + res = res & np.all(problem.Ci @ x <= problem.di + tol) + if (problem.Ce is not None) and (problem.de is not None): + res = res & (np.allclose(np.dot(problem.Ce, x), problem.de, rtol=0, atol=tol)) + return res & (np.all(x >= lb)) & (np.all(x <= ub)) + + def project_grad(self, problem, x, Ae, Ai, be, bi): + """ + Project the vector x onto the hyperplane H: Ae x = be, Ai x <= bi by solving a quadratic projection problem: + + min d^Td + s.t. 
Ae(x + d) = be + Ai(x + d) <= bi + (x + d) >= lb + (x + d) <= ub + + Arguments + --------- + problem : Problem object + simulation-optimization problem to solve + x : ndarray + vector to be projected + Ae: ndarray + equality constraint coefficient matrix + be: ndarray + equality constraint coefficient vector + Ai: ndarray + inequality constraint coefficient matrix + bi: ndarray + inequality constraint coefficient vector + Returns + ------- + x_new : ndarray + the projected vector + """ + # Define variables. + d = cp.Variable(problem.dim) + + # Define objective. + obj = cp.Minimize(cp.quad_form(d, np.identity(problem.dim))) + + # Define constraints. + constraints = [] + if (Ae is not None) and (be is not None): + constraints.append(Ae @ (x + d) == be.ravel()) + if (Ai is not None) and (bi is not None): + constraints.append(Ai @ (x + d) <= bi.ravel()) + + upper_bound = np.array(problem.upper_bounds) + lower_bound = np.array(problem.lower_bounds) + # Removing redundant bound constraints. + ub_inf_idx = np.where(~np.isinf(upper_bound))[0] + if len(ub_inf_idx) > 0: + for i in ub_inf_idx: + constraints.append((x + d)[i] <= upper_bound[i]) + lb_inf_idx = np.where(~np.isinf(lower_bound))[0] + if len(lb_inf_idx) > 0: + for i in lb_inf_idx: + constraints.append((x + d)[i] >= lower_bound[i]) + + # Form and solve problem. + prob = cp.Problem(obj, constraints) + prob.solve() + + # Get the projected vector. + x_new = x + d.value + + # Avoid floating point error + x_new[np.abs(x_new) < self.factors["tol"]] = 0 + + return x_new + + def find_feasible_initial(self, problem, Ae, Ai, be, bi, tol): + ''' + Find an initial feasible solution (if not user-provided) + by solving phase one simplex. 
+ + Arguments + --------- + problem : Problem object + simulation-optimization problem to solve + Ae, Ai : ndarray + equality/inequality constraint coefficient matrices + be, bi : ndarray + equality/inequality constraint coefficient vectors + tol: float + floating point comparison tolerance + + Returns + ------- + x0 : ndarray + an initial feasible solution + ''' + upper_bound = np.array(problem.upper_bounds) + lower_bound = np.array(problem.lower_bounds) + + # Define decision variables. + x = cp.Variable(problem.dim) + + # Define constraints. + constraints = [] + + if (Ae is not None) and (be is not None): + constraints.append(Ae @ x == be.ravel()) + if (Ai is not None) and (bi is not None): + constraints.append(Ai @ x <= bi.ravel()) + + # Removing redundant bound constraints. + ub_inf_idx = np.where(~np.isinf(upper_bound))[0] + if len(ub_inf_idx) > 0: + for i in ub_inf_idx: + constraints.append(x[i] <= upper_bound[i]) + lb_inf_idx = np.where(~np.isinf(lower_bound))[0] + if len(lb_inf_idx) > 0: + for i in lb_inf_idx: + constraints.append(x[i] >= lower_bound[i]) + + # Define objective function. + obj = cp.Minimize(0) + + # Create problem. + model = cp.Problem(obj, constraints) + + # Solve problem. + model.solve(solver = cp.SCIPY) + + # Check for optimality. 
+ if model.status not in [cp.OPTIMAL, cp.OPTIMAL_INACCURATE] : + raise ValueError("Could not find feasible x0") + x0 = x.value + if not self._feasible(x0, problem, tol): + raise ValueError("Could not find feasible x0") + + return x0 From df238d60ce74b60e197342f7f2990bd1bb53fbe0 Mon Sep 17 00:00:00 2001 From: Litong Liu <46491025+liulitong-Jessie@users.noreply.github.com> Date: Wed, 28 Feb 2024 11:18:35 +0800 Subject: [PATCH 21/21] Add files via upload --- demo/demo_problems_solvers.py | 37 +++++++++++++++++++++++++++-------- 1 file changed, 29 insertions(+), 8 deletions(-) diff --git a/demo/demo_problems_solvers.py b/demo/demo_problems_solvers.py index 5c22718fb..ab02665b7 100644 --- a/demo/demo_problems_solvers.py +++ b/demo/demo_problems_solvers.py @@ -11,6 +11,13 @@ # Import the ProblemsSolvers class and other useful functions from simopt.experiment_base import ProblemsSolvers, plot_solvability_profiles +from simopt.models.san import SANLongestPath +from simopt.models.smf_origin import SMF_Max +from simopt.models.rmitd import RMITDMaxRevenue +from simopt.models.mm1queue import MM1MinMeanSojournTime +# from simopt.models.san_1 import SANLongestPath1 +from simopt.experiment_base import ProblemSolver, plot_area_scatterplots, post_normalize, plot_progress_curves, plot_solvability_cdfs, read_experiment_results, plot_solvability_profiles, plot_terminal_scatterplots, plot_terminal_progress + # !! When testing a new solver/problem, first go to directory.py. # There you should add the import statement and an entry in the respective @@ -20,28 +27,42 @@ # Specify the names of the solver and problem to test. # These names are strings and should match those input to directory.py. 
# Ex: -solver_names = ["RNDSRCH", "ASTRODF", "NELDMD"] -problem_names = ["CNTNEWS-1", "SAN-1"] +# solver_names = ["ASTRODF", "Boom-PGD", "Boom-FW", "RNDSRCH", "GASSO", "NELDMD"] +solver_names = ['ACTIVESET', 'Boom-FW', 'Boom-PGD', 'PGD-SS'] +problem_names = ["OPENJACKSON-1", 'SAN-1', 'SMFCVX-1', 'SMF-1', 'CASCADE-1', 'NETWORK-1'] +# problem_names = ["DYNAMNEWS-1", "SSCONT-1", "SAN-1"] "OPENJ-1" +# problem_names = ["SMF-1", "SAN-1", "RMITD-1", "MM1-1"] +# problems = [SANLongestPath, SMF_Max, RMITDMaxRevenue, MM1MinMeanSojournTime] # Initialize an instance of the experiment class. mymetaexperiment = ProblemsSolvers(solver_names, problem_names) +# mymetaexperiment = ProblemsSolvers(solver_names=solver_names, problems = problems) -# Write to log file. -mymetaexperiment.log_group_experiment_results() +n_solvers = len(mymetaexperiment.experiments) +n_problems = len(mymetaexperiment.experiments[0]) # Run a fixed number of macroreplications of each solver on each problem. -mymetaexperiment.run(n_macroreps=3) +mymetaexperiment.run(n_macroreps=20) + print("Post-processing results.") # Run a fixed number of postreplications at all recommended solutions. -mymetaexperiment.post_replicate(n_postreps=50) +mymetaexperiment.post_replicate(n_postreps=20) # Find an optimal solution x* for normalization. -mymetaexperiment.post_normalize(n_postreps_init_opt=50) +mymetaexperiment.post_normalize(n_postreps_init_opt=20) print("Plotting results.") # Produce basic plots of the solvers on the problems. 
-plot_solvability_profiles(experiments=mymetaexperiment.experiments, plot_type="cdf_solvability") +plot_solvability_profiles(experiments=mymetaexperiment.experiments, plot_type="cdf_solvability") # cdf_solvability +# plot_solvability_profiles(experiments=mymetaexperiment.experiments, plot_type="diff_quantile_solvability", ref_solver='RNDSRCH', all_in_one=True, plot_CIs=True, print_max_hw=True) +# plot_terminal_scatterplots(experiments=mymetaexperiment.experiments, all_in_one=True) + +# Plot the mean progress curves of the solvers on the problems. +CI_param = True +for i in range(n_problems): + plot_progress_curves([mymetaexperiment.experiments[solver_idx][i] for solver_idx in range(n_solvers)], plot_type="mean", all_in_one=True, plot_CIs=CI_param, print_max_hw=True) + # Plots will be saved in the folder experiments/plots. print("Finished. Plots can be found in experiments/plots folder.") \ No newline at end of file