diff --git a/demo/demo_problems_solvers.py b/demo/demo_problems_solvers.py index 5c22718fb..ab02665b7 100644 --- a/demo/demo_problems_solvers.py +++ b/demo/demo_problems_solvers.py @@ -11,6 +11,13 @@ # Import the ProblemsSolvers class and other useful functions from simopt.experiment_base import ProblemsSolvers, plot_solvability_profiles +from simopt.models.san import SANLongestPath +from simopt.models.smf_origin import SMF_Max +from simopt.models.rmitd import RMITDMaxRevenue +from simopt.models.mm1queue import MM1MinMeanSojournTime +# from simopt.models.san_1 import SANLongestPath1 +from simopt.experiment_base import ProblemSolver, plot_area_scatterplots, post_normalize, plot_progress_curves, plot_solvability_cdfs, read_experiment_results, plot_solvability_profiles, plot_terminal_scatterplots, plot_terminal_progress + # !! When testing a new solver/problem, first go to directory.py. # There you should add the import statement and an entry in the respective @@ -20,28 +27,42 @@ # Specify the names of the solver and problem to test. # These names are strings and should match those input to directory.py. # Ex: -solver_names = ["RNDSRCH", "ASTRODF", "NELDMD"] -problem_names = ["CNTNEWS-1", "SAN-1"] +# solver_names = ["ASTRODF", "Boom-PGD", "Boom-FW", "RNDSRCH", "GASSO", "NELDMD"] +solver_names = ['ACTIVESET', 'Boom-FW', 'Boom-PGD', 'PGD-SS'] +problem_names = ["OPENJACKSON-1", 'SAN-1', 'SMFCVX-1', 'SMF-1', 'CASCADE-1', 'NETWORK-1'] +# problem_names = ["DYNAMNEWS-1", "SSCONT-1", "SAN-1"] "OPENJ-1" +# problem_names = ["SMF-1", "SAN-1", "RMITD-1", "MM1-1"] +# problems = [SANLongestPath, SMF_Max, RMITDMaxRevenue, MM1MinMeanSojournTime] # Initialize an instance of the experiment class. mymetaexperiment = ProblemsSolvers(solver_names, problem_names) +# mymetaexperiment = ProblemsSolvers(solver_names=solver_names, problems = problems) -# Write to log file. 
-mymetaexperiment.log_group_experiment_results() +n_solvers = len(mymetaexperiment.experiments) +n_problems = len(mymetaexperiment.experiments[0]) # Run a fixed number of macroreplications of each solver on each problem. -mymetaexperiment.run(n_macroreps=3) +mymetaexperiment.run(n_macroreps=20) + print("Post-processing results.") # Run a fixed number of postreplications at all recommended solutions. -mymetaexperiment.post_replicate(n_postreps=50) +mymetaexperiment.post_replicate(n_postreps=20) # Find an optimal solution x* for normalization. -mymetaexperiment.post_normalize(n_postreps_init_opt=50) +mymetaexperiment.post_normalize(n_postreps_init_opt=20) print("Plotting results.") # Produce basic plots of the solvers on the problems. -plot_solvability_profiles(experiments=mymetaexperiment.experiments, plot_type="cdf_solvability") +plot_solvability_profiles(experiments=mymetaexperiment.experiments, plot_type="cdf_solvability") # cdf_solvability +# plot_solvability_profiles(experiments=mymetaexperiment.experiments, plot_type="diff_quantile_solvability", ref_solver='RNDSRCH', all_in_one=True, plot_CIs=True, print_max_hw=True) +# plot_terminal_scatterplots(experiments=mymetaexperiment.experiments, all_in_one=True) + +# Plot the mean progress curves of the solvers on the problems. +CI_param = True +for i in range(n_problems): + plot_progress_curves([mymetaexperiment.experiments[solver_idx][i] for solver_idx in range(n_solvers)], plot_type="mean", all_in_one=True, plot_CIs=CI_param, print_max_hw=True) + # Plots will be saved in the folder experiments/plots. print("Finished. 
Plots can be found in experiments/plots folder.") \ No newline at end of file diff --git a/demo/demo_radom_model.py b/demo/demo_radom_model.py new file mode 100644 index 000000000..fc55255e5 --- /dev/null +++ b/demo/demo_radom_model.py @@ -0,0 +1 @@ +{"payload":{"allShortcutsEnabled":true,"fileTree":{"":{"items":[{"name":"data_farming_experiments","path":"data_farming_experiments","contentType":"directory"},{"name":"demo","path":"demo","contentType":"directory"},{"name":"dist","path":"dist","contentType":"directory"},{"name":"docs","path":"docs","contentType":"directory"},{"name":"experiments","path":"experiments","contentType":"directory"},{"name":"notebooks","path":"notebooks","contentType":"directory"},{"name":"simopt","path":"simopt","contentType":"directory"},{"name":"simoptlib.egg-info","path":"simoptlib.egg-info","contentType":"directory"},{"name":"workshop","path":"workshop","contentType":"directory"},{"name":".gitignore","path":".gitignore","contentType":"file"},{"name":"LICENSE","path":"LICENSE","contentType":"file"},{"name":"README.md","path":"README.md","contentType":"file"},{"name":"demo_radom_model.py","path":"demo_radom_model.py","contentType":"file"},{"name":"demo_random_problem.py","path":"demo_random_problem.py","contentType":"file"},{"name":"demo_random_problem_solver.py","path":"demo_random_problem_solver.py","contentType":"file"},{"name":"demo_user.py","path":"demo_user.py","contentType":"file"},{"name":"pyproject.toml","path":"pyproject.toml","contentType":"file"}],"totalCount":17}},"fileTreeProcessingTime":2.965958,"foldersToFetch":[],"reducedMotionEnabled":"system","repo":{"id":194012165,"defaultBranch":"master","name":"simopt","ownerLogin":"simopt-admin","currentUserCanPush":true,"isFork":false,"isEmpty":false,"createdAt":"2019-06-26T22:55:30.000-04:00","ownerAvatar":"https://avatars.githubusercontent.com/u/52267122?v=4","public":true,"private":false,"isOrgOwned":false},"symbolsExpanded":false,"treeExpanded":true,"refInfo":{"name":"python_de
v_litong","listCacheKey":"v0:1692137230.0","canEdit":true,"refType":"branch","currentOid":"382561d40918dac6fcfb54e7c1f873bdca0f46e9"},"path":"demo_radom_model.py","currentUser":{"id":46491025,"login":"liulitong-Jessie","userEmail":"118010185@link.cuhk.edu.cn"},"blob":{"rawLines":["\"\"\"","This script is intended to help with debugging a random model.","It imports a model, initializes a model object with given factors,","sets up pseudorandom number generators, and runs one or more replications.","\"\"\"","","\"\"\"","Instead of modifying the problem and model class, we modify the demo_model and demo_problems.","\"\"\"","","import sys","import os.path as o","sys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), \"..\")))","","import numpy as np","# Import random number generator.","# from mrg32k3a.mrg32k3a import MRG32k3a","from rng.mrg32k3a import MRG32k3a","","# Import model. ","from simopt.models.san_2 import SAN","","fixed_factors = {}","mymodel = SAN(fixed_factors = fixed_factors, random=True)","","# from models. import ","# Replace with name of .py file containing model class.","# Replace with name of model class.","","# Fix factors of model. Specify a dictionary of factors.","","# fixed_factors = {} # Resort to all default values.","# Look at Model class definition to get names of factors.","","# Initialize an instance of the specified model class.","","# mymodel = (fixed_factors)","# Replace with name of model class.","","# Working example for MM1 model.","# -----------------------------------------------","# from simopt.models.mm1queue import MM1Queue","# fixed_factors = {\"lambda\": 3.0, \"mu\": 8.0}","# mymodel = MM1Queue(fixed_factors)","# -----------------------------------------------","","# The rest of this script requires no changes.","","# Check that all factors describe a simulatable model.","# Check fixed factors individually.","","for key, value in mymodel.factors.items():"," print(f\"The factor {key} is set as {value}. 
Is this simulatable? {bool(mymodel.check_simulatable_factor(key))}.\")","# Check all factors collectively.","print(f\"Is the specified model simulatable? {bool(mymodel.check_simulatable_factors())}.\")","","# Create a list of RNG objects for the simulation model to use when","# running replications.","rng_list = [MRG32k3a(s_ss_sss_index=[0, ss, 0]) for ss in range(mymodel.n_rngs)]","rng_list2 = [MRG32k3a(s_ss_sss_index=[2, 4 + ss, 0]) for ss in range(mymodel.n_random)]","","mymodel.attach_rng(rng_list2)","responses, gradients = mymodel.replicate(rng_list)","print(\"\\nFor a single replication:\")","print(\"\\nResponses:\")","for key, value in responses.items():"," print(f\"\\t {key} is {value}.\")"],"stylingDirectives":[[{"start":0,"end":3,"cssClass":"pl-s"}],[{"start":0,"end":62,"cssClass":"pl-s"}],[{"start":0,"end":66,"cssClass":"pl-s"}],[{"start":0,"end":74,"cssClass":"pl-s"}],[{"start":0,"end":3,"cssClass":"pl-s"}],[],[{"start":0,"end":3,"cssClass":"pl-s"}],[{"start":0,"end":93,"cssClass":"pl-s"}],[{"start":0,"end":3,"cssClass":"pl-s"}],[],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":10,"cssClass":"pl-s1"}],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":9,"cssClass":"pl-s1"},{"start":10,"end":14,"cssClass":"pl-s1"},{"start":15,"end":17,"cssClass":"pl-k"},{"start":18,"end":19,"cssClass":"pl-s1"}],[{"start":0,"end":3,"cssClass":"pl-s1"},{"start":4,"end":8,"cssClass":"pl-s1"},{"start":9,"end":15,"cssClass":"pl-en"},{"start":16,"end":17,"cssClass":"pl-s1"},{"start":18,"end":25,"cssClass":"pl-en"},{"start":26,"end":27,"cssClass":"pl-s1"},{"start":28,"end":32,"cssClass":"pl-en"},{"start":33,"end":34,"cssClass":"pl-s1"},{"start":35,"end":42,"cssClass":"pl-en"},{"start":43,"end":46,"cssClass":"pl-s1"},{"start":47,"end":54,"cssClass":"pl-s1"},{"start":55,"end":63,"cssClass":"pl-s1"},{"start":65,"end":73,"cssClass":"pl-s1"},{"start":76,"end":80,"cssClass":"pl-s"}],[],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":12,"cssClass":"pl-s1"},{"start
":13,"end":15,"cssClass":"pl-k"},{"start":16,"end":18,"cssClass":"pl-s1"}],[{"start":0,"end":33,"cssClass":"pl-c"}],[{"start":0,"end":40,"cssClass":"pl-c"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":8,"cssClass":"pl-s1"},{"start":9,"end":17,"cssClass":"pl-s1"},{"start":18,"end":24,"cssClass":"pl-k"},{"start":25,"end":33,"cssClass":"pl-v"}],[],[{"start":0,"end":19,"cssClass":"pl-c"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":18,"cssClass":"pl-s1"},{"start":19,"end":24,"cssClass":"pl-s1"},{"start":25,"end":31,"cssClass":"pl-k"},{"start":32,"end":35,"cssClass":"pl-v"}],[],[{"start":0,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-c1"}],[{"start":0,"end":7,"cssClass":"pl-s1"},{"start":8,"end":9,"cssClass":"pl-c1"},{"start":10,"end":13,"cssClass":"pl-v"},{"start":14,"end":27,"cssClass":"pl-s1"},{"start":28,"end":29,"cssClass":"pl-c1"},{"start":30,"end":43,"cssClass":"pl-s1"},{"start":45,"end":51,"cssClass":"pl-s1"},{"start":51,"end":52,"cssClass":"pl-c1"},{"start":52,"end":56,"cssClass":"pl-c1"}],[],[{"start":0,"end":50,"cssClass":"pl-c"}],[{"start":0,"end":66,"cssClass":"pl-c"}],[{"start":0,"end":54,"cssClass":"pl-c"}],[],[{"start":0,"end":56,"cssClass":"pl-c"}],[],[{"start":0,"end":53,"cssClass":"pl-c"}],[{"start":0,"end":57,"cssClass":"pl-c"}],[],[{"start":0,"end":54,"cssClass":"pl-c"}],[],[{"start":0,"end":45,"cssClass":"pl-c"}],[{"start":0,"end":54,"cssClass":"pl-c"}],[],[{"start":0,"end":32,"cssClass":"pl-c"}],[{"start":0,"end":49,"cssClass":"pl-c"}],[{"start":0,"end":45,"cssClass":"pl-c"}],[{"start":0,"end":44,"cssClass":"pl-c"}],[{"start":0,"end":35,"cssClass":"pl-c"}],[{"start":0,"end":49,"cssClass":"pl-c"}],[],[{"start":0,"end":46,"cssClass":"pl-c"}],[],[{"start":0,"end":54,"cssClass":"pl-c"}],[{"start":0,"end":35,"cssClass":"pl-c"}],[],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":7,"cssClass":"pl-s1"},{"start":9,"end":14,"cssClass":"pl-s1"},{"start":15,"end":17,"c
ssClass":"pl-c1"},{"start":18,"end":25,"cssClass":"pl-s1"},{"start":26,"end":33,"cssClass":"pl-s1"},{"start":34,"end":39,"cssClass":"pl-en"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":116,"cssClass":"pl-s"},{"start":23,"end":28,"cssClass":"pl-s1"},{"start":23,"end":24,"cssClass":"pl-kos"},{"start":24,"end":27,"cssClass":"pl-s1"},{"start":27,"end":28,"cssClass":"pl-kos"},{"start":39,"end":46,"cssClass":"pl-s1"},{"start":39,"end":40,"cssClass":"pl-kos"},{"start":40,"end":45,"cssClass":"pl-s1"},{"start":45,"end":46,"cssClass":"pl-kos"},{"start":69,"end":114,"cssClass":"pl-s1"},{"start":69,"end":70,"cssClass":"pl-kos"},{"start":70,"end":74,"cssClass":"pl-en"},{"start":75,"end":82,"cssClass":"pl-s1"},{"start":83,"end":107,"cssClass":"pl-en"},{"start":108,"end":111,"cssClass":"pl-s1"},{"start":113,"end":114,"cssClass":"pl-kos"}],[{"start":0,"end":33,"cssClass":"pl-c"}],[{"start":0,"end":5,"cssClass":"pl-en"},{"start":6,"end":89,"cssClass":"pl-s"},{"start":44,"end":87,"cssClass":"pl-s1"},{"start":44,"end":45,"cssClass":"pl-kos"},{"start":45,"end":49,"cssClass":"pl-en"},{"start":50,"end":57,"cssClass":"pl-s1"},{"start":58,"end":83,"cssClass":"pl-en"},{"start":86,"end":87,"cssClass":"pl-kos"}],[],[{"start":0,"end":67,"cssClass":"pl-c"}],[{"start":0,"end":23,"cssClass":"pl-c"}],[{"start":0,"end":8,"cssClass":"pl-s1"},{"start":9,"end":10,"cssClass":"pl-c1"},{"start":12,"end":20,"cssClass":"pl-v"},{"start":21,"end":35,"cssClass":"pl-s1"},{"start":35,"end":36,"cssClass":"pl-c1"},{"start":37,"end":38,"cssClass":"pl-c1"},{"start":40,"end":42,"cssClass":"pl-s1"},{"start":44,"end":45,"cssClass":"pl-c1"},{"start":48,"end":51,"cssClass":"pl-k"},{"start":52,"end":54,"cssClass":"pl-s1"},{"start":55,"end":57,"cssClass":"pl-c1"},{"start":58,"end":63,"cssClass":"pl-en"},{"start":64,"end":71,"cssClass":"pl-s1"},{"start":72,"end":78,"cssClass":"pl-s1"}],[{"start":0,"end":9,"cssClass":"pl-s1"},{"start":10,"end":11,"cssClass":"pl-c1"},{"start":13,"end":21,"cssClass":"pl-v"},{"s
tart":22,"end":36,"cssClass":"pl-s1"},{"start":36,"end":37,"cssClass":"pl-c1"},{"start":38,"end":39,"cssClass":"pl-c1"},{"start":41,"end":42,"cssClass":"pl-c1"},{"start":43,"end":44,"cssClass":"pl-c1"},{"start":45,"end":47,"cssClass":"pl-s1"},{"start":49,"end":50,"cssClass":"pl-c1"},{"start":53,"end":56,"cssClass":"pl-k"},{"start":57,"end":59,"cssClass":"pl-s1"},{"start":60,"end":62,"cssClass":"pl-c1"},{"start":63,"end":68,"cssClass":"pl-en"},{"start":69,"end":76,"cssClass":"pl-s1"},{"start":77,"end":85,"cssClass":"pl-s1"}],[],[{"start":0,"end":7,"cssClass":"pl-s1"},{"start":8,"end":18,"cssClass":"pl-en"},{"start":19,"end":28,"cssClass":"pl-s1"}],[{"start":0,"end":9,"cssClass":"pl-s1"},{"start":11,"end":20,"cssClass":"pl-s1"},{"start":21,"end":22,"cssClass":"pl-c1"},{"start":23,"end":30,"cssClass":"pl-s1"},{"start":31,"end":40,"cssClass":"pl-en"},{"start":41,"end":49,"cssClass":"pl-s1"}],[{"start":0,"end":5,"cssClass":"pl-en"},{"start":6,"end":35,"cssClass":"pl-s"},{"start":7,"end":9,"cssClass":"pl-cce"}],[{"start":0,"end":5,"cssClass":"pl-en"},{"start":6,"end":20,"cssClass":"pl-s"},{"start":7,"end":9,"cssClass":"pl-cce"}],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":7,"cssClass":"pl-s1"},{"start":9,"end":14,"cssClass":"pl-s1"},{"start":15,"end":17,"cssClass":"pl-c1"},{"start":18,"end":27,"cssClass":"pl-s1"},{"start":28,"end":33,"cssClass":"pl-en"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":33,"cssClass":"pl-s"},{"start":12,"end":14,"cssClass":"pl-cce"},{"start":15,"end":20,"cssClass":"pl-s1"},{"start":15,"end":16,"cssClass":"pl-kos"},{"start":16,"end":19,"cssClass":"pl-s1"},{"start":19,"end":20,"cssClass":"pl-kos"},{"start":24,"end":31,"cssClass":"pl-s1"},{"start":24,"end":25,"cssClass":"pl-kos"},{"start":25,"end":30,"cssClass":"pl-s1"},{"start":30,"end":31,"cssClass":"pl-kos"}],[]],"csv":null,"csvError":null,"dependabotInfo":{"showConfigurationBanner":null,"configFilePath":null,"networkDependabotPath":"/simopt-admin/simopt/network/update
s","dismissConfigurationNoticePath":"/settings/dismiss-notice/dependabot_configuration_notice","configurationNoticeDismissed":false,"repoAlertsPath":"/simopt-admin/simopt/security/dependabot","repoSecurityAndAnalysisPath":"/simopt-admin/simopt/settings/security_analysis","repoOwnerIsOrg":false,"currentUserCanAdminRepo":false},"displayName":"demo_radom_model.py","displayUrl":"https://github.com/simopt-admin/simopt/blob/python_dev_litong/demo_radom_model.py?raw=true","headerInfo":{"blobSize":"2.31 KB","deleteInfo":{"deleteTooltip":"Delete this file"},"editInfo":{"editTooltip":"Edit this file"},"ghDesktopPath":"x-github-client://openRepo/https://github.com/simopt-admin/simopt?branch=python_dev_litong&filepath=demo_radom_model.py","gitLfsPath":null,"onBranch":true,"shortPath":"2c955a3","siteNavLoginPath":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsimopt-admin%2Fsimopt%2Fblob%2Fpython_dev_litong%2Fdemo_radom_model.py","isCSV":false,"isRichtext":false,"toc":null,"lineInfo":{"truncatedLoc":"68","truncatedSloc":"51"},"mode":"file"},"image":false,"isCodeownersFile":null,"isPlain":false,"isValidLegacyIssueTemplate":false,"issueTemplateHelpUrl":"https://docs.github.com/articles/about-issue-and-pull-request-templates","issueTemplate":null,"discussionTemplate":null,"language":"Python","languageID":303,"large":false,"loggedIn":true,"newDiscussionPath":"/simopt-admin/simopt/discussions/new","newIssuePath":"/simopt-admin/simopt/issues/new","planSupportInfo":{"repoIsFork":null,"repoOwnedByCurrentUser":null,"requestFullPath":"/simopt-admin/simopt/blob/python_dev_litong/demo_radom_model.py","showFreeOrgGatedFeatureMessage":null,"showPlanSupportBanner":null,"upgradeDataAttributes":null,"upgradePath":null},"publishBannersInfo":{"dismissActionNoticePath":"/settings/dismiss-notice/publish_action_from_dockerfile","dismissStackNoticePath":"/settings/dismiss-notice/publish_stack_from_file","releasePath":"/simopt-admin/simopt/releases/new?marketplace=true","showPublishActionBanner":false,"s
howPublishStackBanner":false},"renderImageOrRaw":false,"richText":null,"renderedFileInfo":null,"shortPath":null,"tabSize":8,"topBannersInfo":{"overridingGlobalFundingFile":false,"globalPreferredFundingPath":null,"repoOwner":"simopt-admin","repoName":"simopt","showInvalidCitationWarning":false,"citationHelpUrl":"https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/creating-a-repository-on-github/about-citation-files","showDependabotConfigurationBanner":null,"actionsOnboardingTip":null},"truncated":false,"viewable":true,"workflowRedirectUrl":null,"symbols":{"timedOut":false,"notAnalyzed":false,"symbols":[{"name":"fixed_factors","kind":"constant","identStart":619,"identEnd":632,"extentStart":619,"extentEnd":637,"fullyQualifiedName":"fixed_factors","identUtf16":{"start":{"lineNumber":22,"utf16Col":0},"end":{"lineNumber":22,"utf16Col":13}},"extentUtf16":{"start":{"lineNumber":22,"utf16Col":0},"end":{"lineNumber":22,"utf16Col":18}}},{"name":"mymodel","kind":"constant","identStart":638,"identEnd":645,"extentStart":638,"extentEnd":695,"fullyQualifiedName":"mymodel","identUtf16":{"start":{"lineNumber":23,"utf16Col":0},"end":{"lineNumber":23,"utf16Col":7}},"extentUtf16":{"start":{"lineNumber":23,"utf16Col":0},"end":{"lineNumber":23,"utf16Col":57}}},{"name":"rng_list","kind":"constant","identStart":1980,"identEnd":1988,"extentStart":1980,"extentEnd":2060,"fullyQualifiedName":"rng_list","identUtf16":{"start":{"lineNumber":58,"utf16Col":0},"end":{"lineNumber":58,"utf16Col":8}},"extentUtf16":{"start":{"lineNumber":58,"utf16Col":0},"end":{"lineNumber":58,"utf16Col":80}}},{"name":"rng_list2","kind":"constant","identStart":2061,"identEnd":2070,"extentStart":2061,"extentEnd":2148,"fullyQualifiedName":"rng_list2","identUtf16":{"start":{"lineNumber":59,"utf16Col":0},"end":{"lineNumber":59,"utf16Col":9}},"extentUtf16":{"start":{"lineNumber":59,"utf16Col":0},"end":{"lineNumber":59,"utf16Col":87}}}]}},"copilotInfo":{"notices":{"codeViewPopover":{"dismissed":false,
"dismissPath":"/settings/dismiss-notice/code_view_copilot_popover"}},"userAccess":{"accessAllowed":false,"hasSubscriptionEnded":false,"orgHasCFBAccess":false,"userHasCFIAccess":false,"userHasOrgs":false,"userIsOrgAdmin":false,"userIsOrgMember":false,"business":null,"featureRequestInfo":null}},"csrf_tokens":{"/simopt-admin/simopt/branches":{"post":"gVU58KclJBN61sDrSwOOUJiybVpLcV12s3LAhNPf6fQpWv1_SSZaFIlQyu69yIJl3x2kP1olFHzV3v_ewpZEyw"},"/repos/preferences":{"post":"3OBW80b3l3llVA6hLg_ye5J6A1VDX2zjpk7KenloWtpICcN373IG90UWVbebxESBrNN6w737tvradEkTjKGfXg"}}},"title":"simopt/demo_radom_model.py at python_dev_litong · simopt-admin/simopt"} \ No newline at end of file diff --git a/demo/demo_random_problem.py b/demo/demo_random_problem.py new file mode 100644 index 000000000..aadc6f039 --- /dev/null +++ b/demo/demo_random_problem.py @@ -0,0 +1 @@ +{"payload":{"allShortcutsEnabled":true,"fileTree":{"":{"items":[{"name":"data_farming_experiments","path":"data_farming_experiments","contentType":"directory"},{"name":"demo","path":"demo","contentType":"directory"},{"name":"dist","path":"dist","contentType":"directory"},{"name":"docs","path":"docs","contentType":"directory"},{"name":"experiments","path":"experiments","contentType":"directory"},{"name":"notebooks","path":"notebooks","contentType":"directory"},{"name":"simopt","path":"simopt","contentType":"directory"},{"name":"simoptlib.egg-info","path":"simoptlib.egg-info","contentType":"directory"},{"name":"workshop","path":"workshop","contentType":"directory"},{"name":".gitignore","path":".gitignore","contentType":"file"},{"name":"LICENSE","path":"LICENSE","contentType":"file"},{"name":"README.md","path":"README.md","contentType":"file"},{"name":"demo_radom_model.py","path":"demo_radom_model.py","contentType":"file"},{"name":"demo_random_problem.py","path":"demo_random_problem.py","contentType":"file"},{"name":"demo_random_problem_solver.py","path":"demo_random_problem_solver.py","contentType":"file"},{"name":"demo_user.py","pat
h":"demo_user.py","contentType":"file"},{"name":"pyproject.toml","path":"pyproject.toml","contentType":"file"}],"totalCount":17}},"fileTreeProcessingTime":5.38909,"foldersToFetch":[],"reducedMotionEnabled":"system","repo":{"id":194012165,"defaultBranch":"master","name":"simopt","ownerLogin":"simopt-admin","currentUserCanPush":true,"isFork":false,"isEmpty":false,"createdAt":"2019-06-26T22:55:30.000-04:00","ownerAvatar":"https://avatars.githubusercontent.com/u/52267122?v=4","public":true,"private":false,"isOrgOwned":false},"symbolsExpanded":false,"treeExpanded":true,"refInfo":{"name":"python_dev_litong","listCacheKey":"v0:1692137230.0","canEdit":true,"refType":"branch","currentOid":"382561d40918dac6fcfb54e7c1f873bdca0f46e9"},"path":"demo_random_problem.py","currentUser":{"id":46491025,"login":"liulitong-Jessie","userEmail":"118010185@link.cuhk.edu.cn"},"blob":{"rawLines":["\"\"\"","This script is intended to help with debugging a random problem.","It imports a random problem, initializes a problem object with given factors,","sets up pseudorandom number generators, and runs multiple replications","at a given solution.","\"\"\"","","import sys","import os.path as o","sys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), \"..\")))","","# Import random number generator.","# from mrg32k3a.mrg32k3a import MRG32k3a","from rng.mrg32k3a import MRG32k3a","","# Import the Solution class.","from simopt.base import Solution","","# Import problem.","# from models. import ","# Replace with name of .py file containing problem class.","# Replace with name of problem class.","","# Fix factors of problem. 
Specify a dictionary of factors.","","# fixed_factors = {} # Resort to all default values.","# Look at Problem class definition to get names of factors.","","# Initialize an instance of the specified problem class.","","# myproblem = (fixed_factors=fixed_factors)","# Replace with name of problem class.","","# Initialize a solution x corresponding to the problem.","","# Look at the Problem class definition to identify the decision variables.","# x will be a tuple consisting of the decision variables.","","# The following line does not need to be changed.","# mysolution = Solution(x, myproblem)","","# -----------------------------------------------","","from simopt.models.san_2 import SANLongestPath # Change this import command correspondingly","","def rebase(random_rng, n):"," new_rngs = []"," for rng in random_rng:"," stream_index = rng.s_ss_sss_index[0]"," substream_index = rng.s_ss_sss_index[1]"," subsubstream_index = rng.s_ss_sss_index[2]"," new_rngs.append(MRG32k3a(s_ss_sss_index=[stream_index, substream_index + n, subsubstream_index]))"," random_rng = new_rngs"," return random_rng","","n_inst = 5 # The number of random instances you want to generate","","model_fixed_factors = {\"num_nodes\": 9, \"num_arcs\": 14} # Change to empty {} if want to use the default value ","myproblem = SANLongestPath(model_fixed_factors=model_fixed_factors, random=True) # Change to the imported problem","","rng_list = [MRG32k3a(s_ss_sss_index=[0, ss, 0]) for ss in range(myproblem.model.n_rngs)]","random_rng = [MRG32k3a(s_ss_sss_index=[2, 4, ss]) for ss in range(myproblem.model.n_random, myproblem.model.n_random + myproblem.n_rngs)]","rng_list2 = [MRG32k3a(s_ss_sss_index=[2, 4, ss]) for ss in range(myproblem.model.n_random)]","","# Generate n_inst random problem instances","for i in range(n_inst):"," random_rng = rebase(random_rng, 1)"," rng_list2 = rebase(rng_list2, 1)"," myproblem = SANLongestPath(model_fixed_factors=model_fixed_factors, random=True, random_rng=rng_list2) # Change 
to the imported problem"," myproblem.attach_rngs(random_rng)"," x = (8,) * myproblem.dim # Change the initial value according to the dimension"," mysolution = Solution(x, myproblem)"," mysolution.attach_rngs(rng_list, copy=False)"," "," # Simulate a fixed number of replications (n_reps) at the solution x."," n_reps = 10",""," myproblem.simulate(mysolution, m=n_reps)",""," # Print results to console."," print(mysolution.objectives_mean[0])"," print(type(mysolution))"," print(f\"Ran {n_reps} replications of the {myproblem.name} problem at solution x = {x}.\\n\")"," # print(f\"The mean objective estimate was {round(mysolution.objectives_mean[0], 4)} with standard error {round(mysolution.objectives_stderr[0], 4)}.\") "," print(\"The individual observations of the objective were:\")"," for idx in range(n_reps):"," print(f\"\\t {round(mysolution.objectives[idx][0], 4)}\")"," if myproblem.gradient_available:"," print(\"\\nThe individual observations of the gradients of the objective were:\")"," for idx in range(n_reps):"," print(f\"\\t {[round(g, 4) for g in mysolution.objectives_gradients[idx][0]]}\")"," else:"," print(\"\\nThis problem has no known gradients.\")"," if myproblem.n_stochastic_constraints > 0:"," print(f\"\\nThis problem has {myproblem.n_stochastic_constraints} stochastic constraints of the form E[LHS] <= 0.\")"," for stc_idx in range(myproblem.n_stochastic_constraints):"," print(f\"\\tFor stochastic constraint #{stc_idx + 1}, the mean of the LHS was {round(mysolution.stoch_constraints_mean[stc_idx], 4)} with standard error {round(mysolution.stoch_constraints_stderr[stc_idx], 4)}.\")"," print(\"\\tThe observations of the LHSs were:\")"," for idx in range(n_reps):"," print(f\"\\t\\t {round(mysolution.stoch_constraints[idx][stc_idx], 4)}\")"," else:"," print(\"\\nThis problem has no stochastic 
constraints.\")"],"stylingDirectives":[[{"start":0,"end":3,"cssClass":"pl-s"}],[{"start":0,"end":64,"cssClass":"pl-s"}],[{"start":0,"end":77,"cssClass":"pl-s"}],[{"start":0,"end":70,"cssClass":"pl-s"}],[{"start":0,"end":20,"cssClass":"pl-s"}],[{"start":0,"end":3,"cssClass":"pl-s"}],[],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":10,"cssClass":"pl-s1"}],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":9,"cssClass":"pl-s1"},{"start":10,"end":14,"cssClass":"pl-s1"},{"start":15,"end":17,"cssClass":"pl-k"},{"start":18,"end":19,"cssClass":"pl-s1"}],[{"start":0,"end":3,"cssClass":"pl-s1"},{"start":4,"end":8,"cssClass":"pl-s1"},{"start":9,"end":15,"cssClass":"pl-en"},{"start":16,"end":17,"cssClass":"pl-s1"},{"start":18,"end":25,"cssClass":"pl-en"},{"start":26,"end":27,"cssClass":"pl-s1"},{"start":28,"end":32,"cssClass":"pl-en"},{"start":33,"end":34,"cssClass":"pl-s1"},{"start":35,"end":42,"cssClass":"pl-en"},{"start":43,"end":46,"cssClass":"pl-s1"},{"start":47,"end":54,"cssClass":"pl-s1"},{"start":55,"end":63,"cssClass":"pl-s1"},{"start":65,"end":73,"cssClass":"pl-s1"},{"start":76,"end":80,"cssClass":"pl-s"}],[],[{"start":0,"end":33,"cssClass":"pl-c"}],[{"start":0,"end":40,"cssClass":"pl-c"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":8,"cssClass":"pl-s1"},{"start":9,"end":17,"cssClass":"pl-s1"},{"start":18,"end":24,"cssClass":"pl-k"},{"start":25,"end":33,"cssClass":"pl-v"}],[],[{"start":0,"end":28,"cssClass":"pl-c"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":16,"cssClass":"pl-s1"},{"start":17,"end":23,"cssClass":"pl-k"},{"start":24,"end":32,"cssClass":"pl-v"}],[],[{"start":0,"end":17,"cssClass":"pl-c"}],[{"start":0,"end":52,"cssClass":"pl-c"}],[{"start":0,"end":68,"cssClass":"pl-c"}],[{"start":0,"end":58,"cssClass":"pl-c"}],[],[{"start":0,"end":58,"cssClass":"pl-c"}],[],[{"start":0,"end":53,"cssClass":"pl-c"}],[{"start":0,"end":59,"cssClass":"pl-c"}],[],[{"start":0,"end":56,"cssClass":"pl
-c"}],[],[{"start":0,"end":63,"cssClass":"pl-c"}],[{"start":0,"end":58,"cssClass":"pl-c"}],[],[{"start":0,"end":55,"cssClass":"pl-c"}],[],[{"start":0,"end":74,"cssClass":"pl-c"}],[{"start":0,"end":57,"cssClass":"pl-c"}],[],[{"start":0,"end":49,"cssClass":"pl-c"}],[{"start":0,"end":37,"cssClass":"pl-c"}],[],[{"start":0,"end":49,"cssClass":"pl-c"}],[],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":18,"cssClass":"pl-s1"},{"start":19,"end":24,"cssClass":"pl-s1"},{"start":25,"end":31,"cssClass":"pl-k"},{"start":32,"end":46,"cssClass":"pl-v"},{"start":48,"end":92,"cssClass":"pl-c"}],[],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":10,"cssClass":"pl-en"},{"start":11,"end":21,"cssClass":"pl-s1"},{"start":23,"end":24,"cssClass":"pl-s1"}],[{"start":4,"end":12,"cssClass":"pl-s1"},{"start":13,"end":14,"cssClass":"pl-c1"}],[{"start":4,"end":7,"cssClass":"pl-k"},{"start":8,"end":11,"cssClass":"pl-s1"},{"start":12,"end":14,"cssClass":"pl-c1"},{"start":15,"end":25,"cssClass":"pl-s1"}],[{"start":8,"end":20,"cssClass":"pl-s1"},{"start":21,"end":22,"cssClass":"pl-c1"},{"start":23,"end":26,"cssClass":"pl-s1"},{"start":27,"end":41,"cssClass":"pl-s1"},{"start":42,"end":43,"cssClass":"pl-c1"}],[{"start":8,"end":23,"cssClass":"pl-s1"},{"start":24,"end":25,"cssClass":"pl-c1"},{"start":26,"end":29,"cssClass":"pl-s1"},{"start":30,"end":44,"cssClass":"pl-s1"},{"start":45,"end":46,"cssClass":"pl-c1"}],[{"start":8,"end":26,"cssClass":"pl-s1"},{"start":27,"end":28,"cssClass":"pl-c1"},{"start":29,"end":32,"cssClass":"pl-s1"},{"start":33,"end":47,"cssClass":"pl-s1"},{"start":48,"end":49,"cssClass":"pl-c1"}],[{"start":8,"end":16,"cssClass":"pl-s1"},{"start":17,"end":23,"cssClass":"pl-en"},{"start":24,"end":32,"cssClass":"pl-v"},{"start":33,"end":47,"cssClass":"pl-s1"},{"start":47,"end":48,"cssClass":"pl-c1"},{"start":49,"end":61,"cssClass":"pl-s1"},{"start":63,"end":78,"cssClass":"pl-s1"},{"start":79,"end":80,"cssClass":"pl-c1"},{"start":81
,"end":82,"cssClass":"pl-s1"},{"start":84,"end":102,"cssClass":"pl-s1"}],[{"start":4,"end":14,"cssClass":"pl-s1"},{"start":15,"end":16,"cssClass":"pl-c1"},{"start":17,"end":25,"cssClass":"pl-s1"}],[{"start":4,"end":10,"cssClass":"pl-k"},{"start":11,"end":21,"cssClass":"pl-s1"}],[],[{"start":0,"end":6,"cssClass":"pl-s1"},{"start":7,"end":8,"cssClass":"pl-c1"},{"start":9,"end":10,"cssClass":"pl-c1"},{"start":12,"end":65,"cssClass":"pl-c"}],[],[{"start":0,"end":19,"cssClass":"pl-s1"},{"start":20,"end":21,"cssClass":"pl-c1"},{"start":23,"end":34,"cssClass":"pl-s"},{"start":36,"end":37,"cssClass":"pl-c1"},{"start":39,"end":49,"cssClass":"pl-s"},{"start":51,"end":53,"cssClass":"pl-c1"},{"start":56,"end":110,"cssClass":"pl-c"}],[{"start":0,"end":9,"cssClass":"pl-s1"},{"start":10,"end":11,"cssClass":"pl-c1"},{"start":12,"end":26,"cssClass":"pl-v"},{"start":27,"end":46,"cssClass":"pl-s1"},{"start":46,"end":47,"cssClass":"pl-c1"},{"start":47,"end":66,"cssClass":"pl-s1"},{"start":68,"end":74,"cssClass":"pl-s1"},{"start":74,"end":75,"cssClass":"pl-c1"},{"start":75,"end":79,"cssClass":"pl-c1"},{"start":82,"end":114,"cssClass":"pl-c"}],[],[{"start":0,"end":8,"cssClass":"pl-s1"},{"start":9,"end":10,"cssClass":"pl-c1"},{"start":12,"end":20,"cssClass":"pl-v"},{"start":21,"end":35,"cssClass":"pl-s1"},{"start":35,"end":36,"cssClass":"pl-c1"},{"start":37,"end":38,"cssClass":"pl-c1"},{"start":40,"end":42,"cssClass":"pl-s1"},{"start":44,"end":45,"cssClass":"pl-c1"},{"start":48,"end":51,"cssClass":"pl-k"},{"start":52,"end":54,"cssClass":"pl-s1"},{"start":55,"end":57,"cssClass":"pl-c1"},{"start":58,"end":63,"cssClass":"pl-en"},{"start":64,"end":73,"cssClass":"pl-s1"},{"start":74,"end":79,"cssClass":"pl-s1"},{"start":80,"end":86,"cssClass":"pl-s1"}],[{"start":0,"end":10,"cssClass":"pl-s1"},{"start":11,"end":12,"cssClass":"pl-c1"},{"start":14,"end":22,"cssClass":"pl-v"},{"start":23,"end":37,"cssClass":"pl-s1"},{"start":37,"end":38,"cssClass":"pl-c1"},{"start":39,"end":40,"cssClass":"pl-c1"},
{"start":42,"end":43,"cssClass":"pl-c1"},{"start":45,"end":47,"cssClass":"pl-s1"},{"start":50,"end":53,"cssClass":"pl-k"},{"start":54,"end":56,"cssClass":"pl-s1"},{"start":57,"end":59,"cssClass":"pl-c1"},{"start":60,"end":65,"cssClass":"pl-en"},{"start":66,"end":75,"cssClass":"pl-s1"},{"start":76,"end":81,"cssClass":"pl-s1"},{"start":82,"end":90,"cssClass":"pl-s1"},{"start":92,"end":101,"cssClass":"pl-s1"},{"start":102,"end":107,"cssClass":"pl-s1"},{"start":108,"end":116,"cssClass":"pl-s1"},{"start":117,"end":118,"cssClass":"pl-c1"},{"start":119,"end":128,"cssClass":"pl-s1"},{"start":129,"end":135,"cssClass":"pl-s1"}],[{"start":0,"end":9,"cssClass":"pl-s1"},{"start":10,"end":11,"cssClass":"pl-c1"},{"start":13,"end":21,"cssClass":"pl-v"},{"start":22,"end":36,"cssClass":"pl-s1"},{"start":36,"end":37,"cssClass":"pl-c1"},{"start":38,"end":39,"cssClass":"pl-c1"},{"start":41,"end":42,"cssClass":"pl-c1"},{"start":44,"end":46,"cssClass":"pl-s1"},{"start":49,"end":52,"cssClass":"pl-k"},{"start":53,"end":55,"cssClass":"pl-s1"},{"start":56,"end":58,"cssClass":"pl-c1"},{"start":59,"end":64,"cssClass":"pl-en"},{"start":65,"end":74,"cssClass":"pl-s1"},{"start":75,"end":80,"cssClass":"pl-s1"},{"start":81,"end":89,"cssClass":"pl-s1"}],[],[{"start":0,"end":42,"cssClass":"pl-c"}],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":5,"cssClass":"pl-s1"},{"start":6,"end":8,"cssClass":"pl-c1"},{"start":9,"end":14,"cssClass":"pl-en"},{"start":15,"end":21,"cssClass":"pl-s1"}],[{"start":4,"end":14,"cssClass":"pl-s1"},{"start":15,"end":16,"cssClass":"pl-c1"},{"start":17,"end":23,"cssClass":"pl-en"},{"start":24,"end":34,"cssClass":"pl-s1"},{"start":36,"end":37,"cssClass":"pl-c1"}],[{"start":4,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-c1"},{"start":16,"end":22,"cssClass":"pl-en"},{"start":23,"end":32,"cssClass":"pl-s1"},{"start":34,"end":35,"cssClass":"pl-c1"}],[{"start":4,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-c1"},{"start":16,"end":30,"cs
sClass":"pl-v"},{"start":31,"end":50,"cssClass":"pl-s1"},{"start":50,"end":51,"cssClass":"pl-c1"},{"start":51,"end":70,"cssClass":"pl-s1"},{"start":72,"end":78,"cssClass":"pl-s1"},{"start":78,"end":79,"cssClass":"pl-c1"},{"start":79,"end":83,"cssClass":"pl-c1"},{"start":85,"end":95,"cssClass":"pl-s1"},{"start":95,"end":96,"cssClass":"pl-c1"},{"start":96,"end":105,"cssClass":"pl-s1"},{"start":108,"end":140,"cssClass":"pl-c"}],[{"start":4,"end":13,"cssClass":"pl-s1"},{"start":14,"end":25,"cssClass":"pl-en"},{"start":26,"end":36,"cssClass":"pl-s1"}],[{"start":4,"end":5,"cssClass":"pl-s1"},{"start":6,"end":7,"cssClass":"pl-c1"},{"start":9,"end":10,"cssClass":"pl-c1"},{"start":13,"end":14,"cssClass":"pl-c1"},{"start":15,"end":24,"cssClass":"pl-s1"},{"start":25,"end":28,"cssClass":"pl-s1"},{"start":30,"end":83,"cssClass":"pl-c"}],[{"start":4,"end":14,"cssClass":"pl-s1"},{"start":15,"end":16,"cssClass":"pl-c1"},{"start":17,"end":25,"cssClass":"pl-v"},{"start":26,"end":27,"cssClass":"pl-s1"},{"start":29,"end":38,"cssClass":"pl-s1"}],[{"start":4,"end":14,"cssClass":"pl-s1"},{"start":15,"end":26,"cssClass":"pl-en"},{"start":27,"end":35,"cssClass":"pl-s1"},{"start":37,"end":41,"cssClass":"pl-s1"},{"start":41,"end":42,"cssClass":"pl-c1"},{"start":42,"end":47,"cssClass":"pl-c1"}],[],[{"start":4,"end":73,"cssClass":"pl-c"}],[{"start":4,"end":10,"cssClass":"pl-s1"},{"start":11,"end":12,"cssClass":"pl-c1"},{"start":13,"end":15,"cssClass":"pl-c1"}],[],[{"start":4,"end":13,"cssClass":"pl-s1"},{"start":14,"end":22,"cssClass":"pl-en"},{"start":23,"end":33,"cssClass":"pl-s1"},{"start":35,"end":36,"cssClass":"pl-s1"},{"start":36,"end":37,"cssClass":"pl-c1"},{"start":37,"end":43,"cssClass":"pl-s1"}],[],[{"start":4,"end":31,"cssClass":"pl-c"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":20,"cssClass":"pl-s1"},{"start":21,"end":36,"cssClass":"pl-s1"},{"start":37,"end":38,"cssClass":"pl-c1"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":14,"cssClass":"pl-en"},{"sta
rt":15,"end":25,"cssClass":"pl-s1"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":93,"cssClass":"pl-s"},{"start":16,"end":24,"cssClass":"pl-s1"},{"start":16,"end":17,"cssClass":"pl-kos"},{"start":17,"end":23,"cssClass":"pl-s1"},{"start":23,"end":24,"cssClass":"pl-kos"},{"start":45,"end":61,"cssClass":"pl-s1"},{"start":45,"end":46,"cssClass":"pl-kos"},{"start":46,"end":55,"cssClass":"pl-s1"},{"start":56,"end":60,"cssClass":"pl-s1"},{"start":60,"end":61,"cssClass":"pl-kos"},{"start":86,"end":89,"cssClass":"pl-s1"},{"start":86,"end":87,"cssClass":"pl-kos"},{"start":87,"end":88,"cssClass":"pl-s1"},{"start":88,"end":89,"cssClass":"pl-kos"},{"start":90,"end":92,"cssClass":"pl-cce"}],[{"start":4,"end":158,"cssClass":"pl-c"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":62,"cssClass":"pl-s"}],[{"start":4,"end":7,"cssClass":"pl-k"},{"start":8,"end":11,"cssClass":"pl-s1"},{"start":12,"end":14,"cssClass":"pl-c1"},{"start":15,"end":20,"cssClass":"pl-en"},{"start":21,"end":27,"cssClass":"pl-s1"}],[{"start":8,"end":13,"cssClass":"pl-en"},{"start":14,"end":61,"cssClass":"pl-s"},{"start":16,"end":18,"cssClass":"pl-cce"},{"start":19,"end":60,"cssClass":"pl-s1"},{"start":19,"end":20,"cssClass":"pl-kos"},{"start":20,"end":25,"cssClass":"pl-en"},{"start":26,"end":36,"cssClass":"pl-s1"},{"start":37,"end":47,"cssClass":"pl-s1"},{"start":48,"end":51,"cssClass":"pl-s1"},{"start":53,"end":54,"cssClass":"pl-c1"},{"start":57,"end":58,"cssClass":"pl-c1"},{"start":59,"end":60,"cssClass":"pl-kos"}],[{"start":4,"end":6,"cssClass":"pl-k"},{"start":7,"end":16,"cssClass":"pl-s1"},{"start":17,"end":35,"cssClass":"pl-s1"}],[{"start":8,"end":13,"cssClass":"pl-en"},{"start":14,"end":85,"cssClass":"pl-s"},{"start":15,"end":17,"cssClass":"pl-cce"}],[{"start":8,"end":11,"cssClass":"pl-k"},{"start":12,"end":15,"cssClass":"pl-s1"},{"start":16,"end":18,"cssClass":"pl-c1"},{"start":19,"end":24,"cssClass":"pl-en"},{"start":25,"end":31,"cssClass":"pl-s1"}],[{"start":12,"end":17,"cssClas
s":"pl-en"},{"start":18,"end":88,"cssClass":"pl-s"},{"start":20,"end":22,"cssClass":"pl-cce"},{"start":23,"end":87,"cssClass":"pl-s1"},{"start":23,"end":24,"cssClass":"pl-kos"},{"start":25,"end":30,"cssClass":"pl-en"},{"start":31,"end":32,"cssClass":"pl-s1"},{"start":34,"end":35,"cssClass":"pl-c1"},{"start":37,"end":40,"cssClass":"pl-k"},{"start":41,"end":42,"cssClass":"pl-s1"},{"start":43,"end":45,"cssClass":"pl-c1"},{"start":46,"end":56,"cssClass":"pl-s1"},{"start":57,"end":77,"cssClass":"pl-s1"},{"start":78,"end":81,"cssClass":"pl-s1"},{"start":83,"end":84,"cssClass":"pl-c1"},{"start":86,"end":87,"cssClass":"pl-kos"}],[{"start":4,"end":8,"cssClass":"pl-k"}],[{"start":8,"end":13,"cssClass":"pl-en"},{"start":14,"end":54,"cssClass":"pl-s"},{"start":15,"end":17,"cssClass":"pl-cce"}],[{"start":4,"end":6,"cssClass":"pl-k"},{"start":7,"end":16,"cssClass":"pl-s1"},{"start":17,"end":41,"cssClass":"pl-s1"},{"start":42,"end":43,"cssClass":"pl-c1"},{"start":44,"end":45,"cssClass":"pl-c1"}],[{"start":8,"end":13,"cssClass":"pl-en"},{"start":14,"end":120,"cssClass":"pl-s"},{"start":16,"end":18,"cssClass":"pl-cce"},{"start":35,"end":71,"cssClass":"pl-s1"},{"start":35,"end":36,"cssClass":"pl-kos"},{"start":36,"end":45,"cssClass":"pl-s1"},{"start":46,"end":70,"cssClass":"pl-s1"},{"start":70,"end":71,"cssClass":"pl-kos"}],[{"start":8,"end":11,"cssClass":"pl-k"},{"start":12,"end":19,"cssClass":"pl-s1"},{"start":20,"end":22,"cssClass":"pl-c1"},{"start":23,"end":28,"cssClass":"pl-en"},{"start":29,"end":38,"cssClass":"pl-s1"},{"start":39,"end":63,"cssClass":"pl-s1"}],[{"start":12,"end":17,"cssClass":"pl-en"},{"start":18,"end":221,"cssClass":"pl-s"},{"start":20,"end":22,"cssClass":"pl-cce"},{"start":49,"end":62,"cssClass":"pl-s1"},{"start":49,"end":50,"cssClass":"pl-kos"},{"start":50,"end":57,"cssClass":"pl-s1"},{"start":58,"end":59,"cssClass":"pl-c1"},{"start":60,"end":61,"cssClass":"pl-c1"},{"start":61,"end":62,"cssClass":"pl-kos"},{"start":88,"end":142,"cssClass":"pl-s1"},{"start":88
,"end":89,"cssClass":"pl-kos"},{"start":89,"end":94,"cssClass":"pl-en"},{"start":95,"end":105,"cssClass":"pl-s1"},{"start":106,"end":128,"cssClass":"pl-s1"},{"start":129,"end":136,"cssClass":"pl-s1"},{"start":139,"end":140,"cssClass":"pl-c1"},{"start":141,"end":142,"cssClass":"pl-kos"},{"start":163,"end":219,"cssClass":"pl-s1"},{"start":163,"end":164,"cssClass":"pl-kos"},{"start":164,"end":169,"cssClass":"pl-en"},{"start":170,"end":180,"cssClass":"pl-s1"},{"start":181,"end":205,"cssClass":"pl-s1"},{"start":206,"end":213,"cssClass":"pl-s1"},{"start":216,"end":217,"cssClass":"pl-c1"},{"start":218,"end":219,"cssClass":"pl-kos"}],[{"start":12,"end":17,"cssClass":"pl-en"},{"start":18,"end":56,"cssClass":"pl-s"},{"start":19,"end":21,"cssClass":"pl-cce"}],[{"start":12,"end":15,"cssClass":"pl-k"},{"start":16,"end":19,"cssClass":"pl-s1"},{"start":20,"end":22,"cssClass":"pl-c1"},{"start":23,"end":28,"cssClass":"pl-en"},{"start":29,"end":35,"cssClass":"pl-s1"}],[{"start":16,"end":21,"cssClass":"pl-en"},{"start":22,"end":84,"cssClass":"pl-s"},{"start":24,"end":26,"cssClass":"pl-cce"},{"start":26,"end":28,"cssClass":"pl-cce"},{"start":29,"end":83,"cssClass":"pl-s1"},{"start":29,"end":30,"cssClass":"pl-kos"},{"start":30,"end":35,"cssClass":"pl-en"},{"start":36,"end":46,"cssClass":"pl-s1"},{"start":47,"end":64,"cssClass":"pl-s1"},{"start":65,"end":68,"cssClass":"pl-s1"},{"start":70,"end":77,"cssClass":"pl-s1"},{"start":80,"end":81,"cssClass":"pl-c1"},{"start":82,"end":83,"cssClass":"pl-kos"}],[{"start":4,"end":8,"cssClass":"pl-k"}],[{"start":8,"end":13,"cssClass":"pl-en"},{"start":14,"end":61,"cssClass":"pl-s"},{"start":15,"end":17,"cssClass":"pl-cce"}],[]],"csv":null,"csvError":null,"dependabotInfo":{"showConfigurationBanner":null,"configFilePath":null,"networkDependabotPath":"/simopt-admin/simopt/network/updates","dismissConfigurationNoticePath":"/settings/dismiss-notice/dependabot_configuration_notice","configurationNoticeDismissed":false,"repoAlertsPath":"/simopt-admin/simopt/
security/dependabot","repoSecurityAndAnalysisPath":"/simopt-admin/simopt/settings/security_analysis","repoOwnerIsOrg":false,"currentUserCanAdminRepo":false},"displayName":"demo_random_problem.py","displayUrl":"https://github.com/simopt-admin/simopt/blob/python_dev_litong/demo_random_problem.py?raw=true","headerInfo":{"blobSize":"4.57 KB","deleteInfo":{"deleteTooltip":"Delete this file"},"editInfo":{"editTooltip":"Edit this file"},"ghDesktopPath":"x-github-client://openRepo/https://github.com/simopt-admin/simopt?branch=python_dev_litong&filepath=demo_random_problem.py","gitLfsPath":null,"onBranch":true,"shortPath":"8dfdab7","siteNavLoginPath":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsimopt-admin%2Fsimopt%2Fblob%2Fpython_dev_litong%2Fdemo_random_problem.py","isCSV":false,"isRichtext":false,"toc":null,"lineInfo":{"truncatedLoc":"103","truncatedSloc":"81"},"mode":"file"},"image":false,"isCodeownersFile":null,"isPlain":false,"isValidLegacyIssueTemplate":false,"issueTemplateHelpUrl":"https://docs.github.com/articles/about-issue-and-pull-request-templates","issueTemplate":null,"discussionTemplate":null,"language":"Python","languageID":303,"large":false,"loggedIn":true,"newDiscussionPath":"/simopt-admin/simopt/discussions/new","newIssuePath":"/simopt-admin/simopt/issues/new","planSupportInfo":{"repoIsFork":null,"repoOwnedByCurrentUser":null,"requestFullPath":"/simopt-admin/simopt/blob/python_dev_litong/demo_random_problem.py","showFreeOrgGatedFeatureMessage":null,"showPlanSupportBanner":null,"upgradeDataAttributes":null,"upgradePath":null},"publishBannersInfo":{"dismissActionNoticePath":"/settings/dismiss-notice/publish_action_from_dockerfile","dismissStackNoticePath":"/settings/dismiss-notice/publish_stack_from_file","releasePath":"/simopt-admin/simopt/releases/new?marketplace=true","showPublishActionBanner":false,"showPublishStackBanner":false},"renderImageOrRaw":false,"richText":null,"renderedFileInfo":null,"shortPath":null,"tabSize":8,"topBannersInfo":{"overridingGl
obalFundingFile":false,"globalPreferredFundingPath":null,"repoOwner":"simopt-admin","repoName":"simopt","showInvalidCitationWarning":false,"citationHelpUrl":"https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/creating-a-repository-on-github/about-citation-files","showDependabotConfigurationBanner":null,"actionsOnboardingTip":null},"truncated":false,"viewable":true,"workflowRedirectUrl":null,"symbols":{"timedOut":false,"notAnalyzed":false,"symbols":[{"name":"rebase","kind":"function","identStart":1519,"identEnd":1525,"extentStart":1515,"extentEnd":1884,"fullyQualifiedName":"rebase","identUtf16":{"start":{"lineNumber":45,"utf16Col":4},"end":{"lineNumber":45,"utf16Col":10}},"extentUtf16":{"start":{"lineNumber":45,"utf16Col":0},"end":{"lineNumber":53,"utf16Col":21}}},{"name":"n_inst","kind":"constant","identStart":1886,"identEnd":1892,"extentStart":1886,"extentEnd":1896,"fullyQualifiedName":"n_inst","identUtf16":{"start":{"lineNumber":55,"utf16Col":0},"end":{"lineNumber":55,"utf16Col":6}},"extentUtf16":{"start":{"lineNumber":55,"utf16Col":0},"end":{"lineNumber":55,"utf16Col":10}}},{"name":"model_fixed_factors","kind":"constant","identStart":1953,"identEnd":1972,"extentStart":1953,"extentEnd":2007,"fullyQualifiedName":"model_fixed_factors","identUtf16":{"start":{"lineNumber":57,"utf16Col":0},"end":{"lineNumber":57,"utf16Col":19}},"extentUtf16":{"start":{"lineNumber":57,"utf16Col":0},"end":{"lineNumber":57,"utf16Col":54}}},{"name":"myproblem","kind":"constant","identStart":2064,"identEnd":2073,"extentStart":2064,"extentEnd":2144,"fullyQualifiedName":"myproblem","identUtf16":{"start":{"lineNumber":58,"utf16Col":0},"end":{"lineNumber":58,"utf16Col":9}},"extentUtf16":{"start":{"lineNumber":58,"utf16Col":0},"end":{"lineNumber":58,"utf16Col":80}}},{"name":"rng_list","kind":"constant","identStart":2180,"identEnd":2188,"extentStart":2180,"extentEnd":2268,"fullyQualifiedName":"rng_list","identUtf16":{"start":{"lineNumber":60,"utf16Col":0},"end":{"lineNu
mber":60,"utf16Col":8}},"extentUtf16":{"start":{"lineNumber":60,"utf16Col":0},"end":{"lineNumber":60,"utf16Col":88}}},{"name":"random_rng","kind":"constant","identStart":2269,"identEnd":2279,"extentStart":2269,"extentEnd":2406,"fullyQualifiedName":"random_rng","identUtf16":{"start":{"lineNumber":61,"utf16Col":0},"end":{"lineNumber":61,"utf16Col":10}},"extentUtf16":{"start":{"lineNumber":61,"utf16Col":0},"end":{"lineNumber":61,"utf16Col":137}}},{"name":"rng_list2","kind":"constant","identStart":2407,"identEnd":2416,"extentStart":2407,"extentEnd":2498,"fullyQualifiedName":"rng_list2","identUtf16":{"start":{"lineNumber":62,"utf16Col":0},"end":{"lineNumber":62,"utf16Col":9}},"extentUtf16":{"start":{"lineNumber":62,"utf16Col":0},"end":{"lineNumber":62,"utf16Col":91}}}]}},"copilotInfo":{"notices":{"codeViewPopover":{"dismissed":false,"dismissPath":"/settings/dismiss-notice/code_view_copilot_popover"}},"userAccess":{"accessAllowed":false,"hasSubscriptionEnded":false,"orgHasCFBAccess":false,"userHasCFIAccess":false,"userHasOrgs":false,"userIsOrgAdmin":false,"userIsOrgMember":false,"business":null,"featureRequestInfo":null}},"csrf_tokens":{"/simopt-admin/simopt/branches":{"post":"nLnJdnqZpSi40o4UgPmHr1FE3pTCl3uVWXeekMnECEA0tg35lJrbL0tUhBF2MouaFusX8dPDMp8_26HK2I2lfw"},"/repos/preferences":{"post":"3WtmyQcDpjPpPbCcgZqvqe0tVJyv8s-Ofs-U8MOk9PRJgvNNroY3vcl_64o0URlT04QtClFWFZcC9ReZNm0xcA"}}},"title":"simopt/demo_random_problem.py at python_dev_litong · simopt-admin/simopt"} \ No newline at end of file diff --git a/demo/demo_random_problem_solver.py b/demo/demo_random_problem_solver.py new file mode 100644 index 000000000..4cdbd770d --- /dev/null +++ b/demo/demo_random_problem_solver.py @@ -0,0 +1 @@ 
+{"payload":{"allShortcutsEnabled":true,"fileTree":{"":{"items":[{"name":"data_farming_experiments","path":"data_farming_experiments","contentType":"directory"},{"name":"demo","path":"demo","contentType":"directory"},{"name":"dist","path":"dist","contentType":"directory"},{"name":"docs","path":"docs","contentType":"directory"},{"name":"experiments","path":"experiments","contentType":"directory"},{"name":"notebooks","path":"notebooks","contentType":"directory"},{"name":"simopt","path":"simopt","contentType":"directory"},{"name":"simoptlib.egg-info","path":"simoptlib.egg-info","contentType":"directory"},{"name":"workshop","path":"workshop","contentType":"directory"},{"name":".gitignore","path":".gitignore","contentType":"file"},{"name":"LICENSE","path":"LICENSE","contentType":"file"},{"name":"README.md","path":"README.md","contentType":"file"},{"name":"demo_radom_model.py","path":"demo_radom_model.py","contentType":"file"},{"name":"demo_random_problem.py","path":"demo_random_problem.py","contentType":"file"},{"name":"demo_random_problem_solver.py","path":"demo_random_problem_solver.py","contentType":"file"},{"name":"demo_user.py","path":"demo_user.py","contentType":"file"},{"name":"pyproject.toml","path":"pyproject.toml","contentType":"file"}],"totalCount":17}},"fileTreeProcessingTime":3.607858,"foldersToFetch":[],"reducedMotionEnabled":"system","repo":{"id":194012165,"defaultBranch":"master","name":"simopt","ownerLogin":"simopt-admin","currentUserCanPush":true,"isFork":false,"isEmpty":false,"createdAt":"2019-06-26T22:55:30.000-04:00","ownerAvatar":"https://avatars.githubusercontent.com/u/52267122?v=4","public":true,"private":false,"isOrgOwned":false},"symbolsExpanded":false,"treeExpanded":true,"refInfo":{"name":"python_dev_litong","listCacheKey":"v0:1692137230.0","canEdit":true,"refType":"branch","currentOid":"382561d40918dac6fcfb54e7c1f873bdca0f46e9"},"path":"demo_random_problem_solver.py","currentUser":{"id":46491025,"login":"liulitong-Jessie","userEmail":"11801018
5@link.cuhk.edu.cn"},"blob":{"rawLines":["\"\"\"","This script is intended to help with debugging random problems and solvers.","It create a problem-solver pairing by importing problems and runs multiple","macroreplications of the solver on the problem.","\"\"\"","","import sys","import os.path as o","import numpy as np","import os","sys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), \"..\")))","","# Import the ProblemSolver class and other useful functions","from simopt.experiment_base import ProblemSolver, read_experiment_results, post_normalize, plot_progress_curves, plot_solvability_cdfs","from rng.mrg32k3a import MRG32k3a","from simopt.models.san_2 import SANLongestPath, SANLongestPathConstr","","# !! When testing a new solver/problem, first go to directory.py.","# See directory.py for more details.","# Specify the names of the solver to test.","","# -----------------------------------------------","solver_name = \"RNDSRCH\" # Random search solver","# -----------------------------------------------","","","def rebase(random_rng, n):"," new_rngs = []"," for rng in random_rng:"," stream_index = rng.s_ss_sss_index[0]"," substream_index = rng.s_ss_sss_index[1]"," subsubstream_index = rng.s_ss_sss_index[2]"," new_rngs.append(MRG32k3a(s_ss_sss_index=[stream_index, substream_index + n, subsubstream_index]))"," random_rng = new_rngs"," return random_rng","","def strtobool(t):"," t = t.lower()"," if t == \"t\":"," return True"," else:"," return False","","n_inst = int(input('Please enter the number of instance you want to generate: '))","rand = input('Please decide whether you want to generate random instances or determinent instances (T/F): ')","rand = strtobool(rand)","","model_fixed_factors = {} # Override model factors","","myproblem = SANLongestPathConstr(random=True, model_fixed_factors=model_fixed_factors)","","random_rng = [MRG32k3a(s_ss_sss_index=[2, 4, ss]) for ss in range(myproblem.model.n_random, myproblem.model.n_random + 
myproblem.n_rngs)]","rng_list2 = [MRG32k3a(s_ss_sss_index=[2, 4, ss]) for ss in range(myproblem.model.n_random)]","","# Generate 5 random problem instances","for i in range(n_inst):"," random_rng = rebase(random_rng, 1)"," rng_list2 = rebase(rng_list2, 1)"," myproblem = SANLongestPathConstr(random=rand, random_rng=rng_list2, model_fixed_factors=model_fixed_factors)"," myproblem.attach_rngs(random_rng)"," problem_name = myproblem.model.name + str(i)"," print('-------------------------------------------------------')"," print(f\"Testing solver {solver_name} on problem {problem_name}.\")",""," # Specify file path name for storing experiment outputs in .pickle file."," file_name_path = \"experiments/outputs/\" + solver_name + \"_on_\" + problem_name + \".pickle\""," print(f\"Results will be stored as {file_name_path}.\")",""," # Initialize an instance of the experiment class."," myexperiment = ProblemSolver(solver_name=solver_name, problem=myproblem)",""," # Run a fixed number of macroreplications of the solver on the problem."," myexperiment.run(n_macroreps=100)",""," # If the solver runs have already been performed, uncomment the"," # following pair of lines (and uncommmen the myexperiment.run(...)"," # line above) to read in results from a .pickle file."," # myexperiment = read_experiment_results(file_name_path)",""," print(\"Post-processing results.\")"," # Run a fixed number of postreplications at all recommended solutions."," myexperiment.post_replicate(n_postreps=1) #200, 10"," # Find an optimal solution x* for normalization."," post_normalize([myexperiment], n_postreps_init_opt=1) #200, 5",""," # Log results."," myexperiment.log_experiment_results()",""," print(\"Optimal solution: \",np.array(myexperiment.xstar))"," print(\"Optimal Value: \", myexperiment.all_est_objectives[0])",""," print(\"Plotting results.\")"," # Produce basic plots of the solver on the problem."," plot_progress_curves(experiments=[myexperiment], plot_type=\"all\", normalize=False)"," 
plot_progress_curves(experiments=[myexperiment], plot_type=\"mean\", normalize=False)"," plot_progress_curves(experiments=[myexperiment], plot_type=\"quantile\", beta=0.90, normalize=False)"," plot_solvability_cdfs(experiments=[myexperiment], solve_tol=0.1)",""," # Plots will be saved in the folder experiments/plots."," print(\"Finished. Plots can be found in experiments/plots folder.\")"],"stylingDirectives":[[{"start":0,"end":3,"cssClass":"pl-s"}],[{"start":0,"end":75,"cssClass":"pl-s"}],[{"start":0,"end":74,"cssClass":"pl-s"}],[{"start":0,"end":47,"cssClass":"pl-s"}],[{"start":0,"end":3,"cssClass":"pl-s"}],[],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":10,"cssClass":"pl-s1"}],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":9,"cssClass":"pl-s1"},{"start":10,"end":14,"cssClass":"pl-s1"},{"start":15,"end":17,"cssClass":"pl-k"},{"start":18,"end":19,"cssClass":"pl-s1"}],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":12,"cssClass":"pl-s1"},{"start":13,"end":15,"cssClass":"pl-k"},{"start":16,"end":18,"cssClass":"pl-s1"}],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":9,"cssClass":"pl-s1"}],[{"start":0,"end":3,"cssClass":"pl-s1"},{"start":4,"end":8,"cssClass":"pl-s1"},{"start":9,"end":15,"cssClass":"pl-en"},{"start":16,"end":17,"cssClass":"pl-s1"},{"start":18,"end":25,"cssClass":"pl-en"},{"start":26,"end":27,"cssClass":"pl-s1"},{"start":28,"end":32,"cssClass":"pl-en"},{"start":33,"end":34,"cssClass":"pl-s1"},{"start":35,"end":42,"cssClass":"pl-en"},{"start":43,"end":46,"cssClass":"pl-s1"},{"start":47,"end":54,"cssClass":"pl-s1"},{"start":55,"end":63,"cssClass":"pl-s1"},{"start":65,"end":73,"cssClass":"pl-s1"},{"start":76,"end":80,"cssClass":"pl-s"}],[],[{"start":0,"end":59,"cssClass":"pl-c"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":27,"cssClass":"pl-s1"},{"start":28,"end":34,"cssClass":"pl-k"},{"start":35,"end":48,"cssClass":"pl-v"},{"start":50,"end":73,"cssClass":"pl-s1"},{"st
art":75,"end":89,"cssClass":"pl-s1"},{"start":91,"end":111,"cssClass":"pl-s1"},{"start":113,"end":134,"cssClass":"pl-s1"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":8,"cssClass":"pl-s1"},{"start":9,"end":17,"cssClass":"pl-s1"},{"start":18,"end":24,"cssClass":"pl-k"},{"start":25,"end":33,"cssClass":"pl-v"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":18,"cssClass":"pl-s1"},{"start":19,"end":24,"cssClass":"pl-s1"},{"start":25,"end":31,"cssClass":"pl-k"},{"start":32,"end":46,"cssClass":"pl-v"},{"start":48,"end":68,"cssClass":"pl-v"}],[],[{"start":0,"end":65,"cssClass":"pl-c"}],[{"start":0,"end":36,"cssClass":"pl-c"}],[{"start":0,"end":42,"cssClass":"pl-c"}],[],[{"start":0,"end":49,"cssClass":"pl-c"}],[{"start":0,"end":11,"cssClass":"pl-s1"},{"start":12,"end":13,"cssClass":"pl-c1"},{"start":14,"end":23,"cssClass":"pl-s"},{"start":25,"end":47,"cssClass":"pl-c"}],[{"start":0,"end":49,"cssClass":"pl-c"}],[],[],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":10,"cssClass":"pl-en"},{"start":11,"end":21,"cssClass":"pl-s1"},{"start":23,"end":24,"cssClass":"pl-s1"}],[{"start":4,"end":12,"cssClass":"pl-s1"},{"start":13,"end":14,"cssClass":"pl-c1"}],[{"start":4,"end":7,"cssClass":"pl-k"},{"start":8,"end":11,"cssClass":"pl-s1"},{"start":12,"end":14,"cssClass":"pl-c1"},{"start":15,"end":25,"cssClass":"pl-s1"}],[{"start":8,"end":20,"cssClass":"pl-s1"},{"start":21,"end":22,"cssClass":"pl-c1"},{"start":23,"end":26,"cssClass":"pl-s1"},{"start":27,"end":41,"cssClass":"pl-s1"},{"start":42,"end":43,"cssClass":"pl-c1"}],[{"start":8,"end":23,"cssClass":"pl-s1"},{"start":24,"end":25,"cssClass":"pl-c1"},{"start":26,"end":29,"cssClass":"pl-s1"},{"start":30,"end":44,"cssClass":"pl-s1"},{"start":45,"end":46,"cssClass":"pl-c1"}],[{"start":8,"end":26,"cssClass":"pl-s1"},{"start":27,"end":28,"cssClass":"pl-c1"},{"start":29,"end":32,"cssClass":"pl-s1"},{"start":33,"end":47,"cssClass":"pl-s1"},{"start":48,"end":49,"cssClass":"
pl-c1"}],[{"start":8,"end":16,"cssClass":"pl-s1"},{"start":17,"end":23,"cssClass":"pl-en"},{"start":24,"end":32,"cssClass":"pl-v"},{"start":33,"end":47,"cssClass":"pl-s1"},{"start":47,"end":48,"cssClass":"pl-c1"},{"start":49,"end":61,"cssClass":"pl-s1"},{"start":63,"end":78,"cssClass":"pl-s1"},{"start":79,"end":80,"cssClass":"pl-c1"},{"start":81,"end":82,"cssClass":"pl-s1"},{"start":84,"end":102,"cssClass":"pl-s1"}],[{"start":4,"end":14,"cssClass":"pl-s1"},{"start":15,"end":16,"cssClass":"pl-c1"},{"start":17,"end":25,"cssClass":"pl-s1"}],[{"start":4,"end":10,"cssClass":"pl-k"},{"start":11,"end":21,"cssClass":"pl-s1"}],[],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":13,"cssClass":"pl-en"},{"start":14,"end":15,"cssClass":"pl-s1"}],[{"start":4,"end":5,"cssClass":"pl-s1"},{"start":6,"end":7,"cssClass":"pl-c1"},{"start":8,"end":9,"cssClass":"pl-s1"},{"start":10,"end":15,"cssClass":"pl-en"}],[{"start":4,"end":6,"cssClass":"pl-k"},{"start":7,"end":8,"cssClass":"pl-s1"},{"start":9,"end":11,"cssClass":"pl-c1"},{"start":12,"end":15,"cssClass":"pl-s"}],[{"start":8,"end":14,"cssClass":"pl-k"},{"start":15,"end":19,"cssClass":"pl-c1"}],[{"start":4,"end":8,"cssClass":"pl-k"}],[{"start":8,"end":14,"cssClass":"pl-k"},{"start":15,"end":20,"cssClass":"pl-c1"}],[],[{"start":0,"end":6,"cssClass":"pl-s1"},{"start":7,"end":8,"cssClass":"pl-c1"},{"start":9,"end":12,"cssClass":"pl-en"},{"start":13,"end":18,"cssClass":"pl-en"},{"start":19,"end":79,"cssClass":"pl-s"}],[{"start":0,"end":4,"cssClass":"pl-s1"},{"start":5,"end":6,"cssClass":"pl-c1"},{"start":7,"end":12,"cssClass":"pl-en"},{"start":13,"end":107,"cssClass":"pl-s"}],[{"start":0,"end":4,"cssClass":"pl-s1"},{"start":5,"end":6,"cssClass":"pl-c1"},{"start":7,"end":16,"cssClass":"pl-en"},{"start":17,"end":21,"cssClass":"pl-s1"}],[],[{"start":0,"end":19,"cssClass":"pl-s1"},{"start":20,"end":21,"cssClass":"pl-c1"},{"start":26,"end":50,"cssClass":"pl-c"}],[],[{"start":0,"end":9,"cssClass":"pl-s1"},{"start":10,"end":11,"cssClass":
"pl-c1"},{"start":12,"end":32,"cssClass":"pl-v"},{"start":33,"end":39,"cssClass":"pl-s1"},{"start":39,"end":40,"cssClass":"pl-c1"},{"start":40,"end":44,"cssClass":"pl-c1"},{"start":46,"end":65,"cssClass":"pl-s1"},{"start":65,"end":66,"cssClass":"pl-c1"},{"start":66,"end":85,"cssClass":"pl-s1"}],[],[{"start":0,"end":10,"cssClass":"pl-s1"},{"start":11,"end":12,"cssClass":"pl-c1"},{"start":14,"end":22,"cssClass":"pl-v"},{"start":23,"end":37,"cssClass":"pl-s1"},{"start":37,"end":38,"cssClass":"pl-c1"},{"start":39,"end":40,"cssClass":"pl-c1"},{"start":42,"end":43,"cssClass":"pl-c1"},{"start":45,"end":47,"cssClass":"pl-s1"},{"start":50,"end":53,"cssClass":"pl-k"},{"start":54,"end":56,"cssClass":"pl-s1"},{"start":57,"end":59,"cssClass":"pl-c1"},{"start":60,"end":65,"cssClass":"pl-en"},{"start":66,"end":75,"cssClass":"pl-s1"},{"start":76,"end":81,"cssClass":"pl-s1"},{"start":82,"end":90,"cssClass":"pl-s1"},{"start":92,"end":101,"cssClass":"pl-s1"},{"start":102,"end":107,"cssClass":"pl-s1"},{"start":108,"end":116,"cssClass":"pl-s1"},{"start":117,"end":118,"cssClass":"pl-c1"},{"start":119,"end":128,"cssClass":"pl-s1"},{"start":129,"end":135,"cssClass":"pl-s1"}],[{"start":0,"end":9,"cssClass":"pl-s1"},{"start":10,"end":11,"cssClass":"pl-c1"},{"start":13,"end":21,"cssClass":"pl-v"},{"start":22,"end":36,"cssClass":"pl-s1"},{"start":36,"end":37,"cssClass":"pl-c1"},{"start":38,"end":39,"cssClass":"pl-c1"},{"start":41,"end":42,"cssClass":"pl-c1"},{"start":44,"end":46,"cssClass":"pl-s1"},{"start":49,"end":52,"cssClass":"pl-k"},{"start":53,"end":55,"cssClass":"pl-s1"},{"start":56,"end":58,"cssClass":"pl-c1"},{"start":59,"end":64,"cssClass":"pl-en"},{"start":65,"end":74,"cssClass":"pl-s1"},{"start":75,"end":80,"cssClass":"pl-s1"},{"start":81,"end":89,"cssClass":"pl-s1"}],[],[{"start":0,"end":37,"cssClass":"pl-c"}],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":5,"cssClass":"pl-s1"},{"start":6,"end":8,"cssClass":"pl-c1"},{"start":9,"end":14,"cssClass":"pl-en"},{"start":15,"end
":21,"cssClass":"pl-s1"}],[{"start":4,"end":14,"cssClass":"pl-s1"},{"start":15,"end":16,"cssClass":"pl-c1"},{"start":17,"end":23,"cssClass":"pl-en"},{"start":24,"end":34,"cssClass":"pl-s1"},{"start":36,"end":37,"cssClass":"pl-c1"}],[{"start":4,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-c1"},{"start":16,"end":22,"cssClass":"pl-en"},{"start":23,"end":32,"cssClass":"pl-s1"},{"start":34,"end":35,"cssClass":"pl-c1"}],[{"start":4,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-c1"},{"start":16,"end":36,"cssClass":"pl-v"},{"start":37,"end":43,"cssClass":"pl-s1"},{"start":43,"end":44,"cssClass":"pl-c1"},{"start":44,"end":48,"cssClass":"pl-s1"},{"start":50,"end":60,"cssClass":"pl-s1"},{"start":60,"end":61,"cssClass":"pl-c1"},{"start":61,"end":70,"cssClass":"pl-s1"},{"start":72,"end":91,"cssClass":"pl-s1"},{"start":91,"end":92,"cssClass":"pl-c1"},{"start":92,"end":111,"cssClass":"pl-s1"}],[{"start":4,"end":13,"cssClass":"pl-s1"},{"start":14,"end":25,"cssClass":"pl-en"},{"start":26,"end":36,"cssClass":"pl-s1"}],[{"start":4,"end":16,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":19,"end":28,"cssClass":"pl-s1"},{"start":29,"end":34,"cssClass":"pl-s1"},{"start":35,"end":39,"cssClass":"pl-s1"},{"start":40,"end":41,"cssClass":"pl-c1"},{"start":42,"end":45,"cssClass":"pl-en"},{"start":46,"end":47,"cssClass":"pl-s1"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":67,"cssClass":"pl-s"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":68,"cssClass":"pl-s"},{"start":27,"end":40,"cssClass":"pl-s1"},{"start":27,"end":28,"cssClass":"pl-kos"},{"start":28,"end":39,"cssClass":"pl-s1"},{"start":39,"end":40,"cssClass":"pl-kos"},{"start":52,"end":66,"cssClass":"pl-s1"},{"start":52,"end":53,"cssClass":"pl-kos"},{"start":53,"end":65,"cssClass":"pl-s1"},{"start":65,"end":66,"cssClass":"pl-kos"}],[],[{"start":4,"end":76,"cssClass":"pl-c"}],[{"start":4,"end":18,"cssClass":"pl-s1"},{"start":19,"end":20,"cssClass":"pl-c
1"},{"start":21,"end":43,"cssClass":"pl-s"},{"start":44,"end":45,"cssClass":"pl-c1"},{"start":46,"end":57,"cssClass":"pl-s1"},{"start":58,"end":59,"cssClass":"pl-c1"},{"start":60,"end":66,"cssClass":"pl-s"},{"start":67,"end":68,"cssClass":"pl-c1"},{"start":69,"end":81,"cssClass":"pl-s1"},{"start":82,"end":83,"cssClass":"pl-c1"},{"start":84,"end":93,"cssClass":"pl-s"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":56,"cssClass":"pl-s"},{"start":38,"end":54,"cssClass":"pl-s1"},{"start":38,"end":39,"cssClass":"pl-kos"},{"start":39,"end":53,"cssClass":"pl-s1"},{"start":53,"end":54,"cssClass":"pl-kos"}],[],[{"start":4,"end":53,"cssClass":"pl-c"}],[{"start":4,"end":16,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":19,"end":32,"cssClass":"pl-v"},{"start":33,"end":44,"cssClass":"pl-s1"},{"start":44,"end":45,"cssClass":"pl-c1"},{"start":45,"end":56,"cssClass":"pl-s1"},{"start":58,"end":65,"cssClass":"pl-s1"},{"start":65,"end":66,"cssClass":"pl-c1"},{"start":66,"end":75,"cssClass":"pl-s1"}],[],[{"start":4,"end":75,"cssClass":"pl-c"}],[{"start":4,"end":16,"cssClass":"pl-s1"},{"start":17,"end":20,"cssClass":"pl-en"},{"start":21,"end":32,"cssClass":"pl-s1"},{"start":32,"end":33,"cssClass":"pl-c1"},{"start":33,"end":36,"cssClass":"pl-c1"}],[],[{"start":4,"end":67,"cssClass":"pl-c"}],[{"start":4,"end":70,"cssClass":"pl-c"}],[{"start":4,"end":57,"cssClass":"pl-c"}],[{"start":4,"end":60,"cssClass":"pl-c"}],[],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":36,"cssClass":"pl-s"}],[{"start":4,"end":74,"cssClass":"pl-c"}],[{"start":4,"end":16,"cssClass":"pl-s1"},{"start":17,"end":31,"cssClass":"pl-en"},{"start":32,"end":42,"cssClass":"pl-s1"},{"start":42,"end":43,"cssClass":"pl-c1"},{"start":43,"end":44,"cssClass":"pl-c1"},{"start":46,"end":54,"cssClass":"pl-c"}],[{"start":4,"end":52,"cssClass":"pl-c"}],[{"start":4,"end":18,"cssClass":"pl-en"},{"start":20,"end":32,"cssClass":"pl-s1"},{"start":35,"end":54,"cssClass":"pl-s1"},{"start":54,"end"
:55,"cssClass":"pl-c1"},{"start":55,"end":56,"cssClass":"pl-c1"},{"start":58,"end":65,"cssClass":"pl-c"}],[],[{"start":4,"end":18,"cssClass":"pl-c"}],[{"start":4,"end":16,"cssClass":"pl-s1"},{"start":17,"end":39,"cssClass":"pl-en"}],[],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":30,"cssClass":"pl-s"},{"start":31,"end":33,"cssClass":"pl-s1"},{"start":34,"end":39,"cssClass":"pl-en"},{"start":40,"end":52,"cssClass":"pl-s1"},{"start":53,"end":58,"cssClass":"pl-s1"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":27,"cssClass":"pl-s"},{"start":29,"end":41,"cssClass":"pl-s1"},{"start":42,"end":60,"cssClass":"pl-s1"},{"start":61,"end":62,"cssClass":"pl-c1"}],[],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":29,"cssClass":"pl-s"}],[{"start":4,"end":55,"cssClass":"pl-c"}],[{"start":4,"end":24,"cssClass":"pl-en"},{"start":25,"end":36,"cssClass":"pl-s1"},{"start":36,"end":37,"cssClass":"pl-c1"},{"start":38,"end":50,"cssClass":"pl-s1"},{"start":53,"end":62,"cssClass":"pl-s1"},{"start":62,"end":63,"cssClass":"pl-c1"},{"start":63,"end":68,"cssClass":"pl-s"},{"start":70,"end":79,"cssClass":"pl-s1"},{"start":79,"end":80,"cssClass":"pl-c1"},{"start":80,"end":85,"cssClass":"pl-c1"}],[{"start":4,"end":24,"cssClass":"pl-en"},{"start":25,"end":36,"cssClass":"pl-s1"},{"start":36,"end":37,"cssClass":"pl-c1"},{"start":38,"end":50,"cssClass":"pl-s1"},{"start":53,"end":62,"cssClass":"pl-s1"},{"start":62,"end":63,"cssClass":"pl-c1"},{"start":63,"end":69,"cssClass":"pl-s"},{"start":71,"end":80,"cssClass":"pl-s1"},{"start":80,"end":81,"cssClass":"pl-c1"},{"start":81,"end":86,"cssClass":"pl-c1"}],[{"start":4,"end":24,"cssClass":"pl-en"},{"start":25,"end":36,"cssClass":"pl-s1"},{"start":36,"end":37,"cssClass":"pl-c1"},{"start":38,"end":50,"cssClass":"pl-s1"},{"start":53,"end":62,"cssClass":"pl-s1"},{"start":62,"end":63,"cssClass":"pl-c1"},{"start":63,"end":73,"cssClass":"pl-s"},{"start":75,"end":79,"cssClass":"pl-s1"},{"start":79,"end":80,"cssClass":"pl-c1"},{"
start":80,"end":84,"cssClass":"pl-c1"},{"start":86,"end":95,"cssClass":"pl-s1"},{"start":95,"end":96,"cssClass":"pl-c1"},{"start":96,"end":101,"cssClass":"pl-c1"}],[{"start":4,"end":25,"cssClass":"pl-en"},{"start":26,"end":37,"cssClass":"pl-s1"},{"start":37,"end":38,"cssClass":"pl-c1"},{"start":39,"end":51,"cssClass":"pl-s1"},{"start":54,"end":63,"cssClass":"pl-s1"},{"start":63,"end":64,"cssClass":"pl-c1"},{"start":64,"end":67,"cssClass":"pl-c1"}],[],[{"start":4,"end":58,"cssClass":"pl-c"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":69,"cssClass":"pl-s"}]],"csv":null,"csvError":null,"dependabotInfo":{"showConfigurationBanner":null,"configFilePath":null,"networkDependabotPath":"/simopt-admin/simopt/network/updates","dismissConfigurationNoticePath":"/settings/dismiss-notice/dependabot_configuration_notice","configurationNoticeDismissed":false,"repoAlertsPath":"/simopt-admin/simopt/security/dependabot","repoSecurityAndAnalysisPath":"/simopt-admin/simopt/settings/security_analysis","repoOwnerIsOrg":false,"currentUserCanAdminRepo":false},"displayName":"demo_random_problem_solver.py","displayUrl":"https://github.com/simopt-admin/simopt/blob/python_dev_litong/demo_random_problem_solver.py?raw=true","headerInfo":{"blobSize":"4.17 KB","deleteInfo":{"deleteTooltip":"Delete this file"},"editInfo":{"editTooltip":"Edit this 
file"},"ghDesktopPath":"x-github-client://openRepo/https://github.com/simopt-admin/simopt?branch=python_dev_litong&filepath=demo_random_problem_solver.py","gitLfsPath":null,"onBranch":true,"shortPath":"dc37c07","siteNavLoginPath":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsimopt-admin%2Fsimopt%2Fblob%2Fpython_dev_litong%2Fdemo_random_problem_solver.py","isCSV":false,"isRichtext":false,"toc":null,"lineInfo":{"truncatedLoc":"100","truncatedSloc":"79"},"mode":"file"},"image":false,"isCodeownersFile":null,"isPlain":false,"isValidLegacyIssueTemplate":false,"issueTemplateHelpUrl":"https://docs.github.com/articles/about-issue-and-pull-request-templates","issueTemplate":null,"discussionTemplate":null,"language":"Python","languageID":303,"large":false,"loggedIn":true,"newDiscussionPath":"/simopt-admin/simopt/discussions/new","newIssuePath":"/simopt-admin/simopt/issues/new","planSupportInfo":{"repoIsFork":null,"repoOwnedByCurrentUser":null,"requestFullPath":"/simopt-admin/simopt/blob/python_dev_litong/demo_random_problem_solver.py","showFreeOrgGatedFeatureMessage":null,"showPlanSupportBanner":null,"upgradeDataAttributes":null,"upgradePath":null},"publishBannersInfo":{"dismissActionNoticePath":"/settings/dismiss-notice/publish_action_from_dockerfile","dismissStackNoticePath":"/settings/dismiss-notice/publish_stack_from_file","releasePath":"/simopt-admin/simopt/releases/new?marketplace=true","showPublishActionBanner":false,"showPublishStackBanner":false},"renderImageOrRaw":false,"richText":null,"renderedFileInfo":null,"shortPath":null,"tabSize":8,"topBannersInfo":{"overridingGlobalFundingFile":false,"globalPreferredFundingPath":null,"repoOwner":"simopt-admin","repoName":"simopt","showInvalidCitationWarning":false,"citationHelpUrl":"https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/creating-a-repository-on-github/about-citation-files","showDependabotConfigurationBanner":null,"actionsOnboardingTip":null},"truncated":false,"viewable":true,"workflowRed
irectUrl":null,"symbols":{"timedOut":false,"notAnalyzed":false,"symbols":[{"name":"solver_name","kind":"constant","identStart":849,"identEnd":860,"extentStart":849,"extentEnd":872,"fullyQualifiedName":"solver_name","identUtf16":{"start":{"lineNumber":22,"utf16Col":0},"end":{"lineNumber":22,"utf16Col":11}},"extentUtf16":{"start":{"lineNumber":22,"utf16Col":0},"end":{"lineNumber":22,"utf16Col":23}}},{"name":"rebase","kind":"function","identStart":953,"identEnd":959,"extentStart":949,"extentEnd":1318,"fullyQualifiedName":"rebase","identUtf16":{"start":{"lineNumber":26,"utf16Col":4},"end":{"lineNumber":26,"utf16Col":10}},"extentUtf16":{"start":{"lineNumber":26,"utf16Col":0},"end":{"lineNumber":34,"utf16Col":21}}},{"name":"strtobool","kind":"function","identStart":1324,"identEnd":1333,"extentStart":1320,"extentEnd":1423,"fullyQualifiedName":"strtobool","identUtf16":{"start":{"lineNumber":36,"utf16Col":4},"end":{"lineNumber":36,"utf16Col":13}},"extentUtf16":{"start":{"lineNumber":36,"utf16Col":0},"end":{"lineNumber":41,"utf16Col":20}}},{"name":"n_inst","kind":"constant","identStart":1425,"identEnd":1431,"extentStart":1425,"extentEnd":1506,"fullyQualifiedName":"n_inst","identUtf16":{"start":{"lineNumber":43,"utf16Col":0},"end":{"lineNumber":43,"utf16Col":6}},"extentUtf16":{"start":{"lineNumber":43,"utf16Col":0},"end":{"lineNumber":43,"utf16Col":81}}},{"name":"rand","kind":"constant","identStart":1507,"identEnd":1511,"extentStart":1507,"extentEnd":1615,"fullyQualifiedName":"rand","identUtf16":{"start":{"lineNumber":44,"utf16Col":0},"end":{"lineNumber":44,"utf16Col":4}},"extentUtf16":{"start":{"lineNumber":44,"utf16Col":0},"end":{"lineNumber":44,"utf16Col":108}}},{"name":"rand","kind":"constant","identStart":1616,"identEnd":1620,"extentStart":1616,"extentEnd":1638,"fullyQualifiedName":"rand","identUtf16":{"start":{"lineNumber":45,"utf16Col":0},"end":{"lineNumber":45,"utf16Col":4}},"extentUtf16":{"start":{"lineNumber":45,"utf16Col":0},"end":{"lineNumber":45,"utf16Col":22}}},{
"name":"model_fixed_factors","kind":"constant","identStart":1640,"identEnd":1659,"extentStart":1640,"extentEnd":1664,"fullyQualifiedName":"model_fixed_factors","identUtf16":{"start":{"lineNumber":47,"utf16Col":0},"end":{"lineNumber":47,"utf16Col":19}},"extentUtf16":{"start":{"lineNumber":47,"utf16Col":0},"end":{"lineNumber":47,"utf16Col":24}}},{"name":"myproblem","kind":"constant","identStart":1692,"identEnd":1701,"extentStart":1692,"extentEnd":1778,"fullyQualifiedName":"myproblem","identUtf16":{"start":{"lineNumber":49,"utf16Col":0},"end":{"lineNumber":49,"utf16Col":9}},"extentUtf16":{"start":{"lineNumber":49,"utf16Col":0},"end":{"lineNumber":49,"utf16Col":86}}},{"name":"random_rng","kind":"constant","identStart":1780,"identEnd":1790,"extentStart":1780,"extentEnd":1917,"fullyQualifiedName":"random_rng","identUtf16":{"start":{"lineNumber":51,"utf16Col":0},"end":{"lineNumber":51,"utf16Col":10}},"extentUtf16":{"start":{"lineNumber":51,"utf16Col":0},"end":{"lineNumber":51,"utf16Col":137}}},{"name":"rng_list2","kind":"constant","identStart":1918,"identEnd":1927,"extentStart":1918,"extentEnd":2009,"fullyQualifiedName":"rng_list2","identUtf16":{"start":{"lineNumber":52,"utf16Col":0},"end":{"lineNumber":52,"utf16Col":9}},"extentUtf16":{"start":{"lineNumber":52,"utf16Col":0},"end":{"lineNumber":52,"utf16Col":91}}}]}},"copilotInfo":{"notices":{"codeViewPopover":{"dismissed":false,"dismissPath":"/settings/dismiss-notice/code_view_copilot_popover"}},"userAccess":{"accessAllowed":false,"hasSubscriptionEnded":false,"orgHasCFBAccess":false,"userHasCFIAccess":false,"userHasOrgs":false,"userIsOrgAdmin":false,"userIsOrgMember":false,"business":null,"featureRequestInfo":null}},"csrf_tokens":{"/simopt-admin/simopt/branches":{"post":"bo50Ta0pqezuFqdyhIuxc4A08ugUj2YisoOlcEleflvGgbDCQyrX6x2QrXdyQL1Gx5s7jQXbLyjUL5oqWBfTZA"},"/repos/preferences":{"post":"wcttIkjLR6fY-zNC2Fo7-UJslO5L-3dymWrHjxwpae5VIvim4U7WKfi5aFRtkY0DfMXteLVfrWvlUETm6eCsag"}}},"title":"simopt/demo_random_problem_solver.py 
at python_dev_litong · simopt-admin/simopt"} \ No newline at end of file diff --git a/demo/demo_user.py b/demo/demo_user.py new file mode 100644 index 000000000..ee336c814 --- /dev/null +++ b/demo/demo_user.py @@ -0,0 +1 @@ +{"payload":{"allShortcutsEnabled":true,"fileTree":{"":{"items":[{"name":"data_farming_experiments","path":"data_farming_experiments","contentType":"directory"},{"name":"demo","path":"demo","contentType":"directory"},{"name":"dist","path":"dist","contentType":"directory"},{"name":"docs","path":"docs","contentType":"directory"},{"name":"experiments","path":"experiments","contentType":"directory"},{"name":"notebooks","path":"notebooks","contentType":"directory"},{"name":"simopt","path":"simopt","contentType":"directory"},{"name":"simoptlib.egg-info","path":"simoptlib.egg-info","contentType":"directory"},{"name":"workshop","path":"workshop","contentType":"directory"},{"name":".gitignore","path":".gitignore","contentType":"file"},{"name":"LICENSE","path":"LICENSE","contentType":"file"},{"name":"README.md","path":"README.md","contentType":"file"},{"name":"demo_radom_model.py","path":"demo_radom_model.py","contentType":"file"},{"name":"demo_random_problem.py","path":"demo_random_problem.py","contentType":"file"},{"name":"demo_random_problem_solver.py","path":"demo_random_problem_solver.py","contentType":"file"},{"name":"demo_user.py","path":"demo_user.py","contentType":"file"},{"name":"pyproject.toml","path":"pyproject.toml","contentType":"file"}],"totalCount":17}},"fileTreeProcessingTime":2.910284,"foldersToFetch":[],"reducedMotionEnabled":"system","repo":{"id":194012165,"defaultBranch":"master","name":"simopt","ownerLogin":"simopt-admin","currentUserCanPush":true,"isFork":false,"isEmpty":false,"createdAt":"2019-06-26T22:55:30.000-04:00","ownerAvatar":"https://avatars.githubusercontent.com/u/52267122?v=4","public":true,"private":false,"isOrgOwned":false},"symbolsExpanded":false,"treeExpanded":true,"refInfo":{"name":"python_dev_litong","listCacheKey":"
v0:1692137230.0","canEdit":true,"refType":"branch","currentOid":"382561d40918dac6fcfb54e7c1f873bdca0f46e9"},"path":"demo_user.py","currentUser":{"id":46491025,"login":"liulitong-Jessie","userEmail":"118010185@link.cuhk.edu.cn"},"blob":{"rawLines":["\"\"\"","This script is the user interface for generating multiple random problem instances and","solve them by specified solvers.","It create problem-solver groups and runs multiple","macroreplications of each problem-solver pair. To run the file, user need","to import the solver and probelm they want to build random instances at the beginning,","and also provide an input file, which include the information needed to ","build random instances (the name of problem, number of random instances to ","generate, and some overriding factors).","\"\"\"","","import sys","import os.path as o","import os","import re","sys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), \"..\")))","","# Import the ProblemsSolvers class and other useful functions","from simopt.experiment_base import ProblemsSolvers, plot_solvability_profiles","from rng.mrg32k3a import MRG32k3a","from simopt.base import Solution","from simopt.models.smf import SMF_Max","from simopt.models.rmitd import RMITDMaxRevenue","from simopt.models.san_2 import SANLongestPath, SANLongestPathConstr","from simopt.models.mm1queue import MM1MinMeanSojournTime","","","# !! 
When testing a new solver/problem, first import problems from the random code file,","# Then create a test_input.txt file in your computer.","# There you should add the import statement and an entry in the file","# You need to specify name of solvers and problems you want to test in the file by 'solver_name'","# And specify the problem related informations by problem = [...]","# All lines start with '#' will be counted as commend and will not be implemented","# See the following example for more details.","","# Ex:","# To create two random instance of SAN and three random instances of SMF:","# In the demo_user.py, modify:","# from simopt.models.smf import SMF_Max","# from simopt.models.san_2 import SANLongestPath","# In the input information file (test_input.txt), include the following lines:","# solver_names = [\"RNDSRCH\", \"ASTRODF\", \"NELDMD\"]","# problem1 = [SANLongestPath, 2, {'num_nodes':8, 'num_arcs':12}]","# problem2 = [SMF_Max, 3, {'num_nodes':7, 'num_arcs':16}]","","# Grab information from the input file","def get_info(path):"," L = []"," with open(path) as file:"," lines = [line.rstrip() for line in file]"," for line in lines:"," if not line.startswith(\"#\") and line:"," L.append(line)"," lines = L"," command_lines = []"," problem_sets = []"," for line in lines:"," if 'import' in line:"," command_lines.append(line)"," elif 'solver_names' in line:"," solver_names = line"," else:"," problem_sets.append(line)",""," for i in command_lines:"," exec(i)"," "," problems = []"," solver_names = eval(re.findall(r'\\[.*?\\]', solver_names)[0])"," for l in problem_sets:"," o = re.findall(r'\\[.*?\\]', l)[0]"," problems.append(eval(o))"," "," problem_sets = [p[0] for p in problems]"," L_num = [p[1] for p in problems]"," L_para = [p[2] for p in problems]"," "," return solver_names, problem_sets, L_num, L_para","","# Read input file and process information","path = input('Please input the path of the input file: ')","if \"'\" in path: # If the input path already has 
quotation marks"," path = path.replace(\"'\", \"\")"," ","solver_names, problem_set, L_num, L_para = get_info(path)","rands = [True for i in range(len(problem_set))]","","# Check whether the input file is valid","if len(L_num) != len(problem_set) or len(L_para) != len(problem_set):"," print('Invalid input. The input number of random instances does not match with the number of problems you want.')"," print('Please check your input file')","","def rebase(random_rng, n):"," new_rngs = []"," for rng in random_rng:"," stream_index = rng.s_ss_sss_index[0]"," substream_index = rng.s_ss_sss_index[1]"," subsubstream_index = rng.s_ss_sss_index[2]"," new_rngs.append(MRG32k3a(s_ss_sss_index=[stream_index, substream_index + n, subsubstream_index]))"," random_rng = new_rngs"," return random_rng","","myproblems = problem_set","","# Check whether the problem is random","for i in range(len(problem_set)):"," if L_num[i] == 0:"," L_num[i] = 1"," rands[i] = False"," else:"," rands[i] = True","","problems = []","problem_names = []","","def generate_problem(i, myproblems, rands, problems, L_num, L_para):"," print('For problem ', myproblems[i]().name, ':') "," model_fixed_factors = L_para[i]"," "," name = myproblems[i]"," myproblem = name(model_fixed_factors=model_fixed_factors, random=rands[i])"," random_rng = [MRG32k3a(s_ss_sss_index=[2, 4 + L_num[i], ss]) for ss in range(myproblem.n_rngs)]"," rng_list2 = [MRG32k3a(s_ss_sss_index=[2, 4, ss]) for ss in range(myproblem.model.n_random)]"," "," if rands[i] == False: # Determinant case"," problems.append(myproblem)"," myproblem.name = str(myproblem.model.name) + str(0)"," problem_names.append(myproblem.name)"," print('')"," "," else:"," for j in range(L_num[i]):"," random_rng = rebase(random_rng, 1) # Advance the substream for different instances"," rng_list2 = rebase(rng_list2, 1)"," name = myproblems[i]"," myproblem = name(model_fixed_factors=model_fixed_factors, random=rands[i], random_rng=rng_list2)"," 
myproblem.attach_rngs(random_rng)"," # myproblem.name = str(myproblem.model.name) + str(j)"," myproblem.name = str(myproblem.name) + '-' + str(j)"," problems.append(myproblem)"," problem_names.append(myproblem.name)"," print('')"," "," return problems, problem_names"," ","# Generate problems","for i in range(len(L_num)):"," problems, problem_names = generate_problem(i, myproblems, rands, problems, L_num, L_para)","","# Initialize an instance of the experiment class.","mymetaexperiment = ProblemsSolvers(solver_names=solver_names, problems = problems)","","# Run a fixed number of macroreplications of each solver on each problem.","mymetaexperiment.run(n_macroreps=3)","","print(\"Post-processing results.\")","# Run a fixed number of postreplications at all recommended solutions.","mymetaexperiment.post_replicate(n_postreps=20)","# Find an optimal solution x* for normalization.","mymetaexperiment.post_normalize(n_postreps_init_opt=20)","","print(\"Plotting results.\")","# Produce basic plots of the solvers on the problems.","plot_solvability_profiles(experiments=mymetaexperiment.experiments, plot_type=\"cdf_solvability\")","","# Plots will be saved in the folder experiments/plots.","print(\"Finished. 
Plots can be found in experiments/plots folder.\")"],"stylingDirectives":[[{"start":0,"end":3,"cssClass":"pl-s"}],[{"start":0,"end":86,"cssClass":"pl-s"}],[{"start":0,"end":32,"cssClass":"pl-s"}],[{"start":0,"end":49,"cssClass":"pl-s"}],[{"start":0,"end":73,"cssClass":"pl-s"}],[{"start":0,"end":86,"cssClass":"pl-s"}],[{"start":0,"end":72,"cssClass":"pl-s"}],[{"start":0,"end":75,"cssClass":"pl-s"}],[{"start":0,"end":39,"cssClass":"pl-s"}],[{"start":0,"end":3,"cssClass":"pl-s"}],[],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":10,"cssClass":"pl-s1"}],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":9,"cssClass":"pl-s1"},{"start":10,"end":14,"cssClass":"pl-s1"},{"start":15,"end":17,"cssClass":"pl-k"},{"start":18,"end":19,"cssClass":"pl-s1"}],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":9,"cssClass":"pl-s1"}],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":9,"cssClass":"pl-s1"}],[{"start":0,"end":3,"cssClass":"pl-s1"},{"start":4,"end":8,"cssClass":"pl-s1"},{"start":9,"end":15,"cssClass":"pl-en"},{"start":16,"end":17,"cssClass":"pl-s1"},{"start":18,"end":25,"cssClass":"pl-en"},{"start":26,"end":27,"cssClass":"pl-s1"},{"start":28,"end":32,"cssClass":"pl-en"},{"start":33,"end":34,"cssClass":"pl-s1"},{"start":35,"end":42,"cssClass":"pl-en"},{"start":43,"end":46,"cssClass":"pl-s1"},{"start":47,"end":54,"cssClass":"pl-s1"},{"start":55,"end":63,"cssClass":"pl-s1"},{"start":65,"end":73,"cssClass":"pl-s1"},{"start":76,"end":80,"cssClass":"pl-s"}],[],[{"start":0,"end":61,"cssClass":"pl-c"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":27,"cssClass":"pl-s1"},{"start":28,"end":34,"cssClass":"pl-k"},{"start":35,"end":50,"cssClass":"pl-v"},{"start":52,"end":77,"cssClass":"pl-s1"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":8,"cssClass":"pl-s1"},{"start":9,"end":17,"cssClass":"pl-s1"},{"start":18,"end":24,"cssClass":"pl-k"},{"start":25,"end":33,"cssClass":"pl-v"}],[{"start":0,"end":4,"
cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":16,"cssClass":"pl-s1"},{"start":17,"end":23,"cssClass":"pl-k"},{"start":24,"end":32,"cssClass":"pl-v"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":18,"cssClass":"pl-s1"},{"start":19,"end":22,"cssClass":"pl-s1"},{"start":23,"end":29,"cssClass":"pl-k"},{"start":30,"end":37,"cssClass":"pl-v"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":18,"cssClass":"pl-s1"},{"start":19,"end":24,"cssClass":"pl-s1"},{"start":25,"end":31,"cssClass":"pl-k"},{"start":32,"end":47,"cssClass":"pl-v"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":18,"cssClass":"pl-s1"},{"start":19,"end":24,"cssClass":"pl-s1"},{"start":25,"end":31,"cssClass":"pl-k"},{"start":32,"end":46,"cssClass":"pl-v"},{"start":48,"end":68,"cssClass":"pl-v"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":18,"cssClass":"pl-s1"},{"start":19,"end":27,"cssClass":"pl-s1"},{"start":28,"end":34,"cssClass":"pl-k"},{"start":35,"end":56,"cssClass":"pl-v"}],[],[],[{"start":0,"end":88,"cssClass":"pl-c"}],[{"start":0,"end":53,"cssClass":"pl-c"}],[{"start":0,"end":68,"cssClass":"pl-c"}],[{"start":0,"end":96,"cssClass":"pl-c"}],[{"start":0,"end":65,"cssClass":"pl-c"}],[{"start":0,"end":81,"cssClass":"pl-c"}],[{"start":0,"end":45,"cssClass":"pl-c"}],[],[{"start":0,"end":5,"cssClass":"pl-c"}],[{"start":0,"end":73,"cssClass":"pl-c"}],[{"start":0,"end":30,"cssClass":"pl-c"}],[{"start":0,"end":39,"cssClass":"pl-c"}],[{"start":0,"end":48,"cssClass":"pl-c"}],[{"start":0,"end":78,"cssClass":"pl-c"}],[{"start":0,"end":49,"cssClass":"pl-c"}],[{"start":0,"end":64,"cssClass":"pl-c"}],[{"start":0,"end":57,"cssClass":"pl-c"}],[],[{"start":0,"end":38,"cssClass":"pl-c"}],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":12,"cssClass":"pl-en"},{"start":13,"end":17,"css
Class":"pl-s1"}],[{"start":4,"end":5,"cssClass":"pl-v"},{"start":6,"end":7,"cssClass":"pl-c1"}],[{"start":4,"end":8,"cssClass":"pl-k"},{"start":9,"end":13,"cssClass":"pl-en"},{"start":14,"end":18,"cssClass":"pl-s1"},{"start":20,"end":22,"cssClass":"pl-k"},{"start":23,"end":27,"cssClass":"pl-s1"}],[{"start":8,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-c1"},{"start":17,"end":21,"cssClass":"pl-s1"},{"start":22,"end":28,"cssClass":"pl-en"},{"start":31,"end":34,"cssClass":"pl-k"},{"start":35,"end":39,"cssClass":"pl-s1"},{"start":40,"end":42,"cssClass":"pl-c1"},{"start":43,"end":47,"cssClass":"pl-s1"}],[{"start":8,"end":11,"cssClass":"pl-k"},{"start":12,"end":16,"cssClass":"pl-s1"},{"start":17,"end":19,"cssClass":"pl-c1"},{"start":20,"end":25,"cssClass":"pl-s1"}],[{"start":12,"end":14,"cssClass":"pl-k"},{"start":15,"end":18,"cssClass":"pl-c1"},{"start":19,"end":23,"cssClass":"pl-s1"},{"start":24,"end":34,"cssClass":"pl-en"},{"start":35,"end":38,"cssClass":"pl-s"},{"start":40,"end":43,"cssClass":"pl-c1"},{"start":44,"end":48,"cssClass":"pl-s1"}],[{"start":16,"end":17,"cssClass":"pl-v"},{"start":18,"end":24,"cssClass":"pl-en"},{"start":25,"end":29,"cssClass":"pl-s1"}],[{"start":4,"end":9,"cssClass":"pl-s1"},{"start":10,"end":11,"cssClass":"pl-c1"},{"start":12,"end":13,"cssClass":"pl-v"}],[{"start":4,"end":17,"cssClass":"pl-s1"},{"start":18,"end":19,"cssClass":"pl-c1"}],[{"start":4,"end":16,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"}],[{"start":4,"end":7,"cssClass":"pl-k"},{"start":8,"end":12,"cssClass":"pl-s1"},{"start":13,"end":15,"cssClass":"pl-c1"},{"start":16,"end":21,"cssClass":"pl-s1"}],[{"start":8,"end":10,"cssClass":"pl-k"},{"start":11,"end":19,"cssClass":"pl-s"},{"start":20,"end":22,"cssClass":"pl-c1"},{"start":23,"end":27,"cssClass":"pl-s1"}],[{"start":12,"end":25,"cssClass":"pl-s1"},{"start":26,"end":32,"cssClass":"pl-en"},{"start":33,"end":37,"cssClass":"pl-s1"}],[{"start":8,"end":12,"cssClass":"pl-k"},{"start":13,"end":27,
"cssClass":"pl-s"},{"start":28,"end":30,"cssClass":"pl-c1"},{"start":31,"end":35,"cssClass":"pl-s1"}],[{"start":12,"end":24,"cssClass":"pl-s1"},{"start":25,"end":26,"cssClass":"pl-c1"},{"start":27,"end":31,"cssClass":"pl-s1"}],[{"start":8,"end":12,"cssClass":"pl-k"}],[{"start":12,"end":24,"cssClass":"pl-s1"},{"start":25,"end":31,"cssClass":"pl-en"},{"start":32,"end":36,"cssClass":"pl-s1"}],[],[{"start":4,"end":7,"cssClass":"pl-k"},{"start":8,"end":9,"cssClass":"pl-s1"},{"start":10,"end":12,"cssClass":"pl-c1"},{"start":13,"end":26,"cssClass":"pl-s1"}],[{"start":8,"end":12,"cssClass":"pl-en"},{"start":13,"end":14,"cssClass":"pl-s1"}],[],[{"start":4,"end":12,"cssClass":"pl-s1"},{"start":13,"end":14,"cssClass":"pl-c1"}],[{"start":4,"end":16,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":19,"end":23,"cssClass":"pl-en"},{"start":24,"end":26,"cssClass":"pl-s1"},{"start":27,"end":34,"cssClass":"pl-en"},{"start":35,"end":45,"cssClass":"pl-s"},{"start":47,"end":59,"cssClass":"pl-s1"},{"start":61,"end":62,"cssClass":"pl-c1"}],[{"start":4,"end":7,"cssClass":"pl-k"},{"start":8,"end":9,"cssClass":"pl-s1"},{"start":10,"end":12,"cssClass":"pl-c1"},{"start":13,"end":25,"cssClass":"pl-s1"}],[{"start":8,"end":9,"cssClass":"pl-s1"},{"start":10,"end":11,"cssClass":"pl-c1"},{"start":12,"end":14,"cssClass":"pl-s1"},{"start":15,"end":22,"cssClass":"pl-en"},{"start":23,"end":33,"cssClass":"pl-s"},{"start":35,"end":36,"cssClass":"pl-s1"},{"start":38,"end":39,"cssClass":"pl-c1"}],[{"start":8,"end":16,"cssClass":"pl-s1"},{"start":17,"end":23,"cssClass":"pl-en"},{"start":24,"end":28,"cssClass":"pl-en"},{"start":29,"end":30,"cssClass":"pl-s1"}],[],[{"start":4,"end":16,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":20,"end":21,"cssClass":"pl-s1"},{"start":22,"end":23,"cssClass":"pl-c1"},{"start":25,"end":28,"cssClass":"pl-k"},{"start":29,"end":30,"cssClass":"pl-s1"},{"start":31,"end":33,"cssClass":"pl-c1"},{"start":34,"end":42,"cssClass":"pl-s1"}],[{"
start":4,"end":9,"cssClass":"pl-v"},{"start":10,"end":11,"cssClass":"pl-c1"},{"start":13,"end":14,"cssClass":"pl-s1"},{"start":15,"end":16,"cssClass":"pl-c1"},{"start":18,"end":21,"cssClass":"pl-k"},{"start":22,"end":23,"cssClass":"pl-s1"},{"start":24,"end":26,"cssClass":"pl-c1"},{"start":27,"end":35,"cssClass":"pl-s1"}],[{"start":4,"end":10,"cssClass":"pl-v"},{"start":11,"end":12,"cssClass":"pl-c1"},{"start":14,"end":15,"cssClass":"pl-s1"},{"start":16,"end":17,"cssClass":"pl-c1"},{"start":19,"end":22,"cssClass":"pl-k"},{"start":23,"end":24,"cssClass":"pl-s1"},{"start":25,"end":27,"cssClass":"pl-c1"},{"start":28,"end":36,"cssClass":"pl-s1"}],[],[{"start":4,"end":10,"cssClass":"pl-k"},{"start":11,"end":23,"cssClass":"pl-s1"},{"start":25,"end":37,"cssClass":"pl-s1"},{"start":39,"end":44,"cssClass":"pl-v"},{"start":46,"end":52,"cssClass":"pl-v"}],[],[{"start":0,"end":41,"cssClass":"pl-c"}],[{"start":0,"end":4,"cssClass":"pl-s1"},{"start":5,"end":6,"cssClass":"pl-c1"},{"start":7,"end":12,"cssClass":"pl-en"},{"start":13,"end":56,"cssClass":"pl-s"}],[{"start":0,"end":2,"cssClass":"pl-k"},{"start":3,"end":6,"cssClass":"pl-s"},{"start":7,"end":9,"cssClass":"pl-c1"},{"start":10,"end":14,"cssClass":"pl-s1"},{"start":17,"end":64,"cssClass":"pl-c"}],[{"start":4,"end":8,"cssClass":"pl-s1"},{"start":9,"end":10,"cssClass":"pl-c1"},{"start":11,"end":15,"cssClass":"pl-s1"},{"start":16,"end":23,"cssClass":"pl-en"},{"start":24,"end":27,"cssClass":"pl-s"},{"start":29,"end":31,"cssClass":"pl-s"}],[],[{"start":0,"end":12,"cssClass":"pl-s1"},{"start":14,"end":25,"cssClass":"pl-s1"},{"start":27,"end":32,"cssClass":"pl-v"},{"start":34,"end":40,"cssClass":"pl-v"},{"start":41,"end":42,"cssClass":"pl-c1"},{"start":43,"end":51,"cssClass":"pl-en"},{"start":52,"end":56,"cssClass":"pl-s1"}],[{"start":0,"end":5,"cssClass":"pl-s1"},{"start":6,"end":7,"cssClass":"pl-c1"},{"start":9,"end":13,"cssClass":"pl-c1"},{"start":14,"end":17,"cssClass":"pl-k"},{"start":18,"end":19,"cssClass":"pl-s1"},{"start":2
0,"end":22,"cssClass":"pl-c1"},{"start":23,"end":28,"cssClass":"pl-en"},{"start":29,"end":32,"cssClass":"pl-en"},{"start":33,"end":44,"cssClass":"pl-s1"}],[],[{"start":0,"end":39,"cssClass":"pl-c"}],[{"start":0,"end":2,"cssClass":"pl-k"},{"start":3,"end":6,"cssClass":"pl-en"},{"start":7,"end":12,"cssClass":"pl-v"},{"start":14,"end":16,"cssClass":"pl-c1"},{"start":17,"end":20,"cssClass":"pl-en"},{"start":21,"end":32,"cssClass":"pl-s1"},{"start":34,"end":36,"cssClass":"pl-c1"},{"start":37,"end":40,"cssClass":"pl-en"},{"start":41,"end":47,"cssClass":"pl-v"},{"start":49,"end":51,"cssClass":"pl-c1"},{"start":52,"end":55,"cssClass":"pl-en"},{"start":56,"end":67,"cssClass":"pl-s1"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":116,"cssClass":"pl-s"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":40,"cssClass":"pl-s"}],[],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":10,"cssClass":"pl-en"},{"start":11,"end":21,"cssClass":"pl-s1"},{"start":23,"end":24,"cssClass":"pl-s1"}],[{"start":4,"end":12,"cssClass":"pl-s1"},{"start":13,"end":14,"cssClass":"pl-c1"}],[{"start":4,"end":7,"cssClass":"pl-k"},{"start":8,"end":11,"cssClass":"pl-s1"},{"start":12,"end":14,"cssClass":"pl-c1"},{"start":15,"end":25,"cssClass":"pl-s1"}],[{"start":8,"end":20,"cssClass":"pl-s1"},{"start":21,"end":22,"cssClass":"pl-c1"},{"start":23,"end":26,"cssClass":"pl-s1"},{"start":27,"end":41,"cssClass":"pl-s1"},{"start":42,"end":43,"cssClass":"pl-c1"}],[{"start":8,"end":23,"cssClass":"pl-s1"},{"start":24,"end":25,"cssClass":"pl-c1"},{"start":26,"end":29,"cssClass":"pl-s1"},{"start":30,"end":44,"cssClass":"pl-s1"},{"start":45,"end":46,"cssClass":"pl-c1"}],[{"start":8,"end":26,"cssClass":"pl-s1"},{"start":27,"end":28,"cssClass":"pl-c1"},{"start":29,"end":32,"cssClass":"pl-s1"},{"start":33,"end":47,"cssClass":"pl-s1"},{"start":48,"end":49,"cssClass":"pl-c1"}],[{"start":8,"end":16,"cssClass":"pl-s1"},{"start":17,"end":23,"cssClass":"pl-en"},{"start":24,"end":32,"cssClass":"pl-v"},{"
start":33,"end":47,"cssClass":"pl-s1"},{"start":47,"end":48,"cssClass":"pl-c1"},{"start":49,"end":61,"cssClass":"pl-s1"},{"start":63,"end":78,"cssClass":"pl-s1"},{"start":79,"end":80,"cssClass":"pl-c1"},{"start":81,"end":82,"cssClass":"pl-s1"},{"start":84,"end":102,"cssClass":"pl-s1"}],[{"start":4,"end":14,"cssClass":"pl-s1"},{"start":15,"end":16,"cssClass":"pl-c1"},{"start":17,"end":25,"cssClass":"pl-s1"}],[{"start":4,"end":10,"cssClass":"pl-k"},{"start":11,"end":21,"cssClass":"pl-s1"}],[],[{"start":0,"end":10,"cssClass":"pl-s1"},{"start":11,"end":12,"cssClass":"pl-c1"},{"start":13,"end":24,"cssClass":"pl-s1"}],[],[{"start":0,"end":37,"cssClass":"pl-c"}],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":5,"cssClass":"pl-s1"},{"start":6,"end":8,"cssClass":"pl-c1"},{"start":9,"end":14,"cssClass":"pl-en"},{"start":15,"end":18,"cssClass":"pl-en"},{"start":19,"end":30,"cssClass":"pl-s1"}],[{"start":4,"end":6,"cssClass":"pl-k"},{"start":7,"end":12,"cssClass":"pl-v"},{"start":13,"end":14,"cssClass":"pl-s1"},{"start":16,"end":18,"cssClass":"pl-c1"},{"start":19,"end":20,"cssClass":"pl-c1"}],[{"start":8,"end":13,"cssClass":"pl-v"},{"start":14,"end":15,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":19,"end":20,"cssClass":"pl-c1"}],[{"start":8,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":19,"end":24,"cssClass":"pl-c1"}],[{"start":4,"end":8,"cssClass":"pl-k"}],[{"start":8,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":19,"end":23,"cssClass":"pl-c1"}],[],[{"start":0,"end":8,"cssClass":"pl-s1"},{"start":9,"end":10,"cssClass":"pl-c1"}],[{"start":0,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-c1"}],[],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":20,"cssClass":"pl-en"},{"start":21,"end":22,"cssClass":"pl-s1"},{"start":24,"end":34,"cssClass":"pl-s1"},{"start":36,"end":41,"cssClas
s":"pl-s1"},{"start":43,"end":51,"cssClass":"pl-s1"},{"start":53,"end":58,"cssClass":"pl-v"},{"start":60,"end":66,"cssClass":"pl-v"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":24,"cssClass":"pl-s"},{"start":26,"end":36,"cssClass":"pl-s1"},{"start":37,"end":38,"cssClass":"pl-s1"},{"start":42,"end":46,"cssClass":"pl-s1"},{"start":48,"end":51,"cssClass":"pl-s"}],[{"start":4,"end":23,"cssClass":"pl-s1"},{"start":24,"end":25,"cssClass":"pl-c1"},{"start":26,"end":32,"cssClass":"pl-v"},{"start":33,"end":34,"cssClass":"pl-s1"}],[],[{"start":4,"end":8,"cssClass":"pl-s1"},{"start":9,"end":10,"cssClass":"pl-c1"},{"start":11,"end":21,"cssClass":"pl-s1"},{"start":22,"end":23,"cssClass":"pl-s1"}],[{"start":4,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-c1"},{"start":16,"end":20,"cssClass":"pl-en"},{"start":21,"end":40,"cssClass":"pl-s1"},{"start":40,"end":41,"cssClass":"pl-c1"},{"start":41,"end":60,"cssClass":"pl-s1"},{"start":62,"end":68,"cssClass":"pl-s1"},{"start":68,"end":69,"cssClass":"pl-c1"},{"start":69,"end":74,"cssClass":"pl-s1"},{"start":75,"end":76,"cssClass":"pl-s1"}],[{"start":4,"end":14,"cssClass":"pl-s1"},{"start":15,"end":16,"cssClass":"pl-c1"},{"start":18,"end":26,"cssClass":"pl-v"},{"start":27,"end":41,"cssClass":"pl-s1"},{"start":41,"end":42,"cssClass":"pl-c1"},{"start":43,"end":44,"cssClass":"pl-c1"},{"start":46,"end":47,"cssClass":"pl-c1"},{"start":48,"end":49,"cssClass":"pl-c1"},{"start":50,"end":55,"cssClass":"pl-v"},{"start":56,"end":57,"cssClass":"pl-s1"},{"start":60,"end":62,"cssClass":"pl-s1"},{"start":65,"end":68,"cssClass":"pl-k"},{"start":69,"end":71,"cssClass":"pl-s1"},{"start":72,"end":74,"cssClass":"pl-c1"},{"start":75,"end":80,"cssClass":"pl-en"},{"start":81,"end":90,"cssClass":"pl-s1"},{"start":91,"end":97,"cssClass":"pl-s1"}],[{"start":4,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-c1"},{"start":17,"end":25,"cssClass":"pl-v"},{"start":26,"end":40,"cssClass":"pl-s1"},{"start":40,"end":41,"c
ssClass":"pl-c1"},{"start":42,"end":43,"cssClass":"pl-c1"},{"start":45,"end":46,"cssClass":"pl-c1"},{"start":48,"end":50,"cssClass":"pl-s1"},{"start":53,"end":56,"cssClass":"pl-k"},{"start":57,"end":59,"cssClass":"pl-s1"},{"start":60,"end":62,"cssClass":"pl-c1"},{"start":63,"end":68,"cssClass":"pl-en"},{"start":69,"end":78,"cssClass":"pl-s1"},{"start":79,"end":84,"cssClass":"pl-s1"},{"start":85,"end":93,"cssClass":"pl-s1"}],[],[{"start":4,"end":6,"cssClass":"pl-k"},{"start":7,"end":12,"cssClass":"pl-s1"},{"start":13,"end":14,"cssClass":"pl-s1"},{"start":16,"end":18,"cssClass":"pl-c1"},{"start":19,"end":24,"cssClass":"pl-c1"},{"start":27,"end":45,"cssClass":"pl-c"}],[{"start":8,"end":16,"cssClass":"pl-s1"},{"start":17,"end":23,"cssClass":"pl-en"},{"start":24,"end":33,"cssClass":"pl-s1"}],[{"start":8,"end":17,"cssClass":"pl-s1"},{"start":18,"end":22,"cssClass":"pl-s1"},{"start":23,"end":24,"cssClass":"pl-c1"},{"start":25,"end":28,"cssClass":"pl-en"},{"start":29,"end":38,"cssClass":"pl-s1"},{"start":39,"end":44,"cssClass":"pl-s1"},{"start":45,"end":49,"cssClass":"pl-s1"},{"start":51,"end":52,"cssClass":"pl-c1"},{"start":53,"end":56,"cssClass":"pl-en"},{"start":57,"end":58,"cssClass":"pl-c1"}],[{"start":8,"end":21,"cssClass":"pl-s1"},{"start":22,"end":28,"cssClass":"pl-en"},{"start":29,"end":38,"cssClass":"pl-s1"},{"start":39,"end":43,"cssClass":"pl-s1"}],[{"start":8,"end":13,"cssClass":"pl-en"},{"start":14,"end":16,"cssClass":"pl-s"}],[],[{"start":4,"end":8,"cssClass":"pl-k"}],[{"start":8,"end":11,"cssClass":"pl-k"},{"start":12,"end":13,"cssClass":"pl-s1"},{"start":14,"end":16,"cssClass":"pl-c1"},{"start":17,"end":22,"cssClass":"pl-en"},{"start":23,"end":28,"cssClass":"pl-v"},{"start":29,"end":30,"cssClass":"pl-s1"}],[{"start":12,"end":22,"cssClass":"pl-s1"},{"start":23,"end":24,"cssClass":"pl-c1"},{"start":25,"end":31,"cssClass":"pl-en"},{"start":32,"end":42,"cssClass":"pl-s1"},{"start":44,"end":45,"cssClass":"pl-c1"},{"start":48,"end":95,"cssClass":"pl-c"}],[{"start"
:12,"end":21,"cssClass":"pl-s1"},{"start":22,"end":23,"cssClass":"pl-c1"},{"start":24,"end":30,"cssClass":"pl-en"},{"start":31,"end":40,"cssClass":"pl-s1"},{"start":42,"end":43,"cssClass":"pl-c1"}],[{"start":12,"end":16,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":19,"end":29,"cssClass":"pl-s1"},{"start":30,"end":31,"cssClass":"pl-s1"}],[{"start":12,"end":21,"cssClass":"pl-s1"},{"start":22,"end":23,"cssClass":"pl-c1"},{"start":24,"end":28,"cssClass":"pl-en"},{"start":29,"end":48,"cssClass":"pl-s1"},{"start":48,"end":49,"cssClass":"pl-c1"},{"start":49,"end":68,"cssClass":"pl-s1"},{"start":70,"end":76,"cssClass":"pl-s1"},{"start":76,"end":77,"cssClass":"pl-c1"},{"start":77,"end":82,"cssClass":"pl-s1"},{"start":83,"end":84,"cssClass":"pl-s1"},{"start":87,"end":97,"cssClass":"pl-s1"},{"start":97,"end":98,"cssClass":"pl-c1"},{"start":98,"end":107,"cssClass":"pl-s1"}],[{"start":12,"end":21,"cssClass":"pl-s1"},{"start":22,"end":33,"cssClass":"pl-en"},{"start":34,"end":44,"cssClass":"pl-s1"}],[{"start":12,"end":65,"cssClass":"pl-c"}],[{"start":12,"end":21,"cssClass":"pl-s1"},{"start":22,"end":26,"cssClass":"pl-s1"},{"start":27,"end":28,"cssClass":"pl-c1"},{"start":29,"end":32,"cssClass":"pl-en"},{"start":33,"end":42,"cssClass":"pl-s1"},{"start":43,"end":47,"cssClass":"pl-s1"},{"start":49,"end":50,"cssClass":"pl-c1"},{"start":51,"end":54,"cssClass":"pl-s"},{"start":55,"end":56,"cssClass":"pl-c1"},{"start":57,"end":60,"cssClass":"pl-en"},{"start":61,"end":62,"cssClass":"pl-s1"}],[{"start":12,"end":20,"cssClass":"pl-s1"},{"start":21,"end":27,"cssClass":"pl-en"},{"start":28,"end":37,"cssClass":"pl-s1"}],[{"start":12,"end":25,"cssClass":"pl-s1"},{"start":26,"end":32,"cssClass":"pl-en"},{"start":33,"end":42,"cssClass":"pl-s1"},{"start":43,"end":47,"cssClass":"pl-s1"}],[{"start":12,"end":17,"cssClass":"pl-en"},{"start":18,"end":20,"cssClass":"pl-s"}],[],[{"start":4,"end":10,"cssClass":"pl-k"},{"start":11,"end":19,"cssClass":"pl-s1"},{"start":21,"end":34,"c
ssClass":"pl-s1"}],[],[{"start":0,"end":19,"cssClass":"pl-c"}],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":5,"cssClass":"pl-s1"},{"start":6,"end":8,"cssClass":"pl-c1"},{"start":9,"end":14,"cssClass":"pl-en"},{"start":15,"end":18,"cssClass":"pl-en"},{"start":19,"end":24,"cssClass":"pl-v"}],[{"start":8,"end":16,"cssClass":"pl-s1"},{"start":18,"end":31,"cssClass":"pl-s1"},{"start":32,"end":33,"cssClass":"pl-c1"},{"start":34,"end":50,"cssClass":"pl-en"},{"start":51,"end":52,"cssClass":"pl-s1"},{"start":54,"end":64,"cssClass":"pl-s1"},{"start":66,"end":71,"cssClass":"pl-s1"},{"start":73,"end":81,"cssClass":"pl-s1"},{"start":83,"end":88,"cssClass":"pl-v"},{"start":90,"end":96,"cssClass":"pl-v"}],[],[{"start":0,"end":49,"cssClass":"pl-c"}],[{"start":0,"end":16,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":19,"end":34,"cssClass":"pl-v"},{"start":35,"end":47,"cssClass":"pl-s1"},{"start":47,"end":48,"cssClass":"pl-c1"},{"start":48,"end":60,"cssClass":"pl-s1"},{"start":62,"end":70,"cssClass":"pl-s1"},{"start":71,"end":72,"cssClass":"pl-c1"},{"start":73,"end":81,"cssClass":"pl-s1"}],[],[{"start":0,"end":73,"cssClass":"pl-c"}],[{"start":0,"end":16,"cssClass":"pl-s1"},{"start":17,"end":20,"cssClass":"pl-en"},{"start":21,"end":32,"cssClass":"pl-s1"},{"start":32,"end":33,"cssClass":"pl-c1"},{"start":33,"end":34,"cssClass":"pl-c1"}],[],[{"start":0,"end":5,"cssClass":"pl-en"},{"start":6,"end":32,"cssClass":"pl-s"}],[{"start":0,"end":70,"cssClass":"pl-c"}],[{"start":0,"end":16,"cssClass":"pl-s1"},{"start":17,"end":31,"cssClass":"pl-en"},{"start":32,"end":42,"cssClass":"pl-s1"},{"start":42,"end":43,"cssClass":"pl-c1"},{"start":43,"end":45,"cssClass":"pl-c1"}],[{"start":0,"end":48,"cssClass":"pl-c"}],[{"start":0,"end":16,"cssClass":"pl-s1"},{"start":17,"end":31,"cssClass":"pl-en"},{"start":32,"end":51,"cssClass":"pl-s1"},{"start":51,"end":52,"cssClass":"pl-c1"},{"start":52,"end":54,"cssClass":"pl-c1"}],[],[{"start":0,"end":5,"cssClass":"pl-en"},{"star
t":6,"end":25,"cssClass":"pl-s"}],[{"start":0,"end":53,"cssClass":"pl-c"}],[{"start":0,"end":25,"cssClass":"pl-en"},{"start":26,"end":37,"cssClass":"pl-s1"},{"start":37,"end":38,"cssClass":"pl-c1"},{"start":38,"end":54,"cssClass":"pl-s1"},{"start":55,"end":66,"cssClass":"pl-s1"},{"start":68,"end":77,"cssClass":"pl-s1"},{"start":77,"end":78,"cssClass":"pl-c1"},{"start":78,"end":95,"cssClass":"pl-s"}],[],[{"start":0,"end":54,"cssClass":"pl-c"}],[{"start":0,"end":5,"cssClass":"pl-en"},{"start":6,"end":65,"cssClass":"pl-s"}]],"csv":null,"csvError":null,"dependabotInfo":{"showConfigurationBanner":null,"configFilePath":null,"networkDependabotPath":"/simopt-admin/simopt/network/updates","dismissConfigurationNoticePath":"/settings/dismiss-notice/dependabot_configuration_notice","configurationNoticeDismissed":false,"repoAlertsPath":"/simopt-admin/simopt/security/dependabot","repoSecurityAndAnalysisPath":"/simopt-admin/simopt/settings/security_analysis","repoOwnerIsOrg":false,"currentUserCanAdminRepo":false},"displayName":"demo_user.py","displayUrl":"https://github.com/simopt-admin/simopt/blob/python_dev_litong/demo_user.py?raw=true","headerInfo":{"blobSize":"6.31 KB","deleteInfo":{"deleteTooltip":"Delete this file"},"editInfo":{"editTooltip":"Edit this 
file"},"ghDesktopPath":"x-github-client://openRepo/https://github.com/simopt-admin/simopt?branch=python_dev_litong&filepath=demo_user.py","gitLfsPath":null,"onBranch":true,"shortPath":"1872aa0","siteNavLoginPath":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsimopt-admin%2Fsimopt%2Fblob%2Fpython_dev_litong%2Fdemo_user.py","isCSV":false,"isRichtext":false,"toc":null,"lineInfo":{"truncatedLoc":"167","truncatedSloc":"139"},"mode":"file"},"image":false,"isCodeownersFile":null,"isPlain":false,"isValidLegacyIssueTemplate":false,"issueTemplateHelpUrl":"https://docs.github.com/articles/about-issue-and-pull-request-templates","issueTemplate":null,"discussionTemplate":null,"language":"Python","languageID":303,"large":false,"loggedIn":true,"newDiscussionPath":"/simopt-admin/simopt/discussions/new","newIssuePath":"/simopt-admin/simopt/issues/new","planSupportInfo":{"repoIsFork":null,"repoOwnedByCurrentUser":null,"requestFullPath":"/simopt-admin/simopt/blob/python_dev_litong/demo_user.py","showFreeOrgGatedFeatureMessage":null,"showPlanSupportBanner":null,"upgradeDataAttributes":null,"upgradePath":null},"publishBannersInfo":{"dismissActionNoticePath":"/settings/dismiss-notice/publish_action_from_dockerfile","dismissStackNoticePath":"/settings/dismiss-notice/publish_stack_from_file","releasePath":"/simopt-admin/simopt/releases/new?marketplace=true","showPublishActionBanner":false,"showPublishStackBanner":false},"renderImageOrRaw":false,"richText":null,"renderedFileInfo":null,"shortPath":null,"tabSize":8,"topBannersInfo":{"overridingGlobalFundingFile":false,"globalPreferredFundingPath":null,"repoOwner":"simopt-admin","repoName":"simopt","showInvalidCitationWarning":false,"citationHelpUrl":"https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/creating-a-repository-on-github/about-citation-files","showDependabotConfigurationBanner":null,"actionsOnboardingTip":null},"truncated":false,"viewable":true,"workflowRedirectUrl":null,"symbols":{"timedOut":false,"notAna
lyzed":false,"symbols":[{"name":"get_info","kind":"function","identStart":2086,"identEnd":2094,"extentStart":2082,"extentEnd":2985,"fullyQualifiedName":"get_info","identUtf16":{"start":{"lineNumber":46,"utf16Col":4},"end":{"lineNumber":46,"utf16Col":12}},"extentUtf16":{"start":{"lineNumber":46,"utf16Col":0},"end":{"lineNumber":77,"utf16Col":52}}},{"name":"path","kind":"constant","identStart":3029,"identEnd":3033,"extentStart":3029,"extentEnd":3086,"fullyQualifiedName":"path","identUtf16":{"start":{"lineNumber":80,"utf16Col":0},"end":{"lineNumber":80,"utf16Col":4}},"extentUtf16":{"start":{"lineNumber":80,"utf16Col":0},"end":{"lineNumber":80,"utf16Col":57}}},{"name":"rands","kind":"constant","identStart":3248,"identEnd":3253,"extentStart":3248,"extentEnd":3295,"fullyQualifiedName":"rands","identUtf16":{"start":{"lineNumber":85,"utf16Col":0},"end":{"lineNumber":85,"utf16Col":5}},"extentUtf16":{"start":{"lineNumber":85,"utf16Col":0},"end":{"lineNumber":85,"utf16Col":47}}},{"name":"rebase","kind":"function","identStart":3572,"identEnd":3578,"extentStart":3568,"extentEnd":3937,"fullyQualifiedName":"rebase","identUtf16":{"start":{"lineNumber":92,"utf16Col":4},"end":{"lineNumber":92,"utf16Col":10}},"extentUtf16":{"start":{"lineNumber":92,"utf16Col":0},"end":{"lineNumber":100,"utf16Col":21}}},{"name":"myproblems","kind":"constant","identStart":3939,"identEnd":3949,"extentStart":3939,"extentEnd":3963,"fullyQualifiedName":"myproblems","identUtf16":{"start":{"lineNumber":102,"utf16Col":0},"end":{"lineNumber":102,"utf16Col":10}},"extentUtf16":{"start":{"lineNumber":102,"utf16Col":0},"end":{"lineNumber":102,"utf16Col":24}}},{"name":"problems","kind":"constant","identStart":4140,"identEnd":4148,"extentStart":4140,"extentEnd":4153,"fullyQualifiedName":"problems","identUtf16":{"start":{"lineNumber":112,"utf16Col":0},"end":{"lineNumber":112,"utf16Col":8}},"extentUtf16":{"start":{"lineNumber":112,"utf16Col":0},"end":{"lineNumber":112,"utf16Col":13}}},{"name":"problem_names","kind":"co
nstant","identStart":4154,"identEnd":4167,"extentStart":4154,"extentEnd":4172,"fullyQualifiedName":"problem_names","identUtf16":{"start":{"lineNumber":113,"utf16Col":0},"end":{"lineNumber":113,"utf16Col":13}},"extentUtf16":{"start":{"lineNumber":113,"utf16Col":0},"end":{"lineNumber":113,"utf16Col":18}}},{"name":"generate_problem","kind":"function","identStart":4178,"identEnd":4194,"extentStart":4174,"extentEnd":5505,"fullyQualifiedName":"generate_problem","identUtf16":{"start":{"lineNumber":115,"utf16Col":4},"end":{"lineNumber":115,"utf16Col":20}},"extentUtf16":{"start":{"lineNumber":115,"utf16Col":0},"end":{"lineNumber":143,"utf16Col":34}}},{"name":"mymetaexperiment","kind":"constant","identStart":5707,"identEnd":5723,"extentStart":5707,"extentEnd":5789,"fullyQualifiedName":"mymetaexperiment","identUtf16":{"start":{"lineNumber":150,"utf16Col":0},"end":{"lineNumber":150,"utf16Col":16}},"extentUtf16":{"start":{"lineNumber":150,"utf16Col":0},"end":{"lineNumber":150,"utf16Col":82}}}]}},"copilotInfo":{"notices":{"codeViewPopover":{"dismissed":false,"dismissPath":"/settings/dismiss-notice/code_view_copilot_popover"}},"userAccess":{"accessAllowed":false,"hasSubscriptionEnded":false,"orgHasCFBAccess":false,"userHasCFIAccess":false,"userHasOrgs":false,"userIsOrgAdmin":false,"userIsOrgMember":false,"business":null,"featureRequestInfo":null}},"csrf_tokens":{"/simopt-admin/simopt/branches":{"post":"gb0BMxr-fXyz6kun91_9a83dZUM5fg4_5o-QJh56D-QpssW89P0De0BsQaIBlPFeinKsJigqRzWAI698DzOi2w"},"/repos/preferences":{"post":"EIlhdtGF6ei2TTLwdZupeDLV6yHP0AEgzvpCLVPSf8WEYPTyeAB4ZpYPaebAUB-CDHyStzF02zmywMFEphu6QQ"}}},"title":"simopt/demo_user.py at python_dev_litong · simopt-admin/simopt"} \ No newline at end of file diff --git a/docs/gasso.rst b/docs/gasso.rst new file mode 100644 index 000000000..a4a86decb --- /dev/null +++ b/docs/gasso.rst @@ -0,0 +1,61 @@ +Solver: Gradient-Based Adaptive Stochastic Search for Simulation Optimization Over Continuous Space (GASSO) 
+================================================================ + +Description: +------------ +The solver iteratively generates population of candidate solutions from a sample distribution, +and uses the performance of sample distribution to update the sampling dsitribution. +GASSO has two stages in each iteration: +1. Stage I: Generate candidate solutions from some exponential family of distribution, and the +2. Stage II: Evaluate candidate solutions, and update the parameter of sampling distribution via +direct gradient search. + +Scope: +------ +* objective_type: single + +* constraint_type: box + +* variable_type: continuous + +Solver Factors: +--------------- +* crn_across_solns: Use CRN across solutions? + + * Default: True + +* N: Number of candidate solutions + + * Default: :math:`50 * \sqrt(dim)` + +* M: Number of function evaluations per candidate + + * Default: 10 + +* K: Number of iterations + + * Default: Budget/(N * M) + +* alpha_0: Determines the initial step size + + * Default: 50 + +* alpha_c: Determines the speed at which the step size decreases + + * Default: 1500 + +* alpha_p: Determines the rate at which step size gets smaller + + * Default: 0.6 + +* alpha_k: Step size + + * Default: :math:`\frac{alpha_0}{(k + \alpha_c) ^ {alpha_p}}` + + +References: +=========== +This solver is adapted from the article Enlu Zhou, Shalabh Bhatnagar (2018). +Zhou, E., & Bhatnagar, S. (2017). Gradient-based adaptive stochastic search for simulation optimization over continuous space. +*INFORMS Journal on Computing, 30(1), 154-167. 
+(https://doi.org/10.1287/ijoc.2017.0771) diff --git a/docs/openjackson.rst b/docs/openjackson.rst new file mode 100644 index 000000000..fc26b463d --- /dev/null +++ b/docs/openjackson.rst @@ -0,0 +1,117 @@ +Model: Open Jackson Network +=============================================== + +Description: +------------ +This model represents an Open Jackson Network with Poisson arrival time, exponential service time, and probabilistic routing. + +Sources of Randomness: +---------------------- +There are 3 sources of randomness in this model: +1. Exponential inter-arrival time of customers at each station. +2. Exponential service time of customers at each station. +3. Routing of customers at each station after service. + +Model Factors: +-------------- +* number_queues: The number of queues in the network. + * Default: 3 + +* arrival_alphas: The rate parameter of the exponential distribution for the inter-arrival time of customers at each station. + * Default: [1,1,1,1,1] + +* service_mus: The rate parameter of exponential distribution for the service time of customers at each station. + * Default: [2,2,2,2,2] + +* routing_matrix: The routing probabilities for a customer at station i to go to service j after service. + The departure probability from station i is :math: `1 - \sum_{j=1}^{n} (P_{ij})` + where n is the number of stations, and P is the routing matrix. + * Default: [[0.1, 0.1, 0.2, 0.2, 0], + [0.1, 0.1, 0.2, 0.2, 0], + [0.1, 0.1, 0, 0.1, 0.3], + [0.1, 0.1, 0.1, 0, 0.3], + [0.1, 0.1, 0.1, 0.1, 0.2]] + +* t_end: The time at which the simulation ends. + * Default: 200 + +* warm_up: The time at which the warm-up period ends. Relevant only when steady_state_initialization is False. + * Default: 100 + +* steady_state_initialization: Whether to initialize with queues sampled from steady state. + If so, we sample geometric distribution with parameter lambdas/service_mus for each queue and initialize the queues with the sample. 
+ * Default: True + +Below Factors are only relevant when creating random instances of the Model + +* density_p: The probability of an edge existing in the graph in the random instance. Higher the value, denser the graph. + * Default: 0.5 + +* random_arrival_parameter: The parameter for the random arrival rate exponential distribution when creating a random instance. + * Default: 1 + + +Responses: +---------- +* average_queue_length: The time-average queue length at each station. + +References: +=========== +This model is adapted from Jackson, James R. (1957). +"Networks of waiting lines". Operations Research. 4 (4): 518–521. +(doi:10.1287/opre.5.4.518) + +Optimization Problem: OpenJacksonMinQueue (OPENJACKSON-1) +================================================================ + +Decision Variables: +------------------- +* service_mus + +Objectives: +----------- +Minimize the sum of average queue length at each station. + +Constraints: +------------ +We require that the sum of service_mus at each station to be less than service_rates_budget. + +Problem Factors: +---------------- +* budget: Max # of replications for a solver to take. + + * Default: 1000 + +* service_rates_budget: Total budget to be allocated to service_mus_budget. + + * Default: 150 + +Below factors are only relevant when creating random instances of the Problem + +* gamma_mean: Scale of the mean of gamma distribution when generating service rates upper bound in random instances. + + * Default: 0.5 + +* gamma_scale: Shape of gamma distribution when generating service rates upper bound in random instances. + + * Default: 5 + +Fixed Model Factors: +-------------------- +* N/A + +Starting Solution: +------------------ +* initial_solution: lambdas * (service_rates_budget/sum(lambdas)) + +Random Solutions: +----------------- +Sample a Dirichlet distribution that sum to service_rates_budget - sum(lambdas). Then add lambdas to the sample. 
+ +Optimal Solution: +----------------- +Unknown + +Optimal Objective Function Value: +--------------------------------- +Unknown \ No newline at end of file diff --git a/simopt/base.py b/simopt/base.py index d2cfa12a2..e2e8b47d0 100644 --- a/simopt/base.py +++ b/simopt/base.py @@ -3,11 +3,19 @@ Summary ------- Provide base classes for solvers, problems, and models. +This is the modified version to generate and run random model/random problem instance. """ import numpy as np from copy import deepcopy -from mrg32k3a.mrg32k3a import MRG32k3a +import sys +import os.path as o +# from mrg32k3a.mrg32k3a import MRG32k3a +from rng.mrg32k3a import MRG32k3a #when do the multinomial, change to the local + +from simopt.auto_diff_util import bi_dict, replicate_wrapper + +sys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), ".."))) class Solver(object): @@ -378,7 +386,7 @@ def check_factor_datatype(self, factor_name): is_right_type = isinstance(self.factors[factor_name], self.specifications[factor_name]["datatype"]) return is_right_type - def attach_rngs(self, rng_list): + def attach_rngs(self, random_rng, copy=True): """Attach a list of random-number generators to the problem. Parameters @@ -387,7 +395,25 @@ def attach_rngs(self, rng_list): List of random-number generators used to generate a random initial solution or a random problem instance. """ - self.rng_list = rng_list + if copy: + self.random_rng = [deepcopy(rng) for rng in random_rng] + else: + self.random_rng = random_rng + + def rebase(self, n_reps): + """Rebase the progenitor rngs to start at a later subsubstream index. + + Parameters + ---------- + n_reps : int + Substream index to skip to. 
+ """ + new_rngs = [] + for rng in self.random_rng: + stream_index = rng.s_ss_sss_index[0] + substream_index = rng.s_ss_sss_index[1] + new_rngs.append(MRG32k3a(s_ss_sss_index=[stream_index, substream_index, n_reps])) + self.random_rng = new_rngs def vector_to_factor_dict(self, vector): """ @@ -622,8 +648,6 @@ def simulate(self, solution, m=1): # to those of deterministic components of objectives. solution.objectives[solution.n_reps] = [sum(pairs) for pairs in zip(self.response_dict_to_objectives(responses), solution.det_objectives)] if self.gradient_available: - # print(self.response_dict_to_objectives_gradients(vector_gradients)) - # print(solution.det_objectives_gradients) solution.objectives_gradients[solution.n_reps] = [[sum(pairs) for pairs in zip(stoch_obj, det_obj)] for stoch_obj, det_obj in zip(self.response_dict_to_objectives_gradients(vector_gradients), solution.det_objectives_gradients)] # solution.objectives_gradients[solution.n_reps] = [[sum(pairs) for pairs in zip(stoch_obj, det_obj)] for stoch_obj, det_obj in zip(self.response_dict_to_objectives(vector_gradients), solution.det_objectives_gradients)] if self.n_stochastic_constraints > 0: @@ -755,6 +779,21 @@ def check_factor_datatype(self, factor_name): """ is_right_type = isinstance(self.factors[factor_name], self.specifications[factor_name]["datatype"]) return is_right_type + + def attach_rng(self, random_rng, copy=True): + """Attach a list of random-number generators to the problem. + + Parameters + ---------- + rng_list : list [``mrg32k3a.mrg32k3a.MRG32k3a``] + List of random-number generators used to generate a random initial solution + or a random problem instance. + """ + # self.random_rng = random_rng + if copy: + self.random_rng = [deepcopy(rng) for rng in random_rng] + else: + self.random_rng = random_rng def replicate(self, rng_list): """Simulate a single replication for the current model factors. @@ -772,6 +811,27 @@ def replicate(self, rng_list): Gradient estimate for each response. 
""" raise NotImplementedError + + +class Auto_Model(Model): + """ + Subclass of Model. + """ + def __init__(self, fixed_factors): + # set factors of the simulation model + # fill in missing factors with default values + super(Auto_Model, self).__init__(fixed_factors) + self.differentiable_factor_names = [] + for key in self.specifications: + if self.specifications[key]["datatype"] == float: + self.differentiable_factor_names.append(key) + self.bi_dict = bi_dict(self.response_names) + + def innner_replicate(self, rng_list): + raise NotImplementedError + + def replicate(self, rng_list, **kwargs): + return replicate_wrapper(self, rng_list, **kwargs) class Solution(object): @@ -826,7 +886,10 @@ class Solution(object): def __init__(self, x, problem): super().__init__() self.x = x - self.dim = len(x) + if isinstance(x, int) or isinstance(x, float): + self.dim = 1 + else: + self.dim = len(x) self.decision_factors = problem.vector_to_factor_dict(x) self.n_reps = 0 self.det_objectives, self.det_objectives_gradients = problem.deterministic_objectives_and_gradients(self.x) diff --git a/simopt/demo_user.py b/simopt/demo_user.py new file mode 100644 index 000000000..ee336c814 --- /dev/null +++ b/simopt/demo_user.py @@ -0,0 +1 @@ 
+{"payload":{"allShortcutsEnabled":true,"fileTree":{"":{"items":[{"name":"data_farming_experiments","path":"data_farming_experiments","contentType":"directory"},{"name":"demo","path":"demo","contentType":"directory"},{"name":"dist","path":"dist","contentType":"directory"},{"name":"docs","path":"docs","contentType":"directory"},{"name":"experiments","path":"experiments","contentType":"directory"},{"name":"notebooks","path":"notebooks","contentType":"directory"},{"name":"simopt","path":"simopt","contentType":"directory"},{"name":"simoptlib.egg-info","path":"simoptlib.egg-info","contentType":"directory"},{"name":"workshop","path":"workshop","contentType":"directory"},{"name":".gitignore","path":".gitignore","contentType":"file"},{"name":"LICENSE","path":"LICENSE","contentType":"file"},{"name":"README.md","path":"README.md","contentType":"file"},{"name":"demo_radom_model.py","path":"demo_radom_model.py","contentType":"file"},{"name":"demo_random_problem.py","path":"demo_random_problem.py","contentType":"file"},{"name":"demo_random_problem_solver.py","path":"demo_random_problem_solver.py","contentType":"file"},{"name":"demo_user.py","path":"demo_user.py","contentType":"file"},{"name":"pyproject.toml","path":"pyproject.toml","contentType":"file"}],"totalCount":17}},"fileTreeProcessingTime":2.910284,"foldersToFetch":[],"reducedMotionEnabled":"system","repo":{"id":194012165,"defaultBranch":"master","name":"simopt","ownerLogin":"simopt-admin","currentUserCanPush":true,"isFork":false,"isEmpty":false,"createdAt":"2019-06-26T22:55:30.000-04:00","ownerAvatar":"https://avatars.githubusercontent.com/u/52267122?v=4","public":true,"private":false,"isOrgOwned":false},"symbolsExpanded":false,"treeExpanded":true,"refInfo":{"name":"python_dev_litong","listCacheKey":"v0:1692137230.0","canEdit":true,"refType":"branch","currentOid":"382561d40918dac6fcfb54e7c1f873bdca0f46e9"},"path":"demo_user.py","currentUser":{"id":46491025,"login":"liulitong-Jessie","userEmail":"118010185@link.cuhk.edu.c
n"},"blob":{"rawLines":["\"\"\"","This script is the user interface for generating multiple random problem instances and","solve them by specified solvers.","It create problem-solver groups and runs multiple","macroreplications of each problem-solver pair. To run the file, user need","to import the solver and probelm they want to build random instances at the beginning,","and also provide an input file, which include the information needed to ","build random instances (the name of problem, number of random instances to ","generate, and some overriding factors).","\"\"\"","","import sys","import os.path as o","import os","import re","sys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), \"..\")))","","# Import the ProblemsSolvers class and other useful functions","from simopt.experiment_base import ProblemsSolvers, plot_solvability_profiles","from rng.mrg32k3a import MRG32k3a","from simopt.base import Solution","from simopt.models.smf import SMF_Max","from simopt.models.rmitd import RMITDMaxRevenue","from simopt.models.san_2 import SANLongestPath, SANLongestPathConstr","from simopt.models.mm1queue import MM1MinMeanSojournTime","","","# !! 
When testing a new solver/problem, first import problems from the random code file,","# Then create a test_input.txt file in your computer.","# There you should add the import statement and an entry in the file","# You need to specify name of solvers and problems you want to test in the file by 'solver_name'","# And specify the problem related informations by problem = [...]","# All lines start with '#' will be counted as commend and will not be implemented","# See the following example for more details.","","# Ex:","# To create two random instance of SAN and three random instances of SMF:","# In the demo_user.py, modify:","# from simopt.models.smf import SMF_Max","# from simopt.models.san_2 import SANLongestPath","# In the input information file (test_input.txt), include the following lines:","# solver_names = [\"RNDSRCH\", \"ASTRODF\", \"NELDMD\"]","# problem1 = [SANLongestPath, 2, {'num_nodes':8, 'num_arcs':12}]","# problem2 = [SMF_Max, 3, {'num_nodes':7, 'num_arcs':16}]","","# Grab information from the input file","def get_info(path):"," L = []"," with open(path) as file:"," lines = [line.rstrip() for line in file]"," for line in lines:"," if not line.startswith(\"#\") and line:"," L.append(line)"," lines = L"," command_lines = []"," problem_sets = []"," for line in lines:"," if 'import' in line:"," command_lines.append(line)"," elif 'solver_names' in line:"," solver_names = line"," else:"," problem_sets.append(line)",""," for i in command_lines:"," exec(i)"," "," problems = []"," solver_names = eval(re.findall(r'\\[.*?\\]', solver_names)[0])"," for l in problem_sets:"," o = re.findall(r'\\[.*?\\]', l)[0]"," problems.append(eval(o))"," "," problem_sets = [p[0] for p in problems]"," L_num = [p[1] for p in problems]"," L_para = [p[2] for p in problems]"," "," return solver_names, problem_sets, L_num, L_para","","# Read input file and process information","path = input('Please input the path of the input file: ')","if \"'\" in path: # If the input path already has 
quotation marks"," path = path.replace(\"'\", \"\")"," ","solver_names, problem_set, L_num, L_para = get_info(path)","rands = [True for i in range(len(problem_set))]","","# Check whether the input file is valid","if len(L_num) != len(problem_set) or len(L_para) != len(problem_set):"," print('Invalid input. The input number of random instances does not match with the number of problems you want.')"," print('Please check your input file')","","def rebase(random_rng, n):"," new_rngs = []"," for rng in random_rng:"," stream_index = rng.s_ss_sss_index[0]"," substream_index = rng.s_ss_sss_index[1]"," subsubstream_index = rng.s_ss_sss_index[2]"," new_rngs.append(MRG32k3a(s_ss_sss_index=[stream_index, substream_index + n, subsubstream_index]))"," random_rng = new_rngs"," return random_rng","","myproblems = problem_set","","# Check whether the problem is random","for i in range(len(problem_set)):"," if L_num[i] == 0:"," L_num[i] = 1"," rands[i] = False"," else:"," rands[i] = True","","problems = []","problem_names = []","","def generate_problem(i, myproblems, rands, problems, L_num, L_para):"," print('For problem ', myproblems[i]().name, ':') "," model_fixed_factors = L_para[i]"," "," name = myproblems[i]"," myproblem = name(model_fixed_factors=model_fixed_factors, random=rands[i])"," random_rng = [MRG32k3a(s_ss_sss_index=[2, 4 + L_num[i], ss]) for ss in range(myproblem.n_rngs)]"," rng_list2 = [MRG32k3a(s_ss_sss_index=[2, 4, ss]) for ss in range(myproblem.model.n_random)]"," "," if rands[i] == False: # Determinant case"," problems.append(myproblem)"," myproblem.name = str(myproblem.model.name) + str(0)"," problem_names.append(myproblem.name)"," print('')"," "," else:"," for j in range(L_num[i]):"," random_rng = rebase(random_rng, 1) # Advance the substream for different instances"," rng_list2 = rebase(rng_list2, 1)"," name = myproblems[i]"," myproblem = name(model_fixed_factors=model_fixed_factors, random=rands[i], random_rng=rng_list2)"," 
myproblem.attach_rngs(random_rng)"," # myproblem.name = str(myproblem.model.name) + str(j)"," myproblem.name = str(myproblem.name) + '-' + str(j)"," problems.append(myproblem)"," problem_names.append(myproblem.name)"," print('')"," "," return problems, problem_names"," ","# Generate problems","for i in range(len(L_num)):"," problems, problem_names = generate_problem(i, myproblems, rands, problems, L_num, L_para)","","# Initialize an instance of the experiment class.","mymetaexperiment = ProblemsSolvers(solver_names=solver_names, problems = problems)","","# Run a fixed number of macroreplications of each solver on each problem.","mymetaexperiment.run(n_macroreps=3)","","print(\"Post-processing results.\")","# Run a fixed number of postreplications at all recommended solutions.","mymetaexperiment.post_replicate(n_postreps=20)","# Find an optimal solution x* for normalization.","mymetaexperiment.post_normalize(n_postreps_init_opt=20)","","print(\"Plotting results.\")","# Produce basic plots of the solvers on the problems.","plot_solvability_profiles(experiments=mymetaexperiment.experiments, plot_type=\"cdf_solvability\")","","# Plots will be saved in the folder experiments/plots.","print(\"Finished. 
Plots can be found in experiments/plots folder.\")"],"stylingDirectives":[[{"start":0,"end":3,"cssClass":"pl-s"}],[{"start":0,"end":86,"cssClass":"pl-s"}],[{"start":0,"end":32,"cssClass":"pl-s"}],[{"start":0,"end":49,"cssClass":"pl-s"}],[{"start":0,"end":73,"cssClass":"pl-s"}],[{"start":0,"end":86,"cssClass":"pl-s"}],[{"start":0,"end":72,"cssClass":"pl-s"}],[{"start":0,"end":75,"cssClass":"pl-s"}],[{"start":0,"end":39,"cssClass":"pl-s"}],[{"start":0,"end":3,"cssClass":"pl-s"}],[],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":10,"cssClass":"pl-s1"}],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":9,"cssClass":"pl-s1"},{"start":10,"end":14,"cssClass":"pl-s1"},{"start":15,"end":17,"cssClass":"pl-k"},{"start":18,"end":19,"cssClass":"pl-s1"}],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":9,"cssClass":"pl-s1"}],[{"start":0,"end":6,"cssClass":"pl-k"},{"start":7,"end":9,"cssClass":"pl-s1"}],[{"start":0,"end":3,"cssClass":"pl-s1"},{"start":4,"end":8,"cssClass":"pl-s1"},{"start":9,"end":15,"cssClass":"pl-en"},{"start":16,"end":17,"cssClass":"pl-s1"},{"start":18,"end":25,"cssClass":"pl-en"},{"start":26,"end":27,"cssClass":"pl-s1"},{"start":28,"end":32,"cssClass":"pl-en"},{"start":33,"end":34,"cssClass":"pl-s1"},{"start":35,"end":42,"cssClass":"pl-en"},{"start":43,"end":46,"cssClass":"pl-s1"},{"start":47,"end":54,"cssClass":"pl-s1"},{"start":55,"end":63,"cssClass":"pl-s1"},{"start":65,"end":73,"cssClass":"pl-s1"},{"start":76,"end":80,"cssClass":"pl-s"}],[],[{"start":0,"end":61,"cssClass":"pl-c"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":27,"cssClass":"pl-s1"},{"start":28,"end":34,"cssClass":"pl-k"},{"start":35,"end":50,"cssClass":"pl-v"},{"start":52,"end":77,"cssClass":"pl-s1"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":8,"cssClass":"pl-s1"},{"start":9,"end":17,"cssClass":"pl-s1"},{"start":18,"end":24,"cssClass":"pl-k"},{"start":25,"end":33,"cssClass":"pl-v"}],[{"start":0,"end":4,"
cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":16,"cssClass":"pl-s1"},{"start":17,"end":23,"cssClass":"pl-k"},{"start":24,"end":32,"cssClass":"pl-v"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":18,"cssClass":"pl-s1"},{"start":19,"end":22,"cssClass":"pl-s1"},{"start":23,"end":29,"cssClass":"pl-k"},{"start":30,"end":37,"cssClass":"pl-v"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":18,"cssClass":"pl-s1"},{"start":19,"end":24,"cssClass":"pl-s1"},{"start":25,"end":31,"cssClass":"pl-k"},{"start":32,"end":47,"cssClass":"pl-v"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":18,"cssClass":"pl-s1"},{"start":19,"end":24,"cssClass":"pl-s1"},{"start":25,"end":31,"cssClass":"pl-k"},{"start":32,"end":46,"cssClass":"pl-v"},{"start":48,"end":68,"cssClass":"pl-v"}],[{"start":0,"end":4,"cssClass":"pl-k"},{"start":5,"end":11,"cssClass":"pl-s1"},{"start":12,"end":18,"cssClass":"pl-s1"},{"start":19,"end":27,"cssClass":"pl-s1"},{"start":28,"end":34,"cssClass":"pl-k"},{"start":35,"end":56,"cssClass":"pl-v"}],[],[],[{"start":0,"end":88,"cssClass":"pl-c"}],[{"start":0,"end":53,"cssClass":"pl-c"}],[{"start":0,"end":68,"cssClass":"pl-c"}],[{"start":0,"end":96,"cssClass":"pl-c"}],[{"start":0,"end":65,"cssClass":"pl-c"}],[{"start":0,"end":81,"cssClass":"pl-c"}],[{"start":0,"end":45,"cssClass":"pl-c"}],[],[{"start":0,"end":5,"cssClass":"pl-c"}],[{"start":0,"end":73,"cssClass":"pl-c"}],[{"start":0,"end":30,"cssClass":"pl-c"}],[{"start":0,"end":39,"cssClass":"pl-c"}],[{"start":0,"end":48,"cssClass":"pl-c"}],[{"start":0,"end":78,"cssClass":"pl-c"}],[{"start":0,"end":49,"cssClass":"pl-c"}],[{"start":0,"end":64,"cssClass":"pl-c"}],[{"start":0,"end":57,"cssClass":"pl-c"}],[],[{"start":0,"end":38,"cssClass":"pl-c"}],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":12,"cssClass":"pl-en"},{"start":13,"end":17,"css
Class":"pl-s1"}],[{"start":4,"end":5,"cssClass":"pl-v"},{"start":6,"end":7,"cssClass":"pl-c1"}],[{"start":4,"end":8,"cssClass":"pl-k"},{"start":9,"end":13,"cssClass":"pl-en"},{"start":14,"end":18,"cssClass":"pl-s1"},{"start":20,"end":22,"cssClass":"pl-k"},{"start":23,"end":27,"cssClass":"pl-s1"}],[{"start":8,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-c1"},{"start":17,"end":21,"cssClass":"pl-s1"},{"start":22,"end":28,"cssClass":"pl-en"},{"start":31,"end":34,"cssClass":"pl-k"},{"start":35,"end":39,"cssClass":"pl-s1"},{"start":40,"end":42,"cssClass":"pl-c1"},{"start":43,"end":47,"cssClass":"pl-s1"}],[{"start":8,"end":11,"cssClass":"pl-k"},{"start":12,"end":16,"cssClass":"pl-s1"},{"start":17,"end":19,"cssClass":"pl-c1"},{"start":20,"end":25,"cssClass":"pl-s1"}],[{"start":12,"end":14,"cssClass":"pl-k"},{"start":15,"end":18,"cssClass":"pl-c1"},{"start":19,"end":23,"cssClass":"pl-s1"},{"start":24,"end":34,"cssClass":"pl-en"},{"start":35,"end":38,"cssClass":"pl-s"},{"start":40,"end":43,"cssClass":"pl-c1"},{"start":44,"end":48,"cssClass":"pl-s1"}],[{"start":16,"end":17,"cssClass":"pl-v"},{"start":18,"end":24,"cssClass":"pl-en"},{"start":25,"end":29,"cssClass":"pl-s1"}],[{"start":4,"end":9,"cssClass":"pl-s1"},{"start":10,"end":11,"cssClass":"pl-c1"},{"start":12,"end":13,"cssClass":"pl-v"}],[{"start":4,"end":17,"cssClass":"pl-s1"},{"start":18,"end":19,"cssClass":"pl-c1"}],[{"start":4,"end":16,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"}],[{"start":4,"end":7,"cssClass":"pl-k"},{"start":8,"end":12,"cssClass":"pl-s1"},{"start":13,"end":15,"cssClass":"pl-c1"},{"start":16,"end":21,"cssClass":"pl-s1"}],[{"start":8,"end":10,"cssClass":"pl-k"},{"start":11,"end":19,"cssClass":"pl-s"},{"start":20,"end":22,"cssClass":"pl-c1"},{"start":23,"end":27,"cssClass":"pl-s1"}],[{"start":12,"end":25,"cssClass":"pl-s1"},{"start":26,"end":32,"cssClass":"pl-en"},{"start":33,"end":37,"cssClass":"pl-s1"}],[{"start":8,"end":12,"cssClass":"pl-k"},{"start":13,"end":27,
"cssClass":"pl-s"},{"start":28,"end":30,"cssClass":"pl-c1"},{"start":31,"end":35,"cssClass":"pl-s1"}],[{"start":12,"end":24,"cssClass":"pl-s1"},{"start":25,"end":26,"cssClass":"pl-c1"},{"start":27,"end":31,"cssClass":"pl-s1"}],[{"start":8,"end":12,"cssClass":"pl-k"}],[{"start":12,"end":24,"cssClass":"pl-s1"},{"start":25,"end":31,"cssClass":"pl-en"},{"start":32,"end":36,"cssClass":"pl-s1"}],[],[{"start":4,"end":7,"cssClass":"pl-k"},{"start":8,"end":9,"cssClass":"pl-s1"},{"start":10,"end":12,"cssClass":"pl-c1"},{"start":13,"end":26,"cssClass":"pl-s1"}],[{"start":8,"end":12,"cssClass":"pl-en"},{"start":13,"end":14,"cssClass":"pl-s1"}],[],[{"start":4,"end":12,"cssClass":"pl-s1"},{"start":13,"end":14,"cssClass":"pl-c1"}],[{"start":4,"end":16,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":19,"end":23,"cssClass":"pl-en"},{"start":24,"end":26,"cssClass":"pl-s1"},{"start":27,"end":34,"cssClass":"pl-en"},{"start":35,"end":45,"cssClass":"pl-s"},{"start":47,"end":59,"cssClass":"pl-s1"},{"start":61,"end":62,"cssClass":"pl-c1"}],[{"start":4,"end":7,"cssClass":"pl-k"},{"start":8,"end":9,"cssClass":"pl-s1"},{"start":10,"end":12,"cssClass":"pl-c1"},{"start":13,"end":25,"cssClass":"pl-s1"}],[{"start":8,"end":9,"cssClass":"pl-s1"},{"start":10,"end":11,"cssClass":"pl-c1"},{"start":12,"end":14,"cssClass":"pl-s1"},{"start":15,"end":22,"cssClass":"pl-en"},{"start":23,"end":33,"cssClass":"pl-s"},{"start":35,"end":36,"cssClass":"pl-s1"},{"start":38,"end":39,"cssClass":"pl-c1"}],[{"start":8,"end":16,"cssClass":"pl-s1"},{"start":17,"end":23,"cssClass":"pl-en"},{"start":24,"end":28,"cssClass":"pl-en"},{"start":29,"end":30,"cssClass":"pl-s1"}],[],[{"start":4,"end":16,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":20,"end":21,"cssClass":"pl-s1"},{"start":22,"end":23,"cssClass":"pl-c1"},{"start":25,"end":28,"cssClass":"pl-k"},{"start":29,"end":30,"cssClass":"pl-s1"},{"start":31,"end":33,"cssClass":"pl-c1"},{"start":34,"end":42,"cssClass":"pl-s1"}],[{"
start":4,"end":9,"cssClass":"pl-v"},{"start":10,"end":11,"cssClass":"pl-c1"},{"start":13,"end":14,"cssClass":"pl-s1"},{"start":15,"end":16,"cssClass":"pl-c1"},{"start":18,"end":21,"cssClass":"pl-k"},{"start":22,"end":23,"cssClass":"pl-s1"},{"start":24,"end":26,"cssClass":"pl-c1"},{"start":27,"end":35,"cssClass":"pl-s1"}],[{"start":4,"end":10,"cssClass":"pl-v"},{"start":11,"end":12,"cssClass":"pl-c1"},{"start":14,"end":15,"cssClass":"pl-s1"},{"start":16,"end":17,"cssClass":"pl-c1"},{"start":19,"end":22,"cssClass":"pl-k"},{"start":23,"end":24,"cssClass":"pl-s1"},{"start":25,"end":27,"cssClass":"pl-c1"},{"start":28,"end":36,"cssClass":"pl-s1"}],[],[{"start":4,"end":10,"cssClass":"pl-k"},{"start":11,"end":23,"cssClass":"pl-s1"},{"start":25,"end":37,"cssClass":"pl-s1"},{"start":39,"end":44,"cssClass":"pl-v"},{"start":46,"end":52,"cssClass":"pl-v"}],[],[{"start":0,"end":41,"cssClass":"pl-c"}],[{"start":0,"end":4,"cssClass":"pl-s1"},{"start":5,"end":6,"cssClass":"pl-c1"},{"start":7,"end":12,"cssClass":"pl-en"},{"start":13,"end":56,"cssClass":"pl-s"}],[{"start":0,"end":2,"cssClass":"pl-k"},{"start":3,"end":6,"cssClass":"pl-s"},{"start":7,"end":9,"cssClass":"pl-c1"},{"start":10,"end":14,"cssClass":"pl-s1"},{"start":17,"end":64,"cssClass":"pl-c"}],[{"start":4,"end":8,"cssClass":"pl-s1"},{"start":9,"end":10,"cssClass":"pl-c1"},{"start":11,"end":15,"cssClass":"pl-s1"},{"start":16,"end":23,"cssClass":"pl-en"},{"start":24,"end":27,"cssClass":"pl-s"},{"start":29,"end":31,"cssClass":"pl-s"}],[],[{"start":0,"end":12,"cssClass":"pl-s1"},{"start":14,"end":25,"cssClass":"pl-s1"},{"start":27,"end":32,"cssClass":"pl-v"},{"start":34,"end":40,"cssClass":"pl-v"},{"start":41,"end":42,"cssClass":"pl-c1"},{"start":43,"end":51,"cssClass":"pl-en"},{"start":52,"end":56,"cssClass":"pl-s1"}],[{"start":0,"end":5,"cssClass":"pl-s1"},{"start":6,"end":7,"cssClass":"pl-c1"},{"start":9,"end":13,"cssClass":"pl-c1"},{"start":14,"end":17,"cssClass":"pl-k"},{"start":18,"end":19,"cssClass":"pl-s1"},{"start":2
0,"end":22,"cssClass":"pl-c1"},{"start":23,"end":28,"cssClass":"pl-en"},{"start":29,"end":32,"cssClass":"pl-en"},{"start":33,"end":44,"cssClass":"pl-s1"}],[],[{"start":0,"end":39,"cssClass":"pl-c"}],[{"start":0,"end":2,"cssClass":"pl-k"},{"start":3,"end":6,"cssClass":"pl-en"},{"start":7,"end":12,"cssClass":"pl-v"},{"start":14,"end":16,"cssClass":"pl-c1"},{"start":17,"end":20,"cssClass":"pl-en"},{"start":21,"end":32,"cssClass":"pl-s1"},{"start":34,"end":36,"cssClass":"pl-c1"},{"start":37,"end":40,"cssClass":"pl-en"},{"start":41,"end":47,"cssClass":"pl-v"},{"start":49,"end":51,"cssClass":"pl-c1"},{"start":52,"end":55,"cssClass":"pl-en"},{"start":56,"end":67,"cssClass":"pl-s1"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":116,"cssClass":"pl-s"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":40,"cssClass":"pl-s"}],[],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":10,"cssClass":"pl-en"},{"start":11,"end":21,"cssClass":"pl-s1"},{"start":23,"end":24,"cssClass":"pl-s1"}],[{"start":4,"end":12,"cssClass":"pl-s1"},{"start":13,"end":14,"cssClass":"pl-c1"}],[{"start":4,"end":7,"cssClass":"pl-k"},{"start":8,"end":11,"cssClass":"pl-s1"},{"start":12,"end":14,"cssClass":"pl-c1"},{"start":15,"end":25,"cssClass":"pl-s1"}],[{"start":8,"end":20,"cssClass":"pl-s1"},{"start":21,"end":22,"cssClass":"pl-c1"},{"start":23,"end":26,"cssClass":"pl-s1"},{"start":27,"end":41,"cssClass":"pl-s1"},{"start":42,"end":43,"cssClass":"pl-c1"}],[{"start":8,"end":23,"cssClass":"pl-s1"},{"start":24,"end":25,"cssClass":"pl-c1"},{"start":26,"end":29,"cssClass":"pl-s1"},{"start":30,"end":44,"cssClass":"pl-s1"},{"start":45,"end":46,"cssClass":"pl-c1"}],[{"start":8,"end":26,"cssClass":"pl-s1"},{"start":27,"end":28,"cssClass":"pl-c1"},{"start":29,"end":32,"cssClass":"pl-s1"},{"start":33,"end":47,"cssClass":"pl-s1"},{"start":48,"end":49,"cssClass":"pl-c1"}],[{"start":8,"end":16,"cssClass":"pl-s1"},{"start":17,"end":23,"cssClass":"pl-en"},{"start":24,"end":32,"cssClass":"pl-v"},{"
start":33,"end":47,"cssClass":"pl-s1"},{"start":47,"end":48,"cssClass":"pl-c1"},{"start":49,"end":61,"cssClass":"pl-s1"},{"start":63,"end":78,"cssClass":"pl-s1"},{"start":79,"end":80,"cssClass":"pl-c1"},{"start":81,"end":82,"cssClass":"pl-s1"},{"start":84,"end":102,"cssClass":"pl-s1"}],[{"start":4,"end":14,"cssClass":"pl-s1"},{"start":15,"end":16,"cssClass":"pl-c1"},{"start":17,"end":25,"cssClass":"pl-s1"}],[{"start":4,"end":10,"cssClass":"pl-k"},{"start":11,"end":21,"cssClass":"pl-s1"}],[],[{"start":0,"end":10,"cssClass":"pl-s1"},{"start":11,"end":12,"cssClass":"pl-c1"},{"start":13,"end":24,"cssClass":"pl-s1"}],[],[{"start":0,"end":37,"cssClass":"pl-c"}],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":5,"cssClass":"pl-s1"},{"start":6,"end":8,"cssClass":"pl-c1"},{"start":9,"end":14,"cssClass":"pl-en"},{"start":15,"end":18,"cssClass":"pl-en"},{"start":19,"end":30,"cssClass":"pl-s1"}],[{"start":4,"end":6,"cssClass":"pl-k"},{"start":7,"end":12,"cssClass":"pl-v"},{"start":13,"end":14,"cssClass":"pl-s1"},{"start":16,"end":18,"cssClass":"pl-c1"},{"start":19,"end":20,"cssClass":"pl-c1"}],[{"start":8,"end":13,"cssClass":"pl-v"},{"start":14,"end":15,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":19,"end":20,"cssClass":"pl-c1"}],[{"start":8,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":19,"end":24,"cssClass":"pl-c1"}],[{"start":4,"end":8,"cssClass":"pl-k"}],[{"start":8,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":19,"end":23,"cssClass":"pl-c1"}],[],[{"start":0,"end":8,"cssClass":"pl-s1"},{"start":9,"end":10,"cssClass":"pl-c1"}],[{"start":0,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-c1"}],[],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":20,"cssClass":"pl-en"},{"start":21,"end":22,"cssClass":"pl-s1"},{"start":24,"end":34,"cssClass":"pl-s1"},{"start":36,"end":41,"cssClas
s":"pl-s1"},{"start":43,"end":51,"cssClass":"pl-s1"},{"start":53,"end":58,"cssClass":"pl-v"},{"start":60,"end":66,"cssClass":"pl-v"}],[{"start":4,"end":9,"cssClass":"pl-en"},{"start":10,"end":24,"cssClass":"pl-s"},{"start":26,"end":36,"cssClass":"pl-s1"},{"start":37,"end":38,"cssClass":"pl-s1"},{"start":42,"end":46,"cssClass":"pl-s1"},{"start":48,"end":51,"cssClass":"pl-s"}],[{"start":4,"end":23,"cssClass":"pl-s1"},{"start":24,"end":25,"cssClass":"pl-c1"},{"start":26,"end":32,"cssClass":"pl-v"},{"start":33,"end":34,"cssClass":"pl-s1"}],[],[{"start":4,"end":8,"cssClass":"pl-s1"},{"start":9,"end":10,"cssClass":"pl-c1"},{"start":11,"end":21,"cssClass":"pl-s1"},{"start":22,"end":23,"cssClass":"pl-s1"}],[{"start":4,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-c1"},{"start":16,"end":20,"cssClass":"pl-en"},{"start":21,"end":40,"cssClass":"pl-s1"},{"start":40,"end":41,"cssClass":"pl-c1"},{"start":41,"end":60,"cssClass":"pl-s1"},{"start":62,"end":68,"cssClass":"pl-s1"},{"start":68,"end":69,"cssClass":"pl-c1"},{"start":69,"end":74,"cssClass":"pl-s1"},{"start":75,"end":76,"cssClass":"pl-s1"}],[{"start":4,"end":14,"cssClass":"pl-s1"},{"start":15,"end":16,"cssClass":"pl-c1"},{"start":18,"end":26,"cssClass":"pl-v"},{"start":27,"end":41,"cssClass":"pl-s1"},{"start":41,"end":42,"cssClass":"pl-c1"},{"start":43,"end":44,"cssClass":"pl-c1"},{"start":46,"end":47,"cssClass":"pl-c1"},{"start":48,"end":49,"cssClass":"pl-c1"},{"start":50,"end":55,"cssClass":"pl-v"},{"start":56,"end":57,"cssClass":"pl-s1"},{"start":60,"end":62,"cssClass":"pl-s1"},{"start":65,"end":68,"cssClass":"pl-k"},{"start":69,"end":71,"cssClass":"pl-s1"},{"start":72,"end":74,"cssClass":"pl-c1"},{"start":75,"end":80,"cssClass":"pl-en"},{"start":81,"end":90,"cssClass":"pl-s1"},{"start":91,"end":97,"cssClass":"pl-s1"}],[{"start":4,"end":13,"cssClass":"pl-s1"},{"start":14,"end":15,"cssClass":"pl-c1"},{"start":17,"end":25,"cssClass":"pl-v"},{"start":26,"end":40,"cssClass":"pl-s1"},{"start":40,"end":41,"c
ssClass":"pl-c1"},{"start":42,"end":43,"cssClass":"pl-c1"},{"start":45,"end":46,"cssClass":"pl-c1"},{"start":48,"end":50,"cssClass":"pl-s1"},{"start":53,"end":56,"cssClass":"pl-k"},{"start":57,"end":59,"cssClass":"pl-s1"},{"start":60,"end":62,"cssClass":"pl-c1"},{"start":63,"end":68,"cssClass":"pl-en"},{"start":69,"end":78,"cssClass":"pl-s1"},{"start":79,"end":84,"cssClass":"pl-s1"},{"start":85,"end":93,"cssClass":"pl-s1"}],[],[{"start":4,"end":6,"cssClass":"pl-k"},{"start":7,"end":12,"cssClass":"pl-s1"},{"start":13,"end":14,"cssClass":"pl-s1"},{"start":16,"end":18,"cssClass":"pl-c1"},{"start":19,"end":24,"cssClass":"pl-c1"},{"start":27,"end":45,"cssClass":"pl-c"}],[{"start":8,"end":16,"cssClass":"pl-s1"},{"start":17,"end":23,"cssClass":"pl-en"},{"start":24,"end":33,"cssClass":"pl-s1"}],[{"start":8,"end":17,"cssClass":"pl-s1"},{"start":18,"end":22,"cssClass":"pl-s1"},{"start":23,"end":24,"cssClass":"pl-c1"},{"start":25,"end":28,"cssClass":"pl-en"},{"start":29,"end":38,"cssClass":"pl-s1"},{"start":39,"end":44,"cssClass":"pl-s1"},{"start":45,"end":49,"cssClass":"pl-s1"},{"start":51,"end":52,"cssClass":"pl-c1"},{"start":53,"end":56,"cssClass":"pl-en"},{"start":57,"end":58,"cssClass":"pl-c1"}],[{"start":8,"end":21,"cssClass":"pl-s1"},{"start":22,"end":28,"cssClass":"pl-en"},{"start":29,"end":38,"cssClass":"pl-s1"},{"start":39,"end":43,"cssClass":"pl-s1"}],[{"start":8,"end":13,"cssClass":"pl-en"},{"start":14,"end":16,"cssClass":"pl-s"}],[],[{"start":4,"end":8,"cssClass":"pl-k"}],[{"start":8,"end":11,"cssClass":"pl-k"},{"start":12,"end":13,"cssClass":"pl-s1"},{"start":14,"end":16,"cssClass":"pl-c1"},{"start":17,"end":22,"cssClass":"pl-en"},{"start":23,"end":28,"cssClass":"pl-v"},{"start":29,"end":30,"cssClass":"pl-s1"}],[{"start":12,"end":22,"cssClass":"pl-s1"},{"start":23,"end":24,"cssClass":"pl-c1"},{"start":25,"end":31,"cssClass":"pl-en"},{"start":32,"end":42,"cssClass":"pl-s1"},{"start":44,"end":45,"cssClass":"pl-c1"},{"start":48,"end":95,"cssClass":"pl-c"}],[{"start"
:12,"end":21,"cssClass":"pl-s1"},{"start":22,"end":23,"cssClass":"pl-c1"},{"start":24,"end":30,"cssClass":"pl-en"},{"start":31,"end":40,"cssClass":"pl-s1"},{"start":42,"end":43,"cssClass":"pl-c1"}],[{"start":12,"end":16,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":19,"end":29,"cssClass":"pl-s1"},{"start":30,"end":31,"cssClass":"pl-s1"}],[{"start":12,"end":21,"cssClass":"pl-s1"},{"start":22,"end":23,"cssClass":"pl-c1"},{"start":24,"end":28,"cssClass":"pl-en"},{"start":29,"end":48,"cssClass":"pl-s1"},{"start":48,"end":49,"cssClass":"pl-c1"},{"start":49,"end":68,"cssClass":"pl-s1"},{"start":70,"end":76,"cssClass":"pl-s1"},{"start":76,"end":77,"cssClass":"pl-c1"},{"start":77,"end":82,"cssClass":"pl-s1"},{"start":83,"end":84,"cssClass":"pl-s1"},{"start":87,"end":97,"cssClass":"pl-s1"},{"start":97,"end":98,"cssClass":"pl-c1"},{"start":98,"end":107,"cssClass":"pl-s1"}],[{"start":12,"end":21,"cssClass":"pl-s1"},{"start":22,"end":33,"cssClass":"pl-en"},{"start":34,"end":44,"cssClass":"pl-s1"}],[{"start":12,"end":65,"cssClass":"pl-c"}],[{"start":12,"end":21,"cssClass":"pl-s1"},{"start":22,"end":26,"cssClass":"pl-s1"},{"start":27,"end":28,"cssClass":"pl-c1"},{"start":29,"end":32,"cssClass":"pl-en"},{"start":33,"end":42,"cssClass":"pl-s1"},{"start":43,"end":47,"cssClass":"pl-s1"},{"start":49,"end":50,"cssClass":"pl-c1"},{"start":51,"end":54,"cssClass":"pl-s"},{"start":55,"end":56,"cssClass":"pl-c1"},{"start":57,"end":60,"cssClass":"pl-en"},{"start":61,"end":62,"cssClass":"pl-s1"}],[{"start":12,"end":20,"cssClass":"pl-s1"},{"start":21,"end":27,"cssClass":"pl-en"},{"start":28,"end":37,"cssClass":"pl-s1"}],[{"start":12,"end":25,"cssClass":"pl-s1"},{"start":26,"end":32,"cssClass":"pl-en"},{"start":33,"end":42,"cssClass":"pl-s1"},{"start":43,"end":47,"cssClass":"pl-s1"}],[{"start":12,"end":17,"cssClass":"pl-en"},{"start":18,"end":20,"cssClass":"pl-s"}],[],[{"start":4,"end":10,"cssClass":"pl-k"},{"start":11,"end":19,"cssClass":"pl-s1"},{"start":21,"end":34,"c
ssClass":"pl-s1"}],[],[{"start":0,"end":19,"cssClass":"pl-c"}],[{"start":0,"end":3,"cssClass":"pl-k"},{"start":4,"end":5,"cssClass":"pl-s1"},{"start":6,"end":8,"cssClass":"pl-c1"},{"start":9,"end":14,"cssClass":"pl-en"},{"start":15,"end":18,"cssClass":"pl-en"},{"start":19,"end":24,"cssClass":"pl-v"}],[{"start":8,"end":16,"cssClass":"pl-s1"},{"start":18,"end":31,"cssClass":"pl-s1"},{"start":32,"end":33,"cssClass":"pl-c1"},{"start":34,"end":50,"cssClass":"pl-en"},{"start":51,"end":52,"cssClass":"pl-s1"},{"start":54,"end":64,"cssClass":"pl-s1"},{"start":66,"end":71,"cssClass":"pl-s1"},{"start":73,"end":81,"cssClass":"pl-s1"},{"start":83,"end":88,"cssClass":"pl-v"},{"start":90,"end":96,"cssClass":"pl-v"}],[],[{"start":0,"end":49,"cssClass":"pl-c"}],[{"start":0,"end":16,"cssClass":"pl-s1"},{"start":17,"end":18,"cssClass":"pl-c1"},{"start":19,"end":34,"cssClass":"pl-v"},{"start":35,"end":47,"cssClass":"pl-s1"},{"start":47,"end":48,"cssClass":"pl-c1"},{"start":48,"end":60,"cssClass":"pl-s1"},{"start":62,"end":70,"cssClass":"pl-s1"},{"start":71,"end":72,"cssClass":"pl-c1"},{"start":73,"end":81,"cssClass":"pl-s1"}],[],[{"start":0,"end":73,"cssClass":"pl-c"}],[{"start":0,"end":16,"cssClass":"pl-s1"},{"start":17,"end":20,"cssClass":"pl-en"},{"start":21,"end":32,"cssClass":"pl-s1"},{"start":32,"end":33,"cssClass":"pl-c1"},{"start":33,"end":34,"cssClass":"pl-c1"}],[],[{"start":0,"end":5,"cssClass":"pl-en"},{"start":6,"end":32,"cssClass":"pl-s"}],[{"start":0,"end":70,"cssClass":"pl-c"}],[{"start":0,"end":16,"cssClass":"pl-s1"},{"start":17,"end":31,"cssClass":"pl-en"},{"start":32,"end":42,"cssClass":"pl-s1"},{"start":42,"end":43,"cssClass":"pl-c1"},{"start":43,"end":45,"cssClass":"pl-c1"}],[{"start":0,"end":48,"cssClass":"pl-c"}],[{"start":0,"end":16,"cssClass":"pl-s1"},{"start":17,"end":31,"cssClass":"pl-en"},{"start":32,"end":51,"cssClass":"pl-s1"},{"start":51,"end":52,"cssClass":"pl-c1"},{"start":52,"end":54,"cssClass":"pl-c1"}],[],[{"start":0,"end":5,"cssClass":"pl-en"},{"star
t":6,"end":25,"cssClass":"pl-s"}],[{"start":0,"end":53,"cssClass":"pl-c"}],[{"start":0,"end":25,"cssClass":"pl-en"},{"start":26,"end":37,"cssClass":"pl-s1"},{"start":37,"end":38,"cssClass":"pl-c1"},{"start":38,"end":54,"cssClass":"pl-s1"},{"start":55,"end":66,"cssClass":"pl-s1"},{"start":68,"end":77,"cssClass":"pl-s1"},{"start":77,"end":78,"cssClass":"pl-c1"},{"start":78,"end":95,"cssClass":"pl-s"}],[],[{"start":0,"end":54,"cssClass":"pl-c"}],[{"start":0,"end":5,"cssClass":"pl-en"},{"start":6,"end":65,"cssClass":"pl-s"}]],"csv":null,"csvError":null,"dependabotInfo":{"showConfigurationBanner":null,"configFilePath":null,"networkDependabotPath":"/simopt-admin/simopt/network/updates","dismissConfigurationNoticePath":"/settings/dismiss-notice/dependabot_configuration_notice","configurationNoticeDismissed":false,"repoAlertsPath":"/simopt-admin/simopt/security/dependabot","repoSecurityAndAnalysisPath":"/simopt-admin/simopt/settings/security_analysis","repoOwnerIsOrg":false,"currentUserCanAdminRepo":false},"displayName":"demo_user.py","displayUrl":"https://github.com/simopt-admin/simopt/blob/python_dev_litong/demo_user.py?raw=true","headerInfo":{"blobSize":"6.31 KB","deleteInfo":{"deleteTooltip":"Delete this file"},"editInfo":{"editTooltip":"Edit this 
file"},"ghDesktopPath":"x-github-client://openRepo/https://github.com/simopt-admin/simopt?branch=python_dev_litong&filepath=demo_user.py","gitLfsPath":null,"onBranch":true,"shortPath":"1872aa0","siteNavLoginPath":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsimopt-admin%2Fsimopt%2Fblob%2Fpython_dev_litong%2Fdemo_user.py","isCSV":false,"isRichtext":false,"toc":null,"lineInfo":{"truncatedLoc":"167","truncatedSloc":"139"},"mode":"file"},"image":false,"isCodeownersFile":null,"isPlain":false,"isValidLegacyIssueTemplate":false,"issueTemplateHelpUrl":"https://docs.github.com/articles/about-issue-and-pull-request-templates","issueTemplate":null,"discussionTemplate":null,"language":"Python","languageID":303,"large":false,"loggedIn":true,"newDiscussionPath":"/simopt-admin/simopt/discussions/new","newIssuePath":"/simopt-admin/simopt/issues/new","planSupportInfo":{"repoIsFork":null,"repoOwnedByCurrentUser":null,"requestFullPath":"/simopt-admin/simopt/blob/python_dev_litong/demo_user.py","showFreeOrgGatedFeatureMessage":null,"showPlanSupportBanner":null,"upgradeDataAttributes":null,"upgradePath":null},"publishBannersInfo":{"dismissActionNoticePath":"/settings/dismiss-notice/publish_action_from_dockerfile","dismissStackNoticePath":"/settings/dismiss-notice/publish_stack_from_file","releasePath":"/simopt-admin/simopt/releases/new?marketplace=true","showPublishActionBanner":false,"showPublishStackBanner":false},"renderImageOrRaw":false,"richText":null,"renderedFileInfo":null,"shortPath":null,"tabSize":8,"topBannersInfo":{"overridingGlobalFundingFile":false,"globalPreferredFundingPath":null,"repoOwner":"simopt-admin","repoName":"simopt","showInvalidCitationWarning":false,"citationHelpUrl":"https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/creating-a-repository-on-github/about-citation-files","showDependabotConfigurationBanner":null,"actionsOnboardingTip":null},"truncated":false,"viewable":true,"workflowRedirectUrl":null,"symbols":{"timedOut":false,"notAna
lyzed":false,"symbols":[{"name":"get_info","kind":"function","identStart":2086,"identEnd":2094,"extentStart":2082,"extentEnd":2985,"fullyQualifiedName":"get_info","identUtf16":{"start":{"lineNumber":46,"utf16Col":4},"end":{"lineNumber":46,"utf16Col":12}},"extentUtf16":{"start":{"lineNumber":46,"utf16Col":0},"end":{"lineNumber":77,"utf16Col":52}}},{"name":"path","kind":"constant","identStart":3029,"identEnd":3033,"extentStart":3029,"extentEnd":3086,"fullyQualifiedName":"path","identUtf16":{"start":{"lineNumber":80,"utf16Col":0},"end":{"lineNumber":80,"utf16Col":4}},"extentUtf16":{"start":{"lineNumber":80,"utf16Col":0},"end":{"lineNumber":80,"utf16Col":57}}},{"name":"rands","kind":"constant","identStart":3248,"identEnd":3253,"extentStart":3248,"extentEnd":3295,"fullyQualifiedName":"rands","identUtf16":{"start":{"lineNumber":85,"utf16Col":0},"end":{"lineNumber":85,"utf16Col":5}},"extentUtf16":{"start":{"lineNumber":85,"utf16Col":0},"end":{"lineNumber":85,"utf16Col":47}}},{"name":"rebase","kind":"function","identStart":3572,"identEnd":3578,"extentStart":3568,"extentEnd":3937,"fullyQualifiedName":"rebase","identUtf16":{"start":{"lineNumber":92,"utf16Col":4},"end":{"lineNumber":92,"utf16Col":10}},"extentUtf16":{"start":{"lineNumber":92,"utf16Col":0},"end":{"lineNumber":100,"utf16Col":21}}},{"name":"myproblems","kind":"constant","identStart":3939,"identEnd":3949,"extentStart":3939,"extentEnd":3963,"fullyQualifiedName":"myproblems","identUtf16":{"start":{"lineNumber":102,"utf16Col":0},"end":{"lineNumber":102,"utf16Col":10}},"extentUtf16":{"start":{"lineNumber":102,"utf16Col":0},"end":{"lineNumber":102,"utf16Col":24}}},{"name":"problems","kind":"constant","identStart":4140,"identEnd":4148,"extentStart":4140,"extentEnd":4153,"fullyQualifiedName":"problems","identUtf16":{"start":{"lineNumber":112,"utf16Col":0},"end":{"lineNumber":112,"utf16Col":8}},"extentUtf16":{"start":{"lineNumber":112,"utf16Col":0},"end":{"lineNumber":112,"utf16Col":13}}},{"name":"problem_names","kind":"co
nstant","identStart":4154,"identEnd":4167,"extentStart":4154,"extentEnd":4172,"fullyQualifiedName":"problem_names","identUtf16":{"start":{"lineNumber":113,"utf16Col":0},"end":{"lineNumber":113,"utf16Col":13}},"extentUtf16":{"start":{"lineNumber":113,"utf16Col":0},"end":{"lineNumber":113,"utf16Col":18}}},{"name":"generate_problem","kind":"function","identStart":4178,"identEnd":4194,"extentStart":4174,"extentEnd":5505,"fullyQualifiedName":"generate_problem","identUtf16":{"start":{"lineNumber":115,"utf16Col":4},"end":{"lineNumber":115,"utf16Col":20}},"extentUtf16":{"start":{"lineNumber":115,"utf16Col":0},"end":{"lineNumber":143,"utf16Col":34}}},{"name":"mymetaexperiment","kind":"constant","identStart":5707,"identEnd":5723,"extentStart":5707,"extentEnd":5789,"fullyQualifiedName":"mymetaexperiment","identUtf16":{"start":{"lineNumber":150,"utf16Col":0},"end":{"lineNumber":150,"utf16Col":16}},"extentUtf16":{"start":{"lineNumber":150,"utf16Col":0},"end":{"lineNumber":150,"utf16Col":82}}}]}},"copilotInfo":{"notices":{"codeViewPopover":{"dismissed":false,"dismissPath":"/settings/dismiss-notice/code_view_copilot_popover"}},"userAccess":{"accessAllowed":false,"hasSubscriptionEnded":false,"orgHasCFBAccess":false,"userHasCFIAccess":false,"userHasOrgs":false,"userIsOrgAdmin":false,"userIsOrgMember":false,"business":null,"featureRequestInfo":null}},"csrf_tokens":{"/simopt-admin/simopt/branches":{"post":"gb0BMxr-fXyz6kun91_9a83dZUM5fg4_5o-QJh56D-QpssW89P0De0BsQaIBlPFeinKsJigqRzWAI698DzOi2w"},"/repos/preferences":{"post":"EIlhdtGF6ei2TTLwdZupeDLV6yHP0AEgzvpCLVPSf8WEYPTyeAB4ZpYPaebAUB-CDHyStzF02zmywMFEphu6QQ"}}},"title":"simopt/demo_user.py at python_dev_litong · simopt-admin/simopt"} \ No newline at end of file diff --git a/simopt/experiment_base.py b/simopt/experiment_base.py index bdabf48c1..a325ec3fb 100644 --- a/simopt/experiment_base.py +++ b/simopt/experiment_base.py @@ -4,6 +4,7 @@ ------- Provide base classes for problem-solver pairs and helper functions for reading/writing 
data and plotting. +This is the modified version to generate and solve random problem instances by solvers. """ import numpy as np @@ -15,8 +16,8 @@ import importlib import time import os -from mrg32k3a.mrg32k3a import MRG32k3a - +# from mrg32k3a.mrg32k3a import MRG32k3a +from rng.mrg32k3a import MRG32k3a #when do the multinomial, change to the local from .base import Solution from .directory import solver_directory, problem_directory @@ -464,7 +465,7 @@ def run(self, n_macroreps): for mrep in range(self.n_macroreps): print(f"Running macroreplication {mrep + 1} of {self.n_macroreps} of Solver {self.solver.name} on Problem {self.problem.name}.") # Create, initialize, and attach RNGs used for simulating solutions. - progenitor_rngs = [MRG32k3a(s_ss_sss_index=[mrep + 3, ss, 0]) for ss in range(self.problem.model.n_rngs)] + progenitor_rngs = [MRG32k3a(s_ss_sss_index=[mrep + 2, ss, 0]) for ss in range(self.problem.model.n_rngs)] self.solver.solution_progenitor_rngs = progenitor_rngs # print([rng.s_ss_sss_index for rng in progenitor_rngs]) # Run the solver on the problem. @@ -524,7 +525,6 @@ def post_replicate(self, n_postreps, crn_across_budget=True, crn_across_macrorep self.all_post_replicates = [[[] for _ in range(len(self.all_intermediate_budgets[mrep]))] for mrep in range(self.n_macroreps)] # Simulate intermediate recommended solutions. for mrep in range(self.n_macroreps): - print(f"Postreplicating macroreplication {mrep + 1} of {self.n_macroreps} of Solver {self.solver.name} on Problem {self.problem.name}.") for budget_index in range(len(self.all_intermediate_budgets[mrep])): x = self.all_recommended_xs[mrep][budget_index] fresh_soln = Solution(x, self.problem) @@ -760,7 +760,7 @@ def log_experiment_results(self, print_solutions=True): new_path = self.file_name_path.replace("outputs", "logs") # Adjust file_path_name to correct folder. new_path2 = new_path.replace(".pickle", "") # Remove .pickle from .txt file name. - # Create directories if they do not exist. 
+ # Create directories if they do no exist. if "./experiments/logs" in new_path2 and not os.path.exists("./experiments/logs"): os.makedirs("./experiments", exist_ok=True) os.makedirs("./experiments/logs") @@ -799,10 +799,7 @@ def log_experiment_results(self, print_solutions=True): # and how many replications were taken of them (n_postreps_init_opt). if self.check_postnormalize(): file.write(f"The initial solution is {tuple([round(x, 4) for x in self.x0])}. Its estimated objective is {round(np.mean(self.x0_postreps), 4)}.\n") - if self.xstar is None: - file.write(f"No proxy optimal solution was used. A proxy optimal objective function value of {self.problem.optimal_value[0]} was provided.\n") - else: - file.write(f"The proxy optimal solution is {tuple([round(x, 4) for x in self.xstar])}. Its estimated objective is {round(np.mean(self.xstar_postreps), 4)}.\n") + file.write(f"The proxy optimal solution is {tuple([round(x, 4) for x in self.xstar])}. Its estimated objective is {round(np.mean(self.xstar_postreps), 4)}.\n") file.write(f"{self.n_postreps_init_opt} postreplications were taken at x0 and x_star.\n\n") # Display recommended solution at each budget value for each macroreplication. file.write('Macroreplication Results:\n') @@ -899,7 +896,6 @@ def post_normalize(experiments, n_postreps_init_opt, crn_across_init_opt=True, p elif getattr(experiment, "n_postreps", None) != getattr(ref_experiment, "n_postreps", None): print("At least two experiments have different numbers of post-replications.") print("Estimation of optimal solution x* may be based on different numbers of post-replications.") - print(f"Postnormalizing on Problem {ref_experiment.problem.name}.") # Take post-replications at common x0. # Create, initialize, and attach RNGs for model. # Stream 0: reserved for post-replications. @@ -920,17 +916,11 @@ def post_normalize(experiments, n_postreps_init_opt, crn_across_init_opt=True, p # objective function value. 
If deterministic (proxy for) f(x*), # create duplicate post-replicates to facilitate later bootstrapping. # If proxy for f(x*) is specified... - print("Finding f(x*)...") if proxy_opt_val is not None: - if proxy_opt_x is None: - xstar = None - else: - xstar = proxy_opt_x # Assuming the provided x is optimal in this case. - print("\t...using provided proxy f(x*).") + xstar = None xstar_postreps = [proxy_opt_val] * n_postreps_init_opt # ...else if proxy for x* is specified... elif proxy_opt_x is not None: - print("\t...using provided proxy x*.") xstar = proxy_opt_x # Take post-replications at xstar. opt_soln = Solution(xstar, ref_experiment.problem) @@ -939,14 +929,10 @@ def post_normalize(experiments, n_postreps_init_opt, crn_across_init_opt=True, p xstar_postreps = list(opt_soln.objectives[:n_postreps_init_opt][:, 0]) # 0 <- assuming only one objective # ...else if f(x*) is known... elif ref_experiment.problem.optimal_value is not None: - print("\t...using coded f(x*).") xstar = None - # NOTE: optimal_value is a tuple. - # Currently hard-coded for single objective case, i.e., optimal_value[0]. - xstar_postreps = [ref_experiment.problem.optimal_value[0]] * n_postreps_init_opt + xstar_postreps = [ref_experiment.problem.optimal_value] * n_postreps_init_opt # ...else if x* is known... elif ref_experiment.problem.optimal_solution is not None: - print("\t...using coded x*.") xstar = ref_experiment.problem.optimal_solution # Take post-replications at xstar. opt_soln = Solution(xstar, ref_experiment.problem) @@ -956,7 +942,6 @@ def post_normalize(experiments, n_postreps_init_opt, crn_across_init_opt=True, p # ...else determine x* empirically as estimated best solution # found by any solver on any macroreplication. else: - print("\t...using best postreplicated solution as proxy for x*.") # TO DO: Simplify this block of code. 
best_est_objectives = np.zeros(len(experiments)) for experiment_idx in range(len(experiments)): @@ -1079,7 +1064,7 @@ def bootstrap_procedure(experiments, n_bootstraps, conf_level, plot_type, beta=N "quantile_solvability" : quantile solvability profile; "diff_cdf_solvability" : difference of cdf solvability profiles; - + "diff_quantile_solvability" : difference of quantile solvability profiles. beta : float, optional Quantile to plot, e.g., beta quantile; in (0, 1). @@ -1155,7 +1140,7 @@ def functional_of_curves(bootstrap_curves, plot_type, beta=0.5, solve_tol=0.1): "solve_time_quantile" : beta quantile of solve time; "solve_time_cdf" : cdf of solve time; - + "cdf_solvability" : cdf solvability profile; "quantile_solvability" : quantile solvability profile; @@ -2036,7 +2021,7 @@ def plot_terminal_progress(experiments, plot_type="violin", normalize=True, all_ ProblemSolver pairs of different solvers on a common problem. plot_type : str, default="violin" String indicating which type of plot to produce: - + "box" : comparative box plots; "violin" : comparative violin plots. 
@@ -2469,6 +2454,14 @@ def __init__(self, solver_names=None, problem_names=None, solver_renames=None, p self.problem_names = [problem.name for problem in self.problems] self.n_solvers = len(self.solvers) self.n_problems = len(self.problems) + elif solvers is None and problems is not None: # Method by providing solver and problem names + self.experiments = [[ProblemSolver(solver_name=solver_name, problem=problem) for problem in problems] for solver_name in solver_names] + self.solvers = [solver_directory[solver_name](name=solver_name) for solver_name in solver_names] + self.solver_names = solver_names + self.problems = problems + self.problem_names = [problem.name for problem in self.problems] + self.n_solvers = len(self.solvers) + self.n_problems = len(self.problems) else: # Method #1 if solver_renames is None: self.solver_names = solver_names @@ -2622,56 +2615,6 @@ def record_group_experiment_results(self): with open(self.file_name_path, "wb") as file: pickle.dump(self, file, pickle.HIGHEST_PROTOCOL) - def log_group_experiment_results(self): - """Create readable .txt file describing the solvers and problems that make up the ProblemSolvers object. - """ - # Create a new text file in experiments/logs folder with correct name. - new_path = self.file_name_path.replace("outputs", "logs") # Adjust file_path_name to correct folder. - new_path = new_path.replace(".pickle", "") # Remove .pickle from .txt file name. - - # Create directories if they do no exist. - if "./experiments/logs" in new_path and not os.path.exists("./experiments/logs"): - os.makedirs("./experiments", exist_ok=True) - os.makedirs("./experiments/logs") - # Create text file. - with open(new_path + "_group_experiment_results.txt", "w") as file: - # Title text file with experiment information. - file.write(self.file_name_path) - file.write('\n') - # Write the name of each problem. 
- file.write("----------------------------------------------------------------------------------------------") - file.write("\nProblems:\n\n") - for i in range(self.n_problems): - file.write(f"{self.problem_names[i]}\n\t") - # Write model factors for each problem. - file.write("Model Factors:\n") - for key, value in self.problems[i].model.factors.items(): - # Excluding model factors corresponding to decision variables. - if key not in self.problems[i].model_decision_factors: - file.write(f"\t\t{key}: {value}\n") - # Write problem factors for each problem. - file.write("\n\tProblem Factors:\n") - for key, value in self.problems[i].factors.items(): - file.write(f"\t\t{key}: {value}\n") - file.write("\n") - file.write("----------------------------------------------------------------------------------------------") - # Write the name of each Solver. - file.write("\nSolvers:\n\n") - # Write solver factors for each solver. - for j in range(self.n_solvers): - file.write(f"{self.solver_names[j]}\n\t") - file.write("Solver Factors:\n") - for key, value in self.solvers[i].factors.items(): - file.write(f"\t\t{key}: {value}\n") - file.write("\n") - file.write("----------------------------------------------------------------------------------------------") - # Write the name of pickle files for each Problem-Solver pair. - file.write("\nThe .pickle files for the associated Problem-Solver pairs are:\n") - for p in self.problem_names: - for s in self.solver_names: - file.write(f"\t{s}_on_{p}.pickle\n") - file.close() - def read_group_experiment_results(file_name_path): """Read in ``experiment_base.ProblemsSolvers`` object from .pickle file. diff --git a/simopt/models/cascade.py b/simopt/models/cascade.py new file mode 100644 index 000000000..c91d2aeed --- /dev/null +++ b/simopt/models/cascade.py @@ -0,0 +1,648 @@ +""" +Summary +------- +Simulate a progressive cascade process in an infinite time horizon. +`here `_. 
+ +""" +import numpy as np +import networkx as nx +import cvxpy as cp + +from ..base import Model, Problem + + +class Cascade(Model): + """ + Simulate a progressive cascade process in an infinite time horizon. + + Attributes + ---------- + name : str + name of model + n_rngs : int + number of random-number generators used to run a simulation replication + n_responses : int + number of responses (performance measures) + factors : dict + changeable factors of the simulation model + specifications : dict + details of each factor (for GUI, data validation, and defaults) + check_factor_list : dict + switch case for checking factor simulatability + + Arguments + ---------- + fixed_factors : dict + fixed_factors of the simulation model + + See also + -------- + base.Model + """ + def __init__(self, fixed_factors=None): + if fixed_factors is None: + fixed_factors = {} + self.name = "CASCADE" + self.n_rngs = 2 + self.n_responses = 1 + self.factors = fixed_factors + self.G = nx.read_graphml('/Users/liulitong/Desktop/simopt-1/DAG.graphml') + self.num_nodes = len(self.G) + self.specifications = { + "num_subgraph": { + "description": "number of subgraphs to generate", + "datatype": int, + "default": 10 + }, + "init_prob": { + "description": "probability of initiating the nodes", + "datatype": np.ndarray, + "default": 0.1 * np.ones(self.num_nodes) + } + } + + self.check_factor_list = { + "num_subgraph": self.check_num_subgraph, + "init_prob": self.check_init_prob, + } + # Set factors of the simulation model + super().__init__(fixed_factors) + + # Check for simulatable factors + def check_num_subgraph(self): + return self.factors["num_subgraph"] > 0 + + def check_init_prob(self): + return np.all(self.factors["init_prob"] >= 0) + + def check_simulatable_factors(self): + return True + + def replicate(self, rng_list): + """ + Simulate a single replication for the current model factors. 
+ + Arguments + --------- + rng_list : [list] [mrg32k3a.mrg32k3a.MRG32k3a] + rngs for model to use when simulating a replication + + Returns + ------- + responses : dict + performance measures of interest + "mean_num_activated" = Mean number of activated nodes + """ + # Designate random number generators. + seed_rng = rng_list[0] + activate_rng = rng_list[1] + + nodes = list(self.G.nodes) + num_lst = [] + for _ in range(self.factors["num_subgraph"]): + # Create seed nodes. + seeds = [nodes[j] for j in range(self.num_nodes) if seed_rng.uniform(0, 1) < self.factors["init_prob"][j]] + # Set all nodes as not activated. + activated = set() + # Add the seed nodes to the activated set. + activated.update(set(seeds)) + # Initialize the newly activated nodes list with the seed nodes. + newly_activated = set(seeds) + + # Run the model until there are no more newly activated nodes. + while len(newly_activated) != 0: + temp_activated = set() + for v in newly_activated: + # Check for each successor if it gets activated. + for w in self.G.successors(v): + if w not in activated: + u = activate_rng.uniform(0, 1) + if u < self.G[v][w]["weight"]: + temp_activated.add(w) + # Add newly activated nodes to the activated set. + newly_activated = temp_activated + activated.update(newly_activated) + + + num_activated = len(activated) + num_lst.append(num_activated) + + + # Calculate responses from simulation data. + responses = {"mean_num_activated": np.mean(num_lst) + } + gradients = {response_key: {factor_key: np.nan for factor_key in self.specifications} for response_key in responses} + return responses, gradients + + +""" +Summary +------- +Maximize the expected number of activated nodes. +""" + +class CascadeMax(Problem): + """ + Class to make network cascade simulation-optimization problems. 
+ + Attributes + ---------- + name : str + name of problem + dim : int + number of decision variables + n_objectives : int + number of objectives + n_stochastic_constraints : int + number of stochastic constraints + minmax : tuple of int (+/- 1) + indicator of maximization (+1) or minimization (-1) for each objective + constraint_type : str + description of constraints types: + "unconstrained", "box", "deterministic", "stochastic" + variable_type : str + description of variable types: + "discrete", "continuous", "mixed" + gradient_available : bool + indicates if gradient of objective function is available + optimal_value : float + optimal objective function value + optimal_solution : tuple + optimal solution + model : base.Model + associated simulation model that generates replications + model_default_factors : dict + default values for overriding model-level default factors + model_fixed_factors : dict + combination of overriden model-level factors and defaults + model_decision_factors : set of str + set of keys for factors that are decision variables + rng_list : [list] [mrg32k3a.mrg32k3a.MRG32k3a] + list of RNGs used to generate a random initial solution + or a random problem instance + factors : dict + changeable factors of the problem + initial_solution : tuple + default initial solution from which solvers start + budget : int > 0 + max number of replications (fn evals) for a solver to take + specifications : dict + details of each factor (for GUI, data validation, and defaults) + + Arguments + --------- + name : str + user-specified name of problem + fixed_factors : dict + dictionary of user-specified problem factors + model_fixed factors : dict + subset of user-specified non-decision factors to pass through to the model + + See also + -------- + base.Problem + """ + def __init__(self, name="CASCADE-1", fixed_factors=None, model_fixed_factors=None): + if fixed_factors is None: + fixed_factors = {} + if model_fixed_factors is None: + model_fixed_factors = {} + 
self.name = name + self.n_objectives = 1 + self.n_stochastic_constraints = 0 + self.minmax = (1,) + self.constraint_type = "deterministic" + self.variable_type = "continuous" + self.gradient_available = False + self.optimal_value = None + self.optimal_solution = None + self.G = nx.read_graphml('/Users/liulitong/Desktop/simopt-1/DAG.graphml') + self.model_default_factors = {} + self.model_decision_factors = {"init_prob"} + self.factors = fixed_factors + self.specifications = { + "initial_solution": { + "description": "initial solution", + "datatype": tuple, + "default": tuple(0.001 * np.ones(len(self.G))) + }, + "budget": { + "description": "max # of replications for a solver to take", + "datatype": int, + "default": 10000 + }, + "B": { + "description": "budget for the activation costs", + "datatype": int, + "default": 200 + } + } + self.check_factor_list = { + "initial_solution": self.check_initial_solution, + "budget": self.check_budget + } + super().__init__(fixed_factors, model_fixed_factors) + # Instantiate model with fixed factors and overwritten defaults. + self.model = Cascade(self.model_fixed_factors) + self.dim = len(self.model.G) + self.lower_bounds = (0,) * self.dim + self.upper_bounds = (1,) * self.dim + self.Ci = np.array([self.model.G.nodes[node]["cost"] for node in self.model.G.nodes()]) + self.Ce = None + self.di = np.array([self.factors["B"]]) + self.de = None + + def vector_to_factor_dict(self, vector): + """ + Convert a vector of variables to a dictionary with factor keys + + Arguments + --------- + vector : tuple + vector of values associated with decision variables + + Returns + ------- + factor_dict : dict + dictionary with factor keys and associated values + """ + factor_dict = { + "init_prob": vector[:] + } + return factor_dict + + def factor_dict_to_vector(self, factor_dict): + """ + Convert a dictionary with factor keys to a vector + of variables. 
+ + Arguments + --------- + factor_dict : dict + dictionary with factor keys and associated values + + Returns + ------- + vector : tuple + vector of values associated with decision variables + """ + vector = (factor_dict["init_prob"],) + return vector + + def response_dict_to_objectives(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of objectives. + + Arguments + --------- + response_dict : dict + dictionary with response keys and associated values + + Returns + ------- + objectives : tuple + vector of objectives + """ + objectives = (response_dict["mean_num_activated"],) + return objectives + + def response_dict_to_stoch_constraints(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of left-hand sides of stochastic constraints: E[Y] <= 0 + + Arguments + --------- + response_dict : dict + dictionary with response keys and associated values + + Returns + ------- + stoch_constraints : tuple + vector of LHSs of stochastic constraint + """ + stoch_constraints = None + return stoch_constraints + + def deterministic_objectives_and_gradients(self, x): + """ + Compute deterministic components of objectives for a solution `x`. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_objectives : tuple + vector of deterministic components of objectives + det_objectives_gradients : tuple + vector of gradients of deterministic components of objectives + """ + det_objectives = (0,) + det_objectives_gradients = ((0,),) + return det_objectives, det_objectives_gradients + + def deterministic_stochastic_constraints_and_gradients(self, x): + """ + Compute deterministic components of stochastic constraints + for a solution `x`. 
+ + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_stoch_constraints : tuple + vector of deterministic components of stochastic constraints + det_stoch_constraints_gradients : tuple + vector of gradients of deterministic components of + stochastic constraints + """ + det_stoch_constraints = None + det_stoch_constraints_gradients = None + return det_stoch_constraints, det_stoch_constraints_gradients + + def check_deterministic_constraints(self, x): + """ + Check if a solution `x` satisfies the problem's deterministic + constraints. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + satisfies : bool + indicates if solution `x` satisfies the deterministic constraints. + """ + return np.dot(self.Ci, x) <= self.factors["B"] + + def get_random_solution(self, rand_sol_rng): + """ + Generate a random solution for starting or restarting solvers. + + Arguments + --------- + rand_sol_rng : mrg32k3a.mrg32k3a.MRG32k3a + random-number generator used to sample a new random solution + + Returns + ------- + x : tuple + vector of decision variables + """ + + # Upper bound and lower bound. + lower_bound = np.array(self.lower_bounds) + upper_bound = np.array(self.upper_bounds) + # Input inequality and equlaity constraint matrix and vector. + # Cix <= di + # Cex = de + Ci = self.Ci + di = self.di + Ce = self.Ce + de = self.de + + # Remove redundant upper/lower bounds. + ub_inf_idx = np.where(~np.isinf(upper_bound))[0] + lb_inf_idx = np.where(~np.isinf(lower_bound))[0] + + # Form a constraint coefficient matrix where all the equality constraints are put on top and + # all the bound constraints in the bottom and a constraint coefficient vector. 
+ if (Ce is not None) and (de is not None) and (Ci is not None) and (di is not None): + C = np.vstack((Ce, Ci)) + d = np.vstack((de.T, di.T)) + elif (Ce is not None) and (de is not None): + C = Ce + d = de.T + elif (Ci is not None) and (di is not None): + C = Ci + d = di.T + else: + C = np.empty([1, self.dim]) + d = np.empty([1, 1]) + + if len(ub_inf_idx) > 0: + C = np.vstack((C, np.identity(upper_bound.shape[0]))) + d = np.vstack((d, upper_bound[np.newaxis].T)) + if len(lb_inf_idx) > 0: + C = np.vstack((C, -np.identity(lower_bound.shape[0]))) + d = np.vstack((d, -lower_bound[np.newaxis].T)) + + # Hit and Run + start_pt = self.find_feasible_initial(None, C, None, d) + tol = 1e-6 + + x = start_pt + # Generate the markov chain for sufficiently long. + for _ in range(20): + # Generate a random direction to travel. + direction = np.array([rand_sol_rng.uniform(0, 1) for _ in range(self.dim)]) + direction = direction / np.linalg.norm(direction) + + dir = direction + ra = d.flatten() - C @ x + ra_d = C @ dir + # Initialize maximum step size. + s_star = np.inf + # Perform ratio test. + for i in range(len(ra)): + if ra_d[i] - tol > 0: + s = ra[i]/ra_d[i] + if s < s_star: + s_star = s + + dir = -direction + ra = d.flatten() - C @ x + ra_d = C @ dir + # Initialize maximum step size. + s_star2 = np.inf + # Perform ratio test. + for i in range(len(ra)): + if ra_d[i] - tol > 0: + s = ra[i]/ra_d[i] + if s < s_star2: + s_star2 = s + + # Generate random point between lambdas. + lam = rand_sol_rng.uniform(-1 * min(1, s_star2), min(1, s_star)) + + # Compute the new point. + x += lam * direction + + x= tuple(x) + return x + + + def get_multiple_random_solution(self, rand_sol_rng, n_samples): + """ + Generate multiple random solutions for starting or restarting solvers. 
+ + Arguments + --------- + rand_sol_rng : mrg32k3a.mrg32k3a.MRG32k3a + random-number generator used to sample a new random solution + + n_samples: int + number of random solutions to generate + + Returns + ------- + xs : list[tuple] + list of vectors of decision variables + """ + + # Upper bound and lower bound. + lower_bound = np.array(self.lower_bounds) + upper_bound = np.array(self.upper_bounds) + # Input inequality and equlaity constraint matrix and vector. + # Cix <= di + # Cex = de + Ci = self.Ci + di = self.di + Ce = self.Ce + de = self.de + + # Remove redundant upper/lower bounds. + ub_inf_idx = np.where(~np.isinf(upper_bound))[0] + lb_inf_idx = np.where(~np.isinf(lower_bound))[0] + + # Form a constraint coefficient matrix where all the equality constraints are put on top and + # all the bound constraints in the bottom and a constraint coefficient vector. + if (Ce is not None) and (de is not None) and (Ci is not None) and (di is not None): + C = np.vstack((Ce, Ci)) + d = np.vstack((de.T, di.T)) + elif (Ce is not None) and (de is not None): + C = Ce + d = de.T + elif (Ci is not None) and (di is not None): + C = Ci + d = di.T + else: + C = np.empty([1, self.dim]) + d = np.empty([1, 1]) + + if len(ub_inf_idx) > 0: + C = np.vstack((C, np.identity(upper_bound.shape[0]))) + d = np.vstack((d, upper_bound[np.newaxis].T)) + if len(lb_inf_idx) > 0: + C = np.vstack((C, -np.identity(lower_bound.shape[0]))) + d = np.vstack((d, -lower_bound[np.newaxis].T)) + + # Hit and Run + start_pt = self.find_feasible_initial(None, self.Ci, None, self.di) + xs = [] + x = start_pt + tol = 1e-6 + + # Generate the markov chain for sufficiently long. + for _ in range(20 + n_samples): + # Generate a random direction to travel. + direction = np.array([rand_sol_rng.uniform(0, 1) for _ in range(self.dim)]) + direction = direction / np.linalg.norm(direction) + + dir = direction + ra = d.flatten() - C @ x + ra_d = C @ dir + # Initialize maximum step size. 
+ s_star = np.inf + # Perform ratio test. + for i in range(len(ra)): + if ra_d[i] - tol > 0: + s = ra[i]/ra_d[i] + if s < s_star: + s_star = s + + dir = -direction + ra = d.flatten() - C @ x + ra_d = C @ dir + # Initialize maximum step size. + s_star2 = np.inf + # Perform ratio test. + for i in range(len(ra)): + if ra_d[i] - tol > 0: + s = ra[i]/ra_d[i] + if s < s_star2: + s_star2 = s + + # Generate random point between lambdas. + lam = rand_sol_rng.uniform(-1 * min(1, s_star2), min(1, s_star)) + + # Compute the new point. + x += lam * direction + + xs.append(tuple(x)) + + return xs[: -n_samples] + + + def find_feasible_initial(self, Ae, Ai, be, bi): + ''' + Find an initial feasible solution (if not user-provided) + by solving phase one simplex. + + Arguments + --------- + problem : Problem object + simulation-optimization problem to solve + C: ndarray + constraint coefficient matrix + d: ndarray + constraint coefficient vector + + Returns + ------- + x0 : ndarray + an initial feasible solution + tol: float + Floating point comparison tolerance + ''' + upper_bound = np.array(self.upper_bounds) + lower_bound = np.array(self.lower_bounds) + + # Define decision variables. + x = cp.Variable(self.dim) + + # Define constraints. + constraints = [] + + if (Ae is not None) and (be is not None): + constraints.append(Ae @ x == be.ravel()) + if (Ai is not None) and (bi is not None): + constraints.append(Ai @ x <= bi.ravel()) + + # Removing redundant bound constraints. + ub_inf_idx = np.where(~np.isinf(upper_bound))[0] + if len(ub_inf_idx) > 0: + for i in ub_inf_idx: + constraints.append(x[i] <= upper_bound[i]) + lb_inf_idx = np.where(~np.isinf(lower_bound)) + if len(lb_inf_idx) > 0: + for i in lb_inf_idx: + constraints.append(x[i] >= lower_bound[i]) + + # Define objective function. + obj = cp.Minimize(0) + + # Create problem. + model = cp.Problem(obj, constraints) + + # Solve problem. + model.solve(solver = cp.SCIPY) + + # Check for optimality. 
+ if model.status not in [cp.OPTIMAL, cp.OPTIMAL_INACCURATE] : + raise ValueError("Could not find feasible x0") + x0 = x.value + + return x0 + + diff --git a/simopt/models/network.py b/simopt/models/network.py index f2f2f8582..448aa968c 100644 --- a/simopt/models/network.py +++ b/simopt/models/network.py @@ -49,12 +49,13 @@ def __init__(self, fixed_factors=None): "process_prob": { "description": "probability that a message will go through a particular network i", "datatype": list, - "default": [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1] + # "default": [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1] + "default": [1/10, 1/10, 1/10, 1/10, 1/10, 1/10, 1/10, 1/10, 1/10, 1/10] }, "cost_process": { "description": "message processing cost of network i", "datatype": list, - "default": [1, 1 / 2, 1 / 3, 1 / 4, 1 / 5, 1 / 6, 1 / 7, 1 / 8, 1 / 9, 1 / 10] + "default": [1, 1 / 2, 1 / 3, 1 / 4, 1 / 5, 1 / 6, 1 / 7, 1 / 8, 1 / 9, 1 / 10] # Random }, "cost_time": { "description": "cost for the length of time a message spends in a network i per each unit of time", @@ -64,22 +65,22 @@ def __init__(self, fixed_factors=None): "mode_transit_time": { "description": "mode time of transit for network i following a triangular distribution", "datatype": list, - "default": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + "default": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] # Random }, "lower_limits_transit_time": { "description": "lower limits for the triangular distribution for the transit time", "datatype": list, - "default": [0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5] + "default": [0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5] # Random }, "upper_limits_transit_time": { "description": "upper limits for the triangular distribution for the transit time", "datatype": list, - "default": [1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5] + "default": [1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5] # Random }, "arrival_rate": { "description": "arrival rate of messages following a Poisson 
process", "datatype": float, - "default": 1.0 + "default": 1.0 # Random }, "n_messages": { "description": "number of messages that arrives and needs to be routed", @@ -183,7 +184,9 @@ def replicate(self, rng_list): # Generate all interarrival, network routes, and service times before the simulation run. arrival_times = [arrival_rng.expovariate(self.factors["arrival_rate"]) for _ in range(total_arrivals)] - network_routes = network_rng.choices(range(self.factors["n_networks"]), weights=self.factors["process_prob"], k=total_arrivals) + network_routes = network_rng.choices(range(self.factors["n_networks"]), weights=self.factors['process_prob'], k=total_arrivals) + # print(self.factors['process_prob']) + # print(len(range(self.factors['n_networks'])), len(list(self.factors['process_prob'])), np.sum(self.factors['process_prob'])) service_times = [transit_rng.triangular(low=self.factors["lower_limits_transit_time"][network_routes[i]], high=self.factors["upper_limits_transit_time"][network_routes[i]], mode=self.factors["mode_transit_time"][network_routes[i]]) @@ -268,7 +271,7 @@ class NetworkMinTotalCost(Problem): upper bound for each decision variable gradient_available : bool indicates if gradient of objective function is available - optimal_value : tuple + optimal_value : float optimal objective function value optimal_solution : tuple optimal solution @@ -340,6 +343,10 @@ def __init__(self, name="NETWORK-1", fixed_factors=None, model_fixed_factors=Non self.dim = self.model.factors["n_networks"] self.lower_bounds = tuple([0 for _ in range(self.model.factors["n_networks"])]) self.upper_bounds = tuple([1 for _ in range(self.model.factors["n_networks"])]) + self.Ci = None + self.Ce = np.array([1 for _ in range(self.model.factors["n_networks"])]) #None + self.di = None + self.de = np.array([1]) #None def vector_to_factor_dict(self, vector): """ diff --git a/simopt/models/openjackson.py b/simopt/models/openjackson.py new file mode 100644 index 000000000..de5413355 --- 
/dev/null +++ b/simopt/models/openjackson.py @@ -0,0 +1,1086 @@ +""" +Summary +------- +Simulate an open jackson network +""" +import autograd.numpy as np +import math as math +from collections import deque +from ..auto_diff_util import bi_dict, replicate_wrapper, factor_dict, resp_dict_to_array + +from ..base import Model, Problem + +# generates an erdos renyi graph where each subgraph has an exit +def erdos_renyi(rng, n, p, directed = True): + graph = np.zeros((n,n+1)) + for i in range(n): + for j in range(n+1): + prob = rng.uniform(0,1) + if prob < p: + graph[i][j] = 1 + if not directed: + graph = np.triu(graph) + + #check for exits in each subgraph if there are not valid exits + # then create a new erdos_renyi graph until one is valid + has_exit = set() + checked = False + while(not checked): + numexitable = len(has_exit) + for i in range(n): + if (graph[i][-1]) == 1: + has_exit.add(i) + # print("add original", has_exit) + if len(has_exit) > 0: + has_exit2 = [] + for j in has_exit: + if graph[i][j] == 1 : + has_exit2 += [i] + for a in has_exit2: + has_exit.add(a) + # print("add adjacent", has_exit) + afternumexitable = len(has_exit) + checked = (afternumexitable == n or numexitable == afternumexitable) + # if the graph has nodes that have no path out then add a path out to those nodes + if len(has_exit) != n: + for x in set(range(n)).difference(has_exit): + graph[x][-1] = 1 + + return graph + + +class OpenJackson(Model): + """ + A model of an open jackson network . 
+ + Attributes + ---------- + name : string + name of model + n_rngs : int + number of random-number generators used to run a simulation replication + n_responses : int + number of responses (performance measures) + factors : dict + changeable factors of the simulation model + specifications : dict + details of each factor (for GUI, data validation, and defaults) + check_factor_list : dict + switch case for checking factor simulatability + + Arguments + --------- + fixed_factors : dict + fixed_factors of the simulation model + + See also + -------- + base.Model + """ + def __init__(self, fixed_factors=None, random = False): + if fixed_factors is None: + fixed_factors = {} + self.name = "OPENJACKSON" + self.n_responses = 2 + self.random = random + self.n_random = 2 # Number of rng used for the random instance + # random instance factors: number_queues, arrival_alphas, service_mus, routing_matrix + + self.factors = fixed_factors + self.specifications = { + "number_queues": { + "description": "The number of queues in the network", + "datatype": int, + "default": 5 + }, + "arrival_alphas": { + "description": "The arrival rates to each queue from outside the network", + "datatype": tuple, + "default": (2,3,2,4,3) + }, + "service_mus": { + "description": "The mu values for the exponential service times ", + "datatype": tuple, + "default": (11,11,11,11,11) + }, + "routing_matrix": { + "description": "The routing matrix that describes the probabilities of moving to the next queue after leaving the current one", + "datatype": list, + "default": [[0.1, 0.1, 0.2, 0.2, 0], + [0.1, 0.1, 0.2, 0.2, 0], + [0.2, 0.1, 0, 0.1, 0.2], + [0.1, 0.1, 0.1, 0, 0.2], + [0.1, 0.1, 0.1, 0.1, 0.2]] + }, + "t_end": { + "description": "A number of replications to run", + "datatype": int, + "default": 200 + }, + "warm_up": { + "description": "A number of replications to use as a warm up period", + "datatype": int, + "default": 0 + }, + "steady_state_initialization":{ + "description": "Whether the 
model will be initialized with steady state values", + "datatype": bool, + "default": False + }, + "density_p":{ + "description": "The probability of an edge existing in the graph in the random instance", + "datatype": float, + "default": 0.5 + }, + "random_arrival_parameter":{ + "description": "The parameter for the random arrival rate exponential distribution when creating a random instance", + "datatype": float, + "default": 1 + } + + + } + self.check_factor_list = { + "number_queues": self.check_number_queues, + "arrival_alphas": self.check_arrival_alphas, + "routing_matrix": self.check_routing_matrix, + "service_mus": self.check_service_mus, + "t_end": self.check_t_end, + "warm_up": self.check_warm_up, + "steady_state_initialization": self.check_steady_state_initialization, + "density_p": self.check_density_p, + "random_arrival_parameter": self.check_random_arrival_parameter + } + # Set factors of the simulation model. + super().__init__(fixed_factors) + self.n_rngs = 3 * (self.factors["number_queues"] + 1) + + + + def check_number_queues(self): + return self.factors["number_queues"]>=0 + def check_arrival_alphas(self): + return all(x >= 0 for x in self.factors["arrival_alphas"]) + def check_service_mus(self): + lambdas = self.calc_lambdas() + return all(x >= 0 for x in self.factors["service_mus"]) and all(self.factors['service_mus'][i] > lambdas[i] for i in range(self.factors["number_queues"])) + def check_routing_matrix(self): + transition_sums = list(map(sum, self.factors["routing_matrix"])) + if all([len(row) == len(self.factors["routing_matrix"]) for row in self.factors["routing_matrix"]]) & \ + all(transition_sums[i] <= 1 for i in range(self.factors["number_queues"])): + return True + else: + return False + def check_t_end(self): + return self.factors["t_end"] >= 0 + def check_warm_up(self): + # Assume f(x) can be evaluated at any x in R^d. 
+ return self.factors["warm_up"] >= 0 + def check_steady_state_initialization(self): + return isinstance(self.factors["steady_state_initialization"], bool) + def check_density_p(self): + return 0 <= self.factors["density_p"] <= 1 + def check_random_arrival_parameter(self): + return self.factors["random_arrival_parameter"] >= 0 + + # function that calulates the lambdas + def calc_lambdas(self): + routing_matrix = np.asarray(self.factors["routing_matrix"]) + lambdas = np.linalg.inv(np.identity(self.factors['number_queues']) - routing_matrix.T) @ self.factors["arrival_alphas"] + return lambdas + + def check_simulatable_factors(self): + lambdas = self.calc_lambdas() + return all(self.factors['service_mus'][i] > lambdas[i] for i in range(self.factors['number_queues'])) + + def attach_rng(self, random_rng): + #returns a dirichlet distribution of same shape as alpha + def dirichlet(alpha, rng): + gamma_vars = [rng.gammavariate(a, 1) for a in alpha] + sum_gamma_vars = sum(gamma_vars) + dirichlet_vars = [x / sum_gamma_vars for x in gamma_vars] + return dirichlet_vars + + self.random_rng = random_rng + random_num_queue = self.factors['number_queues'] + p = self.factors['density_p'] + random_matrix = erdos_renyi(random_rng[0], random_num_queue,p) + prob_matrix = np.zeros((random_num_queue, random_num_queue + 1)) + for i in range(random_num_queue): + a = int(sum(random_matrix[i]))+1 + probs = dirichlet(np.ones(a), rng = random_rng[0]) + r = 0 + for j in range(random_num_queue+1): + if random_matrix[i][j]==1 or j == random_num_queue: + prob_matrix[i][j] = probs[r] + r += 1 + prob_matrix = np.asarray(prob_matrix) + prob_matrix = prob_matrix[:, :-1] + random_arrival = [] + for i in range(random_num_queue): + random_arrival.append(random_rng[1].expovariate(self.factors['random_arrival_parameter'])) + + self.factors["arrival_alphas"] = random_arrival + self.factors['routing_matrix'] = prob_matrix.tolist() + + return + + def get_IPA(Dl, V, W, q, k, mu, self): # D is the dictionary, 
St L[i][1]: ith arrive cust's + def I(x, k): + if x==k: + return 1 + else: + return 0 + IA, IW = [[] for i in range(q)], [[-V[i][0]/mu * I(i, k)] for i in range(q)] + for i in range(len(Dl)): + queue = int(Dl[i][0]) + idx = Dl[i][1] + v = V[queue][idx] + if idx == 0: + if Dl[i][2][0] == -1: + IA[queue].append(0) + else: + pre_queue = Dl[i][2][0] + pre_idx = Dl[i][2][1]-1 + print('i: ', i, ', prequeue: ', pre_queue, ', pre_idx: ', pre_idx) + # print('iwww', IW[pre_queue], IA[pre_queue]) + if len(IA[pre_queue]) == 0: # Warm up bug.. + print('warmup') + a = 0 + else: + a = IW[pre_queue][pre_idx] + IA[pre_queue][pre_idx] + IA[queue].append(a) + else: + # Calculate IA + if Dl[i][2][0] == -1: + IA[queue].append(0) + else: + pre_queue = Dl[i][2][0] + pre_idx = Dl[i][2][1]-1 + print(pre_queue, pre_idx, IW[pre_queue], IA[pre_queue]) + if len(IA[pre_queue]) == 0: # Warm up bug.. + print('warmup') + a = 0 + else: + a = IW[pre_queue][pre_idx] + IA[pre_queue][pre_idx] + # print('i: ', i, ', prequeue: ', pre_queue, ', pre_idx: ', pre_idx) + # print('a', a) + IA[queue].append(a) + if W[queue][idx] <= 0: + v = -V[queue][idx]/mu * I(queue, k) + IW[queue].append(v) + else: + v = -V[queue][idx]/mu * I(queue, k) + IW[queue][idx-1] + # print('pre: ', IA[queue][idx-1]) + # print('it: ', IA[queue][idx]) + u = IA[queue][idx-1] - IA[queue][idx] + IW[queue].append(u + v) + + return IA, IW + + def replicate(self, rng_list): + """ + Simulate a single replication for the current model factors. + + Arguments + --------- + rng_list : [list] [rng.mrg32k3a.MRG32k3a] + rngs for model to use when simulating a replication + + Returns + ------- + responses : dict + performance measures of interest + "average_queue_length": The time-average of queue length at each station + "expected_queue_length": The expected queue length calculated using stationary distribution + """ + # Designate random number generators. 
+ arrival_rng = [rng_list[i] for i in range(self.factors["number_queues"])] + transition_rng = [rng_list[i + self.factors["number_queues"]] for i in range(self.factors["number_queues"])] + time_rng = [rng_list[i + 2*self.factors["number_queues"]] for i in range(self.factors["number_queues"])] + initialization_rng = rng_list[-1] + + def geometric(p): + return math.floor(np.log(1 - initialization_rng.uniform(0,1)) / math.log(p)) + #calculate the steady state of the queues to check the simulation + #calculate lambdas + routing_matrix = np.asarray(self.factors["routing_matrix"]) + lambdas = np.linalg.inv(np.identity(self.factors['number_queues']) - routing_matrix.T) @ self.factors["arrival_alphas"] + rho = lambdas/self.factors["service_mus"] + #calculate expected value of queue length as rho/(1-rho) + expected_queue_length = (rho)/(1-rho) + + if self.factors["steady_state_initialization"]: + # sample initialized queue lengths + queues = [geometric(rho[i]) for i in range(self.factors["number_queues"])] + completion_times = [math.inf for _ in range(self.factors["number_queues"])] + # Generate all interarrival, network routes, and service times before the simulation run. + next_arrivals = [arrival_rng[i].expovariate(self.factors["arrival_alphas"][i]) for i in range(self.factors["number_queues"])] + for i in range(self.factors["number_queues"]): + if queues[i] > 0: + completion_times[i] = time_rng[i].expovariate(self.factors["service_mus"][i]) + time_sum_queue_length = [0 for _ in range(self.factors["number_queues"])] + + else: + queues = [0 for _ in range(self.factors["number_queues"])] + # Generate all interarrival, network routes, and service times before the simulation run. + next_arrivals = [arrival_rng[i].expovariate(self.factors["arrival_alphas"][i]) + for i in range(self.factors["number_queues"])] + + # create list of each station's next completion time and initialize to infinity. 
+ completion_times = [math.inf for _ in range(self.factors["number_queues"])] + + # initialize list of each station's average queue length + time_sum_queue_length = [0 for _ in range(self.factors["number_queues"])] + + + # Initiate clock variables for statistics tracking and event handling. + clock = 0 + previous_clock = 0 + + # warm-up period + if not self.factors["steady_state_initialization"]: + + while clock < self.factors['warm_up']: + next_arrival = min(next_arrivals) + next_completion = min(completion_times) + clock = min(next_arrival, next_completion) + if next_arrival < next_completion: # next event is an arrival + station = next_arrivals.index(next_arrival) + queues[station] += 1 + next_arrivals[station] += arrival_rng[station].expovariate(self.factors["arrival_alphas"][station]) + if queues[station] == 1: + completion_times[station] = clock + time_rng[station].expovariate(self.factors["service_mus"][station]) + else: # next event is a departure + station = completion_times.index(next_completion) + queues[station] -= 1 + if queues[station] > 0: + completion_times[station] = clock + time_rng[station].expovariate(self.factors["service_mus"][station]) + else: + completion_times[station] = math.inf + # schedule where the customer will go next + prob = transition_rng[station].random() + + if prob < np.cumsum(self.factors['routing_matrix'][station])[-1]: # customer stay in system + next_station = np.argmax(np.cumsum(self.factors['routing_matrix'][station]) > prob) + queues[next_station] += 1 + if queues[next_station] == 1: + completion_times[next_station] = clock + time_rng[next_station].expovariate(self.factors["service_mus"][next_station]) + next_arrivals = [next_arrivals[i] - clock for i in range(self.factors["number_queues"])] + completion_times = [completion_times[i] - clock for i in range(self.factors["number_queues"])] + clock = 0 + previous_clock = 0 + + # statistics needed for IPA - waiting_record, service_record, arrival_record, transfer_record, 
IPA_record + # waiting_record: records the waiting time of each customer before entering service. record when scheduling new completion times + # helper list: time_entered. records the time each customer enters the system. record when scheduling new arrival or departing to another station. + # pop when scheduling new completion times + # service_record: records the service time of each customer. record when scheduling new completion times + # arrival_record: records the arrival time of each customer. record when scheduling new arrivals + # transfer_record: records where the customer is transferred from, formatted as [previous station, previous index], if new : [-1] + # record when scheduling departures & new arrivals + # IPA_record: records the customer's index in the queue and the station it is transferred from, each element formatted as [station, index, [previous station, previous index]]. + # record at shceduling new completion times + # collect all statistics starting from warm-up period + waiting_record = [[] for _ in range(self.factors["number_queues"])] + time_entered = [deque() for _ in range(self.factors['number_queues'])] + service_record = [[] for _ in range(self.factors["number_queues"])] + arrival_record = [[] for _ in range(self.factors["number_queues"])] + transfer_record = [deque() for _ in range(self.factors["number_queues"])] + IPA_record = [] + + # Run simulation over time horizon. 
+ while clock < self.factors['t_end']: + next_arrival = min(next_arrivals) + next_completion = min(completion_times) + clock = min(next_arrival, next_completion) + for i in range(self.factors['number_queues']): + time_sum_queue_length[i] += queues[i] * (clock - previous_clock) + + previous_clock = clock + if next_arrival < next_completion: # next event is an arrival + station = next_arrivals.index(next_arrival) + queues[station] += 1 + next_arrivals[station] += arrival_rng[station].expovariate(self.factors["arrival_alphas"][station]) + + time_entered[station].append(clock) + if queues[station] == 1: + completion_times[station] = clock + time_rng[station].expovariate(self.factors["service_mus"][station]) + waiting_record[station].append(clock - time_entered[station].popleft()) + else: # next event is a departure + station = completion_times.index(next_completion) + queues[station] -= 1 + if queues[station] > 0: + completion_times[station] = clock + time_rng[station].expovariate(self.factors["service_mus"][station]) + waiting_record[station].append(clock - time_entered[station].popleft()) + else: + completion_times[station] = math.inf + # schedule where the customer will go next + prob = transition_rng[station].random() + + if prob < np.cumsum(self.factors['routing_matrix'][station])[-1]: # customer stay in system + next_station = np.argmax(np.cumsum(self.factors['routing_matrix'][station]) > prob) + queues[next_station] += 1 + time_entered[next_station].append(clock) + if queues[next_station] == 1: + completion_times[next_station] = clock + time_rng[next_station].expovariate(self.factors["service_mus"][next_station]) + waiting_record[next_station].append(clock - time_entered[next_station].popleft()) + # end of simulation + # Calculate the IPA gradient + # IPA_gradient = [] + # for j in range(self.factors['number_queues']): + # IPA_gradient.append(self.get_IPA(IPA_record, service_times, waiting_times, self.factors['number_queues'], j, self.factors['service_mus'][j])) 
+ + # calculate average queue length + average_queue_length = [time_sum_queue_length[i]/clock for i in range(self.factors["number_queues"])] + gradient = [-lambdas[i]/(self.factors["service_mus"][i] - lambdas[i])**(2) for i in range(self.factors['number_queues'])] + # lagrange_obj = sum(lambdas[i]/(self.factors["service_mus"][i] - lambdas[i]) for i in range(self.factors['number_queues'])) + 0.5*sum(self.factors['service_mus']) + lagrange_obj = sum(average_queue_length) + 0.5*sum(self.factors['service_mus']) + lagrange_grad = [-lambdas[i]/(self.factors["service_mus"][i] - lambdas[i])**(2) + 1 for i in range(self.factors['number_queues'])] + + responses = {"total_jobs": sum(average_queue_length)} + # responses = {"average_queue_length": average_queue_length, 'lagrange_obj': lagrange_obj, "expected_queue_length" :expected_queue_length, + # "total_jobs": sum(average_queue_length)} + gradients = {response_key: {factor_key: np.nan for factor_key in self.specifications} for response_key in responses} + + # gradients['average_queue_length']['service_mus'] = tuple(gradient) + gradients['total_jobs']['service_mus'] = tuple(gradient) + + return responses, gradients + + +""" +Summary(.) +------- +Minimize the expected total number of jobs in the system at a time +""" + +class OpenJacksonMinQueue(Problem): + """ + Class to Open Jackson simulation-optimization problems. 
class OpenJacksonMinQueue(Problem):
    """
    Minimize the expected total number of jobs in an open Jackson network by
    choosing the station service rates, subject to a budget on their sum.

    Decision variables: "service_mus" (one rate per station).
    Deterministic constraints: stability (mu_i > lambda_i) and
    sum(mu) <= service_rates_budget.

    Attributes
    ----------
    name : string
        name of problem
    dim : int
        number of decision variables (one service rate per station)
    n_objectives : int
        number of objectives
    n_stochastic_constraints : int
        number of stochastic constraints
    minmax : tuple of int (+/- 1)
        indicator of maximization (+1) or minimization (-1) for each objective
    constraint_type : string
        description of constraint types
    variable_type : string
        description of variable types
    lower_bounds, upper_bounds : tuple
        box bounds for each decision variable
    gradient_available : bool
        indicates if gradient of objective function is available
    model : Model object
        associated simulation model that generates replications
    model_decision_factors : set of str
        set of keys for factors that are decision variables
    factors : dict
        changeable factors of the problem
    specifications : dict
        details of each factor (for GUI, data validation, and defaults)

    Arguments
    ---------
    name : str
        user-specified name for problem
    fixed_factors : dict
        dictionary of user-specified problem factors
    model_fixed_factors : dict
        subset of user-specified non-decision factors to pass through to the model
    random : bool
        if True, the model topology and arrival rates are randomly generated
    random_rng : list of mrg32k3a.mrg32k3a.MRG32k3a, optional
        rngs used to generate the random problem instance

    See also
    --------
    base.Problem
    """
    def __init__(self, name="OPENJACKSON-1", fixed_factors=None, model_fixed_factors=None, random=False, random_rng=None):
        if fixed_factors is None:
            fixed_factors = {}
        if model_fixed_factors is None:
            model_fixed_factors = {}
        self.name = name
        self.n_objectives = 1
        self.n_stochastic_constraints = 0
        self.minmax = (-1,)
        self.constraint_type = "deterministic"
        self.variable_type = "continuous"
        self.gradient_available = True
        self.model_default_factors = {}
        self.model_decision_factors = {"service_mus"}
        self.factors = fixed_factors
        self.random = random
        self.n_rngs = 1
        self.specifications = {
            "initial_solution": {
                "description": "initial solution",
                "datatype": tuple,
                "default": (11, 11, 11, 11, 11)
            },
            "budget": {
                "description": "max # of replications for a solver to take",
                "datatype": int,
                "default": 1000
            },
            "service_rates_budget": {
                "description": "budget for total service rates sum",
                "datatype": int,
                "default": 100
            },
            "gamma_mean": {
                "description": "scale factor applied to sum(lambdas) to get the mean of the gamma slack when generating the service-rates budget",
                "datatype": float,
                "default": 0.5
            },
            "gamma_scale": {
                # BUG FIX: datatype was declared `tuple`; the value is a
                # scalar used as the gamma scale parameter in attach_rngs.
                "description": "scale parameter of the gamma slack when generating the service-rates budget",
                "datatype": float,
                "default": 5
            }
        }
        self.check_factor_list = {
            "initial_solution": self.check_initial_solution,
            "budget": self.check_budget,
            "service_rates_budget": self.check_service_rates_budget
        }
        super().__init__(fixed_factors, model_fixed_factors)
        # Instantiate model with fixed factors and overwritten defaults.
        self.model = OpenJackson(self.model_fixed_factors, random)
        # Linear constraint Ci @ mu <= di, i.e. sum(mu) <= service_rates_budget.
        self.Ci = np.array([1 for _ in range(self.model.factors["number_queues"])])
        self.di = np.array([self.factors["service_rates_budget"]])
        self.Ce = None
        self.de = None
        self.dim = self.model.factors["number_queues"]
        self.lower_bounds = tuple(0 for _ in range(self.model.factors["number_queues"]))
        self.upper_bounds = tuple(self.factors["service_rates_budget"]
                                  for _ in range(self.model.factors["number_queues"]))
        self.optimal_value = None  # Change if f is changed.
        self.optimal_solution = None  # Change if f is changed.
        if random and random_rng:
            self.model.attach_rng(random_rng)

    def attach_rngs(self, random_rng):
        """Randomize the service-rate budget and set a feasible initial solution.

        The budget is sum(lambdas) plus a gamma-distributed slack, which keeps
        a stable assignment of rates attainable under the budget.
        """
        self.random_rng = random_rng
        lambdas = self.model.calc_lambdas()
        mean = self.factors["gamma_mean"] * sum(lambdas)
        scale = self.factors["gamma_scale"]
        gamma = random_rng[0].gammavariate(mean / scale, scale)
        self.factors["service_rates_budget"] = sum(lambdas) + gamma
        # Spread the budget across stations proportionally to the lambdas.
        ratio = self.factors["service_rates_budget"] / sum(lambdas)
        self.factors["initial_solution"] = tuple(ratio * lambda_i for lambda_i in lambdas)

    def check_service_rates_budget(self):
        """Budget must cover the total effective arrival rate.

        BUG FIX: previously called sum() on the scalar budget, raising TypeError.
        """
        lambdas = self.model.calc_lambdas()
        return self.factors["service_rates_budget"] >= sum(lambdas)

    def vector_to_factor_dict(self, vector):
        """Convert a vector of decision variables to a factor dictionary."""
        return {"service_mus": vector[:]}

    def factor_dict_to_vector(self, factor_dict):
        """Convert a factor dictionary to a vector of decision variables."""
        return (factor_dict["service_mus"],)

    def response_dict_to_objectives(self, response_dict):
        """Extract the objective ("total_jobs") from a response dictionary."""
        if type(response_dict["total_jobs"]) == tuple:
            return (response_dict["total_jobs"][0],)
        return (response_dict["total_jobs"],)

    def response_dict_to_stoch_constraints(self, response_dict):
        """No stochastic constraints for this problem."""
        return None

    def deterministic_objectives_and_gradients(self, x):
        """Deterministic component of the objective is zero."""
        det_objectives = (0,)
        det_objectives_gradients = ((0,) * self.dim,)
        return det_objectives, det_objectives_gradients

    def deterministic_stochastic_constraints_and_gradients(self, x):
        """No stochastic constraints for this problem."""
        return None, None

    def check_deterministic_constraints(self, x):
        """Check stability (x_i > lambda_i) and the total-rate budget."""
        # Superclass method checks box constraints.
        lambdas = self.model.calc_lambdas()
        stable = all(x[i] > lambdas[i] for i in range(self.model.factors["number_queues"]))
        within_budget = sum(x) <= self.factors["service_rates_budget"]
        return super().check_deterministic_constraints(x) and stable and within_budget

    def get_random_solution(self, rand_sol_rng):
        """Generate a random solution for starting or restarting solvers.

        Arguments
        ---------
        rand_sol_rng : mrg32k3a.mrg32k3a.MRG32k3a
            random-number generator used to sample a new random solution

        Returns
        -------
        x : vector of decision variables
        """
        if self.model.factors["steady_state_initialization"]:
            # Perturb each lambda upward so the draw is stable.
            lambdas = self.model.calc_lambdas()
            sum_alphas = sum(self.model.factors["arrival_alphas"])
            x = [lambdas[i] + rand_sol_rng.uniform(0, 1) * sum_alphas
                 for i in range(self.model.factors["number_queues"])]
        else:
            x = rand_sol_rng.continuous_random_vector_from_simplex(
                n_elements=self.model.factors["number_queues"],
                summation=self.factors["service_rates_budget"],
                exact_sum=False)
        return x
class OpenJacksonMinQueueLagrange(Problem):
    """
    Minimize a Lagrangian objective for the open Jackson network: the
    expected total number of jobs plus a penalty proportional to the sum of
    service rates (response "lagrange_obj" from the model).

    Decision variables: "service_mus" (one rate per station).
    Deterministic constraints: stability (mu_i > lambda_i).

    Arguments
    ---------
    name : str
        user-specified name for problem
    fixed_factors : dict
        dictionary of user-specified problem factors
    model_fixed_factors : dict
        subset of user-specified non-decision factors to pass through to the model
    random : bool
        if True, the model topology and arrival rates are randomly generated
    random_rng : list of mrg32k3a.mrg32k3a.MRG32k3a, optional
        rngs used to generate the random problem instance

    See also
    --------
    base.Problem
    """
    def __init__(self, name="OPENJACKSON-2", fixed_factors=None, model_fixed_factors=None, random=False, random_rng=None):
        if fixed_factors is None:
            fixed_factors = {}
        if model_fixed_factors is None:
            model_fixed_factors = {}
        self.name = name
        self.n_objectives = 1
        self.n_stochastic_constraints = 0
        self.minmax = (-1,)
        self.constraint_type = "deterministic"
        self.variable_type = "continuous"
        self.gradient_available = True
        self.model_default_factors = {}
        self.model_decision_factors = {"service_mus"}
        self.factors = fixed_factors
        self.random = random
        self.n_rngs = 1
        self.specifications = {
            "initial_solution": {
                "description": "initial solution",
                "datatype": tuple,
                "default": (15, 15, 15, 15, 15)
            },
            "budget": {
                "description": "max # of replications for a solver to take",
                "datatype": int,
                "default": 500
            },
            "service_rates_factor": {
                # BUG FIX: datatype was `int` although the default (0.5) and
                # the values drawn in attach_rngs are floats.
                # NOTE(review): this factor is not currently threaded into
                # the model's lagrange objective (the model hardcodes 0.5)
                # — confirm intended wiring.
                "description": "weight of the service rates in the objective function",
                "datatype": float,
                "default": 0.5
            }
        }
        self.check_factor_list = {
            "initial_solution": self.check_initial_solution,
            "budget": self.check_budget,
            "service_rates_factor": self.check_service_rates_factor
        }
        super().__init__(fixed_factors, model_fixed_factors)
        # Instantiate model with fixed factors and overwritten defaults.
        self.model = OpenJackson(self.model_fixed_factors, random)
        self.dim = self.model.factors["number_queues"]
        lambdas = self.model.calc_lambdas()
        # Stability region: each rate must exceed its effective arrival rate.
        self.lower_bounds = tuple(lambdas)
        self.upper_bounds = (np.inf,) * self.dim
        self.optimal_value = None  # Change if f is changed.
        self.optimal_solution = None  # Change if f is changed.
        if random and random_rng:
            self.model.attach_rng(random_rng)
        # Start slightly inside the stability region.
        self.factors["initial_solution"] = tuple(1.1 * lambda_i for lambda_i in lambdas)

    def attach_rngs(self, random_rng):
        """Randomize the service-rate weight of the objective."""
        self.random_rng = random_rng
        self.factors["service_rates_factor"] = random_rng[0].uniform(0, 5)

    def check_service_rates_factor(self):
        """Service-rate weight must be nonnegative."""
        return self.factors["service_rates_factor"] >= 0

    def vector_to_factor_dict(self, vector):
        """Convert a vector of decision variables to a factor dictionary."""
        return {"service_mus": vector[:]}

    def factor_dict_to_vector(self, factor_dict):
        """Convert a factor dictionary to a vector of decision variables."""
        return (factor_dict["service_mus"],)

    def response_dict_to_objectives(self, response_dict):
        """Extract the objective ("lagrange_obj") from a response dictionary."""
        if type(response_dict["lagrange_obj"]) == tuple:
            return (response_dict["lagrange_obj"][0],)
        return (response_dict["lagrange_obj"],)

    def response_dict_to_stoch_constraints(self, response_dict):
        """No stochastic constraints for this problem."""
        return None

    def deterministic_objectives_and_gradients(self, x):
        """Deterministic component of the objective is zero."""
        det_objectives = (0,)
        det_objectives_gradients = ((0,) * self.dim,)
        return det_objectives, det_objectives_gradients

    def deterministic_stochastic_constraints_and_gradients(self, x):
        """No stochastic constraints for this problem.

        BUG FIX: previously returned nonempty tuples although
        n_stochastic_constraints is 0 and response_dict_to_stoch_constraints
        returns None; now consistent with OpenJacksonMinQueue.
        """
        return None, None

    def check_deterministic_constraints(self, x):
        """Check stability: x_i > lambda_i at every station."""
        # Superclass method checks box constraints.
        lambdas = self.model.calc_lambdas()
        stable = all(x[i] > lambdas[i] for i in range(self.model.factors["number_queues"]))
        return super().check_deterministic_constraints(x) and stable

    def get_random_solution(self, rand_sol_rng):
        """Generate a random solution for starting or restarting solvers.

        Arguments
        ---------
        rand_sol_rng : mrg32k3a.mrg32k3a.MRG32k3a
            random-number generator used to sample a new random solution

        Returns
        -------
        x : vector of decision variables
        """
        if self.model.factors["steady_state_initialization"]:
            # Perturb each lambda upward so the draw is stable.
            lambdas = self.model.calc_lambdas()
            sum_alphas = sum(self.model.factors["arrival_alphas"])
            x = [lambdas[i] + rand_sol_rng.uniform(0, 1) * sum_alphas
                 for i in range(self.model.factors["number_queues"])]
        else:
            # BUG FIX: this class has no "service_rates_budget" factor, so the
            # original lookup raised KeyError. Use a surrogate budget of twice
            # the total effective arrival rate, keeping draws near the stable
            # region on average.
            lambdas = self.model.calc_lambdas()
            x = rand_sol_rng.continuous_random_vector_from_simplex(
                n_elements=self.model.factors["number_queues"],
                summation=2 * sum(lambdas),
                exact_sum=False)
        return x
+""" + +import numpy as np +from ortools.graph.python import max_flow +from ..base import Model, Problem + + +class SMF(Model): + """ + A model that simulates a stochastic Max-Flow problem with + capacities deducted with multivariate distributed noise distributed durations + + Attributes + ---------- + name : string + name of model + n_rngs : int + number of random-number generators used to run a simulation replication + n_responses : int + number of responses (performance measures) + factors : dict + changeable factors of the simulation model + specifications : dict + details of each factor (for GUI and data validation) + check_factor_list : dict + switch case for checking factor simulatability + + Arguments + --------- + fixed_factors : nested dict + fixed factors of the simulation model + + See also + -------- + base.Model + """ + def __init__(self, fixed_factors=None, random = False): + if fixed_factors is None: + fixed_factors = {} + self.name = "SMF" + self.n_rngs = 1 + self.n_random = 3 + self.random = random + self.n_responses = 1 + cov_fac = np.zeros((20, 20)) + np.fill_diagonal(cov_fac, 4) + cov_fac = cov_fac.tolist() + self.specifications = { + "num_nodes": { + "description": "number of nodes, 0 being the source, highest being the sink", + "datatype": int, + "default": 10 + }, + "source_index": { + "description": "source node index", + "datatype": int, + "default": 0 + }, + "sink_index": { + "description": "sink node index", + "datatype": int, + "default": 9 + }, + "arcs": { + "description": "list of arcs", + "datatype": list, + "default": [(0, 1), (0, 2), (0, 3), (1, 2), (1, 4), (2, 4), (4, 2), (3, 2), (2, 5), (4, 5), (3, 6), (3, 7), (6, 2), (6, 5), (6, 7), (5, 8), (6, 8), (6, 9), (7, 9), (8, 9)] + }, + "num_arcs": { + "description": "number of arcs to be generated", + "datatype": int, + "default": 20 + }, + "assigned_capacities": { + "description": "Assigned capacity of each arc", + "datatype": list, + "default": [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 
5, 5, 5, 5, 5, 5, 5] + # "default": [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5] + }, + "mean_noise": { + "description": "The mean noise in reduction of arc capacities", + "datatype": list, + "default": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + }, + "cov_noise": { + "description": "Covariance matrix of noise", + "datatype": list, + "default": cov_fac + } + + } + self.check_factor_list = { + "num_nodes": self.check_num_nodes, + "arcs": self.check_arcs, + "assigned_capacities": self.check_capacities, + "mean_noise": self.check_mean, + "cov_noise": self.check_cov, + "source_index": self.check_s, + "sink_index": self.check_t, + "num_arcs": self.check_num_arcs, + } + # Set factors of the simulation model. + super().__init__(fixed_factors) + + def check_num_nodes(self): + return self.factors["num_nodes"] > 0 + + def dfs(self, graph, start, visited=None): + if visited is None: + visited = set() + visited.add(start) + for next in graph[start] - visited: + self.dfs(graph, next, visited) + return visited + + def check_arcs(self): + if len(self.factors["arcs"]) <= 0: + return False + # Check source is connected to the sink. 
+ graph = {node: set() for node in range(0, self.factors["num_nodes"])} + for a in self.factors["arcs"]: + graph[a[0]].add(a[1]) + visited = self.dfs(graph, self.factors["source_index"]) + if self.factors["source_index"] in visited and self.factors["sink_index"] in visited: + return True + return False + + def check_capacities(self): + positive = True + for x in list(self.factors["assigned_capacities"]): + positive = positive & (x > 0) + return (len(self.factors["assigned_capacities"]) == len(self.factors["arcs"])) & positive + + def check_mean(self): + return len(self.factors["mean_noise"]) == len(self.factors["arcs"]) + + def check_cov(self): + return np.array(self.factors["cov_noise"]).shape == (len(self.factors["arcs"]), len(self.factors["arcs"])) + + def check_s(self): + return self.factors["source_index"] >= 0 and self.factors["source_index"] <= self.factors["num_nodes"] + + def check_t(self): + return self.factors["sink_index"] >= 0 and self.factors["sink_index"] <= self.factors["num_nodes"] + + def check_num_arcs(self): + return self.factors["num_arcs"] > 0 + + + def get_arcs(self, num_nodes, num_arcs, source, end, uni_rng): + # Generate a random graph + self.rand_fuc = True + + set_arcs = [] + for n1 in range(0, num_nodes - 1): + for n2 in range(n1 + 1, num_nodes): + set_arcs.append((n1, n2)) + + arcs = [(source, source + 1), (end - 1, end)] + remove = [] + def get_in(arcs, num_nodes, ind, in_ind=True): + global remove + if len(arcs) <= 0: + return False + graph = {node: set() for node in range(0, num_nodes)} + for a in arcs: + if in_ind == True: + graph[a[0]].add(a[1]) + else: + graph[a[1]].add(a[0]) + set0 = graph[ind] + for i in graph[ind]: + set0 = {*set0, *graph[i]} + for j in graph[i]: + set0 = {*set0, *graph[j]} + + if in_ind == True: + for j in set0 - graph[ind]: + if j in graph[ind]: + remove.append((ind, j)) + + set0 = {*set0, ind} + return set0 + + set0 = get_in(arcs, num_nodes, source) + for i in range(source + 1, end): + set0 = get_in(arcs, 
num_nodes, source) + if i not in set0: + set1 = list(get_in(arcs, num_nodes, i, False)) + n2 = set1[uni_rng.randint(0, len(set1)-1)] + set2 = [i for i in set0 if i < n2] + n1 = list(set2)[uni_rng.randint(0, len(set2)-1)] + + arc = (n1, n2) + arcs = {*arcs, arc} + + for i in range(1, num_nodes - 1): + set9 = get_in(arcs, num_nodes, i) + if end not in set9: + set_out = list(get_in(arcs, num_nodes, end, False)) + n1 = list(set9)[uni_rng.randint(0, len(set9)-1)] + set2 = [i for i in set_out if i > n1] + n2 = set2[uni_rng.randint(0, len(set2)-1)] + arc = (n1, n2) + arcs = {*arcs, arc} + + if len(arcs) < num_arcs: + remain_num = num_arcs - len(arcs) + remain = list(set(set_arcs) - set(arcs)) + idx = uni_rng.sample(range(0, len(remain)), remain_num) + aa = set([remain[i] for i in idx]) + arcs = {*arcs, *aa} + + else: + return list(arcs) + + return list(arcs) + + def get_covariance(self, num_arcs, cov_rng): + # Generate random covariance matrix + self.rand_fuc = True + random_values = [cov_rng.uniform(0, 1) for i in range(num_arcs*num_arcs)] + random_values = np.array(random_values).reshape((num_arcs, num_arcs)) + covariance_matrix = np.cov(random_values, rowvar=False) + 1 * np.eye(num_arcs) + + return covariance_matrix.tolist() + + def attach_rng(self, random_rng): + self.random_rng = random_rng + self.rand_fuc = False + + self.factors["sink_index"] = self.factors["num_nodes"] - 1 + arcs_set = self.get_arcs(self.factors["num_nodes"], self.factors["num_arcs"], self.factors["source_index"], self.factors["sink_index"], random_rng[0]) + arcs_set.sort(key=lambda a: a[1]) + arcs_set.sort(key=lambda a: a[0]) + self.factors["arcs"] = arcs_set + print('arcs: ', arcs_set) + self.factors["num_arcs"] = len(self.factors["arcs"]) + + self.factors["mean_noise"] = [0 for i in range(len(self.factors["arcs"]))] + + self.factors["cov_noise"] = self.get_covariance(self.factors["num_arcs"], random_rng[1]) + + + def replicate(self, rng_list): + """ + Simulate a single replication for the 
current model factors. + + Arguments + --------- + rng_list : list of mrg32k3a.mrg32k3a.MRG32k3a + rngs for model to use when simulating a replication + + Returns + ------- + responses : dict + performance measures of interest + "longest_path_length" = length/duration of longest path + gradients : dict of dicts + gradient estimates for each response + """ + # Designate separate random number generators. + solver = max_flow.SimpleMaxFlow() + exp_rng = rng_list[0] + + # From input graph generate start end end nodes. + start_nodes = [] + end_nodes = [] + for i, j in self.factors["arcs"]: + start_nodes.append(i) + end_nodes.append(j) + # Generate actual capacity. + for i in range(len(self.factors["arcs"])): + noise = exp_rng.mvnormalvariate(self.factors["mean_noise"], np.array(self.factors["cov_noise"])) + capacities = [] + for i in range(len(noise)): + capacities.append(max(1000 * (self.factors["assigned_capacities"][i] - noise[i]), 0)) + # Add arcs in bulk. + solver.add_arcs_with_capacity(start_nodes, end_nodes, capacities) + status = solver.solve(self.factors["source_index"], self.factors["sink_index"]) + if status != solver.OPTIMAL: + print('There was an issue with the max flow input.') + print(f'Status: {status}') + exit(1) + + # Construct gradient vector (=1 if has a outflow from min-cut nodes). 
+ gradient = np.zeros(len(self.factors["arcs"])) + grad_arclist = [] + min_cut_nodes = solver.get_source_side_min_cut() + for i in min_cut_nodes: + for j in range(self.factors['num_nodes']): + if j not in min_cut_nodes: + grad_arc = (i, j) + if (i, j) in self.factors['arcs']: + grad_arclist.append(grad_arc) + for arc in grad_arclist: + gradient[self.factors['arcs'].index(arc)] = 1 + + responses = {"Max Flow": solver.optimal_flow() / 1000} + gradients = {response_key: {factor_key: np.nan for factor_key in self.specifications} for response_key in responses} + gradients["Max Flow"]["assigned_capacities"] = gradient + return responses, gradients + + +""" +Summary +------- +Maximize the expected max flow from the source node s to the sink node t. +""" + + +class SMF_Max(Problem): + """ + Base class to implement simulation-optimization problems. + + Attributes + ---------- + name : string + name of problem + dim : int + number of decision variables + n_objectives : int + number of objectives + n_stochastic_constraints : int + number of stochastic constraints + minmax : tuple of int (+/- 1) + indicator of maximization (+1) or minimization (-1) for each objective + constraint_type : string + description of constraints types: + "unconstrained", "box", "deterministic", "stochastic" + variable_type : string + description of variable types: + "discrete", "continuous", "mixed" + lower_bounds : tuple + lower bound for each decision variable + upper_bounds : tuple + upper bound for each decision variable + Ci : ndarray (or None) + Coefficient matrix for linear inequality constraints of the form Ci@x <= di + Ce : ndarray (or None) + Coefficient matrix for linear equality constraints of the form Ce@x = de + di : ndarray (or None) + Constraint vector for linear inequality constraints of the form Ci@x <= di + de : ndarray (or None) + Constraint vector for linear equality constraints of the form Ce@x = de + gradient_available : bool + indicates if gradient of objective function is 
available + optimal_value : tuple + optimal objective function value + optimal_solution : tuple + optimal solution + model : Model object + associated simulation model that generates replications + model_default_factors : dict + default values for overriding model-level default factors + model_fixed_factors : dict + combination of overriden model-level factors and defaults + model_decision_factors : set of str + set of keys for factors that are decision variables + rng_list : list of mrg32k3a.mrg32k3a.MRG32k3a objects + list of RNGs used to generate a random initial solution + or a random problem instance + factors : dict + changeable factors of the problem + initial_solution : list + default initial solution from which solvers start + budget : int > 0 + max number of replications (fn evals) for a solver to take + specifications : dict + details of each factor (for GUI, data validation, and defaults) + + Arguments + --------- + name : str + user-specified name for problem + fixed_factors : dict + dictionary of user-specified problem factors + model_fixed factors : dict + subset of user-specified non-decision factors to pass through to the model + + See also + -------- + base.Problem + """ + def __init__(self, name="SMF-1", fixed_factors=None, model_fixed_factors=None, random=False, random_rng=None): + if fixed_factors is None: + fixed_factors = {} + if model_fixed_factors is None: + model_fixed_factors = {} + self.name = name + self.n_objectives = 1 + self.n_stochastic_constraints = 0 + self.minmax = (1, ) + self.constraint_type = "deterministic" + self.variable_type = "continuous" + self.gradient_available = True + self.optimal_value = None + self.optimal_solution = None + self.model_default_factors = {} + self.model_decision_factors = {"assigned_capacities"} + self.factors = fixed_factors + self.random = random + self.n_rngs = 1 + self.specifications = { + "initial_solution": { + "description": "initial solution", + "datatype": tuple, + "default": (1, ) * 20 + }, 
+ "budget": { + "description": "max # of replications for a solver to take", + "datatype": int, + "default": 10000 + }, + "cap": { + "description": "total set-capacity to be allocated to arcs.", + "datatype": int, + "default": 100 + } + } + self.check_factor_list = { + "initial_solution": self.check_initial_solution, + "budget": self.check_budget, + "cap": self.check_cap + } + super().__init__(fixed_factors, model_fixed_factors) + # Instantiate model with fixed factors and over-riden defaults and the random status. + self.model = SMF(self.model_fixed_factors, random) + if random and random_rng != None: + self.model.attach_rng(random_rng) + if self.model.rand_fuc == False: + print("Error: No random generator exists.") + return False + # self.dim = len(self.model.factors["arcs"]) + self.dim = self.model.factors["num_arcs"] + self.lower_bounds = (0, ) * self.dim + self.upper_bounds = (np.inf, ) * self.dim + self.factors["initial_solution"] = (1,) * self.dim + self.Ci = np.ones(self.dim) + self.Ce = None + self.di = np.array([self.factors["cap"]]) + self.de = None + + def check_cap(self): + return self.factors["cap"] >= 0 + + def vector_to_factor_dict(self, vector): + """ + Convert a vector of variables to a dictionary with factor keys + + Arguments + --------- + vector : tuple + vector of values associated with decision variables + + Returns + ------- + factor_dict : dictionary + dictionary with factor keys and associated values + """ + factor_dict = { + "assigned_capacities": vector[:] + } + return factor_dict + + def factor_dict_to_vector(self, factor_dict): + """ + Convert a dictionary with factor keys to a vector + of variables. 
+ + Arguments + --------- + factor_dict : dictionary + dictionary with factor keys and associated values + + Returns + ------- + vector : tuple + vector of values associated with decision variables + """ + vector = tuple(factor_dict["assigned_capacities"]) + return vector + + def random_budget(self, uni_rng): + # Choose a random budget + l = [300, 400, 500, 600, 700, 800, 900, 1000] + budget = uni_rng.choice(l) * self.dim + + return budget + + def attach_rngs(self, random_rng): + # Attach rng for problem class and generate random problem factors for random instances + self.random_rng = random_rng + + # For random version, randomize problem factors + if self.random: + self.factors["budget"] = self.random_budget(random_rng[0]) + print('budget: ', self.factors["budget"]) + + return random_rng + + def response_dict_to_objectives(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of objectives. + + Arguments + --------- + response_dict : dictionary + dictionary with response keys and associated values + + Returns + ------- + objectives : tuple + vector of objectives + """ + objectives = (response_dict["Max Flow"], ) + return objectives + + def response_dict_to_stoch_constraints(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of left-hand sides of stochastic constraints: E[Y] <= 0 + + Arguments + --------- + response_dict : dictionary + dictionary with response keys and associated values + + Returns + ------- + stoch_constraints : tuple + vector of LHSs of stochastic constraint + """ + stoch_constraints = None + return stoch_constraints + + def deterministic_stochastic_constraints_and_gradients(self, x): + """ + Compute deterministic components of stochastic constraints for a solution `x`. 
+ + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_stoch_constraints : tuple + vector of deterministic components of stochastic constraints + det_stoch_constraints_gradients : tuple + vector of gradients of deterministic components of stochastic constraints + """ + det_stoch_constraints = None + det_stoch_constraints_gradients = None + return det_stoch_constraints, det_stoch_constraints_gradients + + def deterministic_objectives_and_gradients(self, x): + """ + Compute deterministic components of objectives for a solution `x`. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_objectives : tuple + vector of deterministic components of objectives + det_objectives_gradients : tuple + vector of gradients of deterministic components of objectives + """ + det_objectives = (0, ) + det_objectives_gradients = ((0, ) * self.dim,) + return det_objectives, det_objectives_gradients + + def check_deterministic_constraints(self, x): + """ + Check if a solution `x` satisfies the problem's deterministic constraints. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + satisfies : bool + indicates if solution `x` satisfies the deterministic constraints. + """ + + return sum(self.factors["assigned_capacities"]) <= self.factors["cap"] + + def get_random_solution(self, rand_sol_rng): + """ + Generate a random solution for starting or restarting solvers. + + Arguments + --------- + rand_sol_rng : mrg32k3a.mrg32k3a.MRG32k3a object + random-number generator used to sample a new random solution + + Returns + ------- + x : tuple + vector of decision variables + """ + x = rand_sol_rng.continuous_random_vector_from_simplex(len(self.model.factors["arcs"]), self.factors["cap"], False) + return x diff --git a/simopt/models/san.py b/simopt/models/san.py index 8d219cd3f..7e476594c 100644 --- a/simopt/models/san.py +++ b/simopt/models/san.py @@ -6,6 +6,7 @@ `here `_. 
""" import numpy as np +import cvxpy as cp from ..base import Model, Problem @@ -15,7 +16,7 @@ class SAN(Model): A model that simulates a stochastic activity network problem with tasks that have exponentially distributed durations, and the selected means come with a cost. - +· Attributes ---------- name : string @@ -129,7 +130,7 @@ def replicate(self, rng_list): graph_in[a[1]].add(a[0]) graph_out[a[0]].add(a[1]) indegrees = [len(graph_in[n]) for n in range(1, self.factors["num_nodes"] + 1)] - # outdegrees = [len(graph_out[n]) for n in range(1, self.factors["num_nodes"]+1)] + queue = [] topo_order = [] for n in range(self.factors["num_nodes"]): @@ -212,6 +213,14 @@ class SANLongestPath(Problem): lower bound for each decision variable upper_bounds : tuple upper bound for each decision variable + Ci : ndarray (or None) + Coefficient matrix for linear inequality constraints of the form Ci@x <= di + Ce : ndarray (or None) + Coefficient matrix for linear equality constraints of the form Ce@x = de + di : ndarray (or None) + Constraint vector for linear inequality constraints of the form Ci@x <= di + de : ndarray (or None) + Constraint vector for linear equality constraints of the form Ce@x = de gradient_available : bool indicates if gradient of objective function is available optimal_value : tuple @@ -282,7 +291,7 @@ def __init__(self, name="SAN-1", fixed_factors=None, model_fixed_factors=None): "arc_costs": { "description": "Cost associated to each arc.", "datatype": tuple, - "default": (1,) * 13 + "default": (1, ) * 13 } } self.check_factor_list = { @@ -294,8 +303,13 @@ def __init__(self, name="SAN-1", fixed_factors=None, model_fixed_factors=None): # Instantiate model with fixed factors and over-riden defaults. 
self.model = SAN(self.model_fixed_factors) self.dim = len(self.model.factors["arcs"]) - self.lower_bounds = (1e-2,) * self.dim - self.upper_bounds = (np.inf,) * self.dim + self.lower_bounds = (1e-2, ) * self.dim + # self.upper_bounds = (np.inf, ) * self.dim + self.upper_bounds = (1000, ) * self.dim + self.Ci = None + self.Ce = None + self.di = None + self.de = None def check_arc_costs(self): positive = True @@ -448,3 +462,455 @@ def get_random_solution(self, rand_sol_rng): """ x = tuple([rand_sol_rng.lognormalvariate(lq=0.1, uq=10) for _ in range(self.dim)]) return x + + +""" +Summary +------- +Minimize the duration of the longest path from a to i subject to a lower bound in sum of arc_means. +""" + + +class SANLongestPathConstr(Problem): + """ + Base class to implement simulation-optimization problems. + + Attributes + ---------- + name : string + name of problem + dim : int + number of decision variables + n_objectives : int + number of objectives + n_stochastic_constraints : int + number of stochastic constraints + minmax : tuple of int (+/- 1) + indicator of maximization (+1) or minimization (-1) for each objective + constraint_type : string + description of constraints types: + "unconstrained", "box", "deterministic", "stochastic" + variable_type : string + description of variable types: + "discrete", "continuous", "mixed" + lower_bounds : tuple + lower bound for each decision variable + upper_bounds : tuple + upper bound for each decision variable + Ci : ndarray (or None) + Coefficient matrix for linear inequality constraints of the form Ci@x <= di + Ce : ndarray (or None) + Coefficient matrix for linear equality constraints of the form Ce@x = de + di : ndarray (or None) + Constraint vector for linear inequality constraints of the form Ci@x <= di + de : ndarray (or None) + Constraint vector for linear equality constraints of the form Ce@x = de + gradient_available : bool + indicates if gradient of objective function is available + optimal_value : tuple + 
optimal objective function value + optimal_solution : tuple + optimal solution + model : Model object + associated simulation model that generates replications + model_default_factors : dict + default values for overriding model-level default factors + model_fixed_factors : dict + combination of overriden model-level factors and defaults + model_decision_factors : set of str + set of keys for factors that are decision variables + rng_list : list of mrg32k3a.mrg32k3a.MRG32k3a objects + list of RNGs used to generate a random initial solution + or a random problem instance + factors : dict + changeable factors of the problem + initial_solution : list + default initial solution from which solvers start + budget : int > 0 + max number of replications (fn evals) for a solver to take + specifications : dict + details of each factor (for GUI, data validation, and defaults) + + Arguments + --------- + name : str + user-specified name for problem + fixed_factors : dict + dictionary of user-specified problem factors + model_fixed factors : dict + subset of user-specified non-decision factors to pass through to the model + + See also + -------- + base.Problem + """ + def __init__(self, name="SAN-2", fixed_factors=None, model_fixed_factors=None): + if fixed_factors is None: + fixed_factors = {} + if model_fixed_factors is None: + model_fixed_factors = {} + self.name = name + self.n_objectives = 1 + self.n_stochastic_constraints = 0 + self.minmax = (-1,) + self.constraint_type = "box" + self.variable_type = "continuous" + self.gradient_available = True + self.optimal_value = None + self.optimal_solution = None + self.model_default_factors = {} + self.model_decision_factors = {"arc_means"} + self.factors = fixed_factors + self.specifications = { + "initial_solution": { + "description": "initial solution", + "datatype": tuple, + "default": (20,) * 13 + }, + "budget": { + "description": "max # of replications for a solver to take", + "datatype": int, + "default": 10000 + }, + 
"arc_costs": { + "description": "Cost associated to each arc.", + "datatype": tuple, + "default": (1,) * 13 + }, + "sum_lb": { + "description": "Lower bound for the sum of arc means", + "datatype": float, + "default": 100.0 + } + } + self.check_factor_list = { + "initial_solution": self.check_initial_solution, + "budget": self.check_budget, + "arc_costs": self.check_arc_costs + } + super().__init__(fixed_factors, model_fixed_factors) + # Instantiate model with fixed factors and over-riden defaults. + self.model = SAN(self.model_fixed_factors) + self.dim = len(self.model.factors["arcs"]) + self.lower_bounds = (1e-2,) * self.dim + self.upper_bounds = (100000,) * self.dim #np.inf, + self.Ci = -1 * np.ones(13) + self.Ce = None + self.di = -1 * np.array([self.factors["sum_lb"]]) + self.de = None + + def check_arc_costs(self): + positive = True + for x in list(self.factors["arc_costs"]): + positive = positive & x > 0 + return (len(self.factors["arc_costs"]) != self.model.factors["num_arcs"]) & positive + + def vector_to_factor_dict(self, vector): + """ + Convert a vector of variables to a dictionary with factor keys + + Arguments + --------- + vector : tuple + vector of values associated with decision variables + + Returns + ------- + factor_dict : dictionary + dictionary with factor keys and associated values + """ + factor_dict = { + "arc_means": vector[:] + } + return factor_dict + + def factor_dict_to_vector(self, factor_dict): + """ + Convert a dictionary with factor keys to a vector + of variables. + + Arguments + --------- + factor_dict : dictionary + dictionary with factor keys and associated values + + Returns + ------- + vector : tuple + vector of values associated with decision variables + """ + vector = tuple(factor_dict["arc_means"]) + return vector + + def response_dict_to_objectives(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of objectives. 
+ + Arguments + --------- + response_dict : dictionary + dictionary with response keys and associated values + + Returns + ------- + objectives : tuple + vector of objectives + """ + objectives = (response_dict["longest_path_length"],) + return objectives + + def response_dict_to_stoch_constraints(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of left-hand sides of stochastic constraints: E[Y] <= 0 + + Arguments + --------- + response_dict : dictionary + dictionary with response keys and associated values + + Returns + ------- + stoch_constraints : tuple + vector of LHSs of stochastic constraint + """ + stoch_constraints = None + return stoch_constraints + + def deterministic_stochastic_constraints_and_gradients(self, x): + """ + Compute deterministic components of stochastic constraints for a solution `x`. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_stoch_constraints : tuple + vector of deterministic components of stochastic constraints + det_stoch_constraints_gradients : tuple + vector of gradients of deterministic components of stochastic constraints + """ + det_stoch_constraints = None + det_stoch_constraints_gradients = ((0,) * self.dim,) # tuple of tuples – of sizes self.dim by self.dim, full of zeros + return det_stoch_constraints, det_stoch_constraints_gradients + + def deterministic_objectives_and_gradients(self, x): + """ + Compute deterministic components of objectives for a solution `x`. 
+ + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_objectives : tuple + vector of deterministic components of objectives + det_objectives_gradients : tuple + vector of gradients of deterministic components of objectives + """ + det_objectives = (0,) + det_objectives_gradients = ((0,) * self.dim,) + return det_objectives, det_objectives_gradients + + def check_deterministic_constraints(self, x): + """ + Check if a solution `x` satisfies the problem's deterministic constraints. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + satisfies : bool + indicates if solution `x` satisfies the deterministic constraints. + """ + return np.all(np.array(x) >= 0) + + # def get_random_solution(self, rand_sol_rng): + # """ + # Generate a random solution for starting or restarting solvers. + + # Arguments + # --------- + # rand_sol_rng : mrg32k3a.mrg32k3a.MRG32k3a object + # random-number generator used to sample a new random solution + + # Returns + # ------- + # x : tuple + # vector of decision variables + # """ + # while True: + # x = [rand_sol_rng.lognormalvariate(lq = 0.1, uq = 10) for _ in range(self.dim)] + # if np.sum(x) >= self.factors['sum_lb']: + # break + # x= tuple(x) + # return x + + def find_feasible_initial(self, Ae, Ai, be, bi): + ''' + Find an initial feasible solution (if not user-provided) + by solving phase one simplex. + + Arguments + --------- + problem : Problem object + simulation-optimization problem to solve + C: ndarray + constraint coefficient matrix + d: ndarray + constraint coefficient vector + + Returns + ------- + x0 : ndarray + an initial feasible solution + tol: float + Floating point comparison tolerance + ''' + upper_bound = np.array(self.upper_bounds) + lower_bound = np.array(self.lower_bounds) + + # Define decision variables. + x = cp.Variable(self.dim) + + # Define constraints. 
+ constraints = [] + + if (Ae is not None) and (be is not None): + constraints.append(Ae @ x == be.ravel()) + if (Ai is not None) and (bi is not None): + constraints.append(Ai @ x <= bi.ravel()) + + # Removing redundant bound constraints. + ub_inf_idx = np.where(~np.isinf(upper_bound))[0] + if len(ub_inf_idx) > 0: + for i in ub_inf_idx: + constraints.append(x[i] <= upper_bound[i]) + lb_inf_idx = np.where(~np.isinf(lower_bound)) + if len(lb_inf_idx) > 0: + for i in lb_inf_idx: + constraints.append(x[i] >= lower_bound[i]) + + # Define objective function. + obj = cp.Minimize(0) + + # Create problem. + model = cp.Problem(obj, constraints) + + # Solve problem. + model.solve(solver = cp.SCIPY) + + # Check for optimality. + if model.status not in [cp.OPTIMAL, cp.OPTIMAL_INACCURATE] : + raise ValueError("Could not find feasible x0") + x0 = x.value + + return x0 + + def get_random_solution(self, rand_sol_rng): + """ + Generate a random solution for starting or restarting solvers. + + Arguments + --------- + rand_sol_rng : mrg32k3a.mrg32k3a.MRG32k3a + random-number generator used to sample a new random solution + + Returns + ------- + x : tuple + vector of decision variables + """ + + # Upper bound and lower bound. + lower_bound = np.array(self.lower_bounds) + upper_bound = np.array(self.upper_bounds) + # Input inequality and equlaity constraint matrix and vector. + # Cix <= di + # Cex = de + Ci = self.Ci + di = self.di + Ce = self.Ce + de = self.de + + # Remove redundant upper/lower bounds. + ub_inf_idx = np.where(~np.isinf(upper_bound))[0] + lb_inf_idx = np.where(~np.isinf(lower_bound))[0] + + # Form a constraint coefficient matrix where all the equality constraints are put on top and + # all the bound constraints in the bottom and a constraint coefficient vector. 
+ if (Ce is not None) and (de is not None) and (Ci is not None) and (di is not None): + C = np.vstack((Ce, Ci)) + d = np.vstack((de.T, di.T)) + elif (Ce is not None) and (de is not None): + C = Ce + d = de.T + elif (Ci is not None) and (di is not None): + C = Ci + d = di.T + else: + C = np.empty([1, self.dim]) + d = np.empty([1, 1]) + + if len(ub_inf_idx) > 0: + C = np.vstack((C, np.identity(upper_bound.shape[0]))) + d = np.vstack((d, upper_bound[np.newaxis].T)) + if len(lb_inf_idx) > 0: + C = np.vstack((C, -np.identity(lower_bound.shape[0]))) + d = np.vstack((d, -lower_bound[np.newaxis].T)) + + # Hit and Run + start_pt = self.find_feasible_initial(None, C, None, d) + tol = 1e-6 + + x = start_pt + # Generate the markov chain for sufficiently long. + for _ in range(20): + # Generate a random direction to travel. + direction = np.array([rand_sol_rng.uniform(0, 1) for _ in range(self.dim)]) + direction = direction / np.linalg.norm(direction) + + dir = direction + ra = d.flatten() - C @ x + ra_d = C @ dir + # Initialize maximum step size. + s_star = np.inf + # Perform ratio test. + for i in range(len(ra)): + if ra_d[i] - tol > 0: + s = ra[i]/ra_d[i] + if s < s_star: + s_star = s + + dir = -direction + ra = d.flatten() - C @ x + ra_d = C @ dir + # Initialize maximum step size. + s_star2 = np.inf + # Perform ratio test. + for i in range(len(ra)): + if ra_d[i] - tol > 0: + s = ra[i]/ra_d[i] + if s < s_star2: + s_star2 = s + + # Generate random point between lambdas. + # lam = rand_sol_rng.uniform(-1 * s_star2, s_star) + lam = rand_sol_rng.uniform(-1 * min(50, s_star2), min(50, s_star)) + + # Compute the new point. + x += lam * direction + + print('sol: ', x) + + x= tuple(x) + return x diff --git a/simopt/models/san_2.py b/simopt/models/san_2.py new file mode 100644 index 000000000..a4bf8896a --- /dev/null +++ b/simopt/models/san_2.py @@ -0,0 +1,1210 @@ +""" +Summary +------- +Simulate duration of a stochastic activity network (SAN). 
+A detailed description of the model/problem can be found +`here `_. +""" +import numpy as np +from scipy.optimize import linprog + +from ..base import Model, Problem + + +class SAN(Model): + """ + A model that simulates a stochastic activity network problem with + tasks that have exponentially distributed durations, and the selected + means come with a cost. + + Attributes + ---------- + name : string + name of model + n_rngs : int + number of random-number generators used to run a simulation replication + n_responses : int + number of responses (performance measures) + factors : dict + changeable factors of the simulation model + specifications : dict + details of each factor (for GUI and data validation) + check_factor_list : dict + switch case for checking factor simulatability + + Arguments + --------- + fixed_factors : nested dict + fixed factors of the simulation model + + See also + -------- + base.Model + """ + def __init__(self, fixed_factors=None, random=False): + if fixed_factors is None: + fixed_factors = {} + self.name = "SAN" + self.n_rngs = 1 + self.n_responses = 1 + self.n_random = 2 # Number of rng used for the random instance + self.random = random + self.specifications = { + "num_nodes": { + "description": "number of nodes", + "datatype": int, + "default": 9 + }, + "arcs": { + "description": "list of arcs", + "datatype": list, + "default": [(1, 2), (1, 3), (2, 3), (2, 4), (2, 6), (3, 6), (4, 5), + (4, 7), (5, 6), (5, 8), (6, 9), (7, 8), (8, 9)] + }, + + "arc_means": { + "description": "mean task durations for each arc", + "datatype": tuple, + "default": (1,) * 13 + }, + "num_arcs": { + "description": "number of arcs to be generated", + "datatype": int, + "default": 13 + }, + "set_arcs": { + "description": "list of all possible arcs", + "datatype": list, + "default": [(1, 2), (1, 3),(1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (1, 9), + (2, 3), (2, 4), (2, 5), (2, 6), (2, 7), (2, 8), (2, 9), + (3, 4), (3, 5), (3, 6), (3, 7), (3, 8), (3, 9), + (4, 5), 
(4, 6), (4, 7), (4, 8), (4, 9), + (5, 6), (5, 7), (5, 8), (5, 9), + (6, 7), (6, 8), (6, 9), + (7, 8), (7, 9), + (8, 9)] + } + } + self.check_factor_list = { + "num_nodes": self.check_num_nodes, + "arcs": self.check_arcs, + "arc_means": self.check_arc_means, + "num_arcs": self.check_num_arcs, + "set_arcs": self.check_set_arcs + } + # Set factors of the simulation model. + super().__init__(fixed_factors) + + def check_num_nodes(self): + return self.factors["num_nodes"] > 0 + + def dfs(self, graph, start, visited=None): + if visited is None: + visited = set() + visited.add(start) + + for next in graph[start] - visited: + self.dfs(graph, next, visited) + return visited + + def check_arcs(self): + if len(self.factors["arcs"]) <= 0: + return False + # Check graph is connected. + graph = {node: set() for node in range(1, self.factors["num_nodes"] + 1)} + for a in self.factors["arcs"]: + graph[a[0]].add(a[1]) + visited = self.dfs(graph, 1) + if self.factors["num_nodes"] in visited: + return True + return False + + def check_arc_means(self): + positive = True + for x in list(self.factors["arc_means"]): + positive = positive & (x > 0) + return (len(self.factors["arc_means"]) == len(self.factors["arcs"])) & positive + + def check_num_arcs(self): + return self.factors["num_arcs"] > 0 + + def check_set_arcs(self): + return True + + def allPathsStartEnd(self, graph): + end = len(graph) + + def dfs(node, path, output): + if node == end: + output.append(path) + + for nx in graph[node]: + dfs(nx, path+[nx], output) + + output = [] + dfs(1,[1],output) + return output + + def get_arcs(self, num_nodes, num_arcs, uni_rng): + """ + Getting a random set of valid arcs. 
+ + Arguments + --------- + num_nodes: int + number of nodes for the random graph + num_arcs: int + number of arcs for the random graph + rng_list : list of mrg32k3a.mrg32k3a.MRG32k3a + rngs for model to use when simulating a replication + + Returns + ------- + arcs : list + Generated random arcs to be used in the following simulation + """ + # Calculate the total set of possible arcs in the graph + set_arcs = [] + for n1 in range(1, num_nodes): + for n2 in range(n1 + 1, num_nodes + 1): + set_arcs.append((n1, n2)) + + # Assign the arcs set with the necessary arcs + arcs = [(1, 2), (num_nodes - 1, num_nodes)] + remove = [] + def get_in(arcs, num_nodes, ind, in_ind=True): + global remove + if len(arcs) <= 0: + return False + graph = {node: set() for node in range(1, num_nodes + 1)} + for a in arcs: + if in_ind == True: + graph[a[0]].add(a[1]) + else: + graph[a[1]].add(a[0]) + set0 = graph[ind] + for i in graph[ind]: + set0 = {*set0, *graph[i]} + for j in graph[i]: + set0 = {*set0, *graph[j]} + + if in_ind == True: + for j in set0 - graph[ind]: + if j in graph[ind]: + remove.append((ind, j)) + + set0 = {*set0, ind} + return set0 + + # Check whether the first node can reach all other nodes + set0 = get_in(arcs, num_nodes, 1) + for i in range(2, num_nodes+1): + set0 = get_in(arcs, num_nodes, 1) # Get the set of nodes that starter node can reach + if i not in set0: + set1 = list(get_in(arcs, num_nodes, i, False)) # Get the set of nodes that can reach node i + n2 = set1[uni_rng.randint(0, len(set1)-1)] # Randomly choose one + set2 = [i for i in set0 if i < n2] + n1 = list(set2)[uni_rng.randint(0, len(set2)-1)] + arc = (n1, n2) # Connect the two nodes so that starter node can reach node i + arcs = {*arcs, arc} + + # Check whether each node can reach the end node + for i in range(2, num_nodes): + set9 = get_in(arcs, num_nodes, i) + if num_nodes not in set9: + set_out = list(get_in(arcs, num_nodes, num_nodes, False)) + n1 = list(set9)[uni_rng.randint(0, len(set9)-1)] + set2 
= [i for i in set_out if i > n1] + n2 = set2[uni_rng.randint(0, len(set2)-1)] + arc = (n1, n2) + arcs = {*arcs, arc} + + if len(arcs) < num_arcs: # If the current arc set has less arcs than the input lower bound + remain_num = num_arcs - len(arcs) + remain = list(set(set_arcs) - set(arcs)) + idx = uni_rng.sample(range(0, len(remain)), remain_num) + aa = set([remain[i] for i in idx]) + arcs = {*arcs, *aa} + + else: + return list(arcs) + + return list(arcs) + + def attach_rng(self, random_rng): + """ + Attach rng to random model class and generate random factors and update corresponding problem dimension. + + Arguments + --------- + random_rng : list of mrg32k3a.mrg32k3a.MRG32k3a + rngs for model to use when generating random factors + + Returns + ------- + arcs : list + Generated random arcs to be used in the following simulation + """ + self.random_rng = random_rng + arcs_set = self.get_arcs(self.factors["num_nodes"], self.factors["num_arcs"], random_rng[0]) + + arcs_set.sort(key=lambda a: a[1]) + arcs_set.sort(key=lambda a: a[0]) + self.factors["arcs"] = arcs_set + print('arcs: ', arcs_set) + self.factors["num_arcs"] = len(self.factors["arcs"]) + self.factors["arc_means"] = (1,) * len(self.factors["arcs"]) + + def replicate(self, rng_list): + """ + Simulate a single replication for the current model factors. + + Arguments + --------- + rng_list : list of mrg32k3a.mrg32k3a.MRG32k3a + rngs for model to use when simulating a replication + + Returns + ------- + responses : dict + performance measures of interest + "longest_path_length" = length/duration of longest path + gradients : dict of dicts + gradient estimates for each response + """ + + # Designate separate random number generators. + exp_rng = rng_list[0] + + # Topological sort. 
+ graph_in = {node: set() for node in range(1, self.factors["num_nodes"] + 1)} + graph_out = {node: set() for node in range(1, self.factors["num_nodes"] + 1)} + for a in self.factors["arcs"]: + graph_in[a[1]].add(a[0]) + graph_out[a[0]].add(a[1]) + indegrees = [len(graph_in[n]) for n in range(1, self.factors["num_nodes"] + 1)] + queue = [] + topo_order = [] + for n in range(self.factors["num_nodes"]): + if indegrees[n] == 0: + queue.append(n + 1) + while len(queue) != 0: + u = queue.pop(0) + topo_order.append(u) + for n in graph_out[u]: + indegrees[n - 1] -= 1 + if indegrees[n - 1] == 0: + queue.append(n) + + # Generate arc lengths. + arc_length = {} + for i in range(len(self.factors["arcs"])): + arc_length[str(self.factors["arcs"][i])] = exp_rng.expovariate(1 / self.factors["arc_means"][i]) + + ## Calculate the length of the longest path. + allpaths = self.allPathsStartEnd(graph_out) + L = [] + for p in allpaths: + l = 0 + for j in range(len(p)-1): + l += arc_length[str((p[j], p[j+1]))] + L.append(l) + longest_path = np.max(L) + longest_P = allpaths[np.argmax(L)] + + # Calculate the IPA gradient w.r.t. arc means. + # If an arc is on the longest path, the component of the gradient + # is the length of the length of that arc divided by its mean. + # If an arc is not on the longest path, the component of the gradient is zero. + gradient = np.zeros(len(self.factors["arcs"])) + + for i in range(len(longest_P)-1,0,-1): + backtrack = longest_P[i-1] + current = longest_P[i] + idx = self.factors["arcs"].index((backtrack, current)) + gradient[idx] = arc_length[str((backtrack, current))] / (self.factors["arc_means"][idx]) + + # Compose responses and gradients. 
+ responses = {"longest_path_length": longest_path} + gradients = {response_key: {factor_key: np.nan for factor_key in self.specifications} for response_key in responses} + gradients["longest_path_length"]["arc_means"] = gradient + return responses, gradients + + +""" +Summary +------- +Minimize the duration of the longest path from a to i plus cost. +""" + + +class SANLongestPath(Problem): + """ + Base class to implement simulation-optimization problems. + + Attributes + ---------- + name : string + name of problem + dim : int + number of decision variables + n_objectives : int + number of objectives + n_stochastic_constraints : int + number of stochastic constraints + minmax : tuple of int (+/- 1) + indicator of maximization (+1) or minimization (-1) for each objective + constraint_type : string + description of constraints types: + "unconstrained", "box", "deterministic", "stochastic" + variable_type : string + description of variable types: + "discrete", "continuous", "mixed" + lower_bounds : tuple + lower bound for each decision variable + upper_bounds : tuple + upper bound for each decision variable + gradient_available : bool + indicates if gradient of objective function is available + optimal_value : float + optimal objective function value + optimal_solution : tuple + optimal solution + model : Model object + associated simulation model that generates replications + model_default_factors : dict + default values for overriding model-level default factors + model_fixed_factors : dict + combination of overriden model-level factors and defaults + model_decision_factors : set of str + set of keys for factors that are decision variables + rng_list : list of mrg32k3a.mrg32k3a.MRG32k3a objects + list of RNGs used to generate a random initial solution + or a random problem instance + factors : dict + changeable factors of the problem + initial_solution : list + default initial solution from which solvers start + budget : int > 0 + max number of replications (fn 
evals) for a solver to take + specifications : dict + details of each factor (for GUI, data validation, and defaults) + random: bool + indicator of whether user want to build a random problem or a deterministic model + n_rng: int + Number of random number generator needed to build a random problem instance + + Arguments + --------- + name : str + user-specified name for problem + fixed_factors : dict + dictionary of user-specified problem factors + model_fixed factors : dict + subset of user-specified non-decision factors to pass through to the model + + See also + -------- + base.Problem + """ + def __init__(self, name="SAN-1", fixed_factors=None, model_fixed_factors=None, random=False, random_rng=None): + if fixed_factors is None: + fixed_factors = {} + if model_fixed_factors is None: + model_fixed_factors = {} + self.name = name + self.n_objectives = 1 + self.n_stochastic_constraints = 0 + self.minmax = (-1,) + self.constraint_type = "box" + self.variable_type = "continuous" + self.gradient_available = True + self.optimal_value = None + self.optimal_solution = None + self.model_default_factors = {} + self.model_decision_factors = {"arc_means"} + self.factors = fixed_factors + self.random = random # Randomlize problem and model or not + self.n_rngs = 3 # Number of rngs used for the random instance + self.specifications = { + "initial_solution": { + "description": "initial solution", + "datatype": tuple, + "default": (8,) * 13 + }, + "budget": { + "description": "max # of replications for a solver to take", + "datatype": int, + "default": 100000 + }, + "c": { + "description": "cost associated to each arc", + "datatype": tuple, + "default": (1,) * 13 + } + } + self.check_factor_list = { + "initial_solution": self.check_initial_solution, + "budget": self.check_budget, + "c": self.check_arc_costs + } + super().__init__(fixed_factors, model_fixed_factors) + # Instantiate model with fixed factors and over-riden defaults. 
+ self.model = SAN(self.model_fixed_factors, random) + # If random, generate random model factors and update model class + if random==True and random_rng != None: + self.model.attach_rng(random_rng) + self.dim = len(self.model.factors["arcs"]) + # Update every value and dimension according to the randomly generated case + self.factors["initial_solution"] = (8,) * self.dim + self.factors["c"] = (1,) * self.dim + self.lower_bounds = (1e-2,) * self.dim + self.upper_bounds = (np.inf,) * self.dim + self.Ci = None + self.Ce = None + self.di = None + self.de = None + + def check_arc_costs(self): + """ + Check if the arc costs are positive. + + Returns + ------- + bool + indicates if arc costs are positive + """ + positive = True + for x in list(self.factors["c"]): + positive = positive & x > 0 + return (len(self.factors["c"]) != self.dim) & positive + + def check_budget(self): + """ + Check if the budget is positive. + + Returns + ------- + bool + indicates if the budget is positive + """ + return self.factors["budget"] > 0 + + def vector_to_factor_dict(self, vector): + """ + Convert a vector of variables to a dictionary with factor keys + + Arguments + --------- + vector : tuple + vector of values associated with decision variables + + Returns + ------- + factor_dict : dictionary + dictionary with factor keys and associated values + """ + factor_dict = { + "arc_means": vector[:] + } + return factor_dict + + def factor_dict_to_vector(self, factor_dict): + """ + Convert a dictionary with factor keys to a vector + of variables. + + Arguments + --------- + factor_dict : dictionary + dictionary with factor keys and associated values + + Returns + ------- + vector : tuple + vector of values associated with decision variables + """ + vector = tuple(factor_dict["arc_means"]) + return vector + + def get_coefficient(self, exp_rng): + """ + Generate random coefficients for each arc. 
+ + Arguments + --------- + exp_rng : mrg32k3a.mrg32k3a.MRG32k3a object + random-number generator used to sample random coefficients + + Returns + ------- + c : list + vector of coefficients + """ + c = [] + for i in range(len(self.factors["c"])): + ci = exp_rng.expovariate(1) + c.append(ci) + + return c + + def random_budget(self, uni_rng): + """ + Generate random budget for the problem, proportional to the dimension. + + Arguments + --------- + uni_rng : mrg32k3a.mrg32k3a.MRG32k3a object + random-number generator used to sample random budget + """ + l = [100, 200, 300, 400, 500] + budget = uni_rng.choice(l) * self.dim + return budget + + def attach_rngs(self, random_rng): + """ + Attach random-number generators to the problem. + + Arguments + --------- + random_rng : list of mrg32k3a.mrg32k3a.MRG32k3a objects + list of rngs for problem to use when generating random instances + """ + # Attach rng for problem class and generate random problem factors for random instances + self.random_rng = random_rng + + if self.random == True: + self.factors["budget"] = self.random_budget(random_rng[0]) + self.factors["c"] = self.get_coefficient(random_rng[1]) + + print('budget: ', self.factors['budget']) + print('c: ', self.factors["c"]) + + return random_rng + + def response_dict_to_objectives(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of objectives. 
+ + Arguments + --------- + response_dict : dictionary + dictionary with response keys and associated values + + Returns + ------- + objectives : tuple + vector of objectives + """ + objectives = (response_dict["longest_path_length"],) + return objectives + + def response_dict_to_stoch_constraints(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of left-hand sides of stochastic constraints: E[Y] <= 0 + + Arguments + --------- + response_dict : dictionary + dictionary with response keys and associated values + + Returns + ------- + stoch_constraints : tuple + vector of LHSs of stochastic constraint + """ + stoch_constraints = None + return stoch_constraints + + def deterministic_stochastic_constraints_and_gradients(self, x): + """ + Compute deterministic components of stochastic constraints for a solution `x`. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_stoch_constraints : tuple + vector of deterministic components of stochastic constraints + det_stoch_constraints_gradients : tuple + vector of gradients of deterministic components of stochastic constraints + """ + det_stoch_constraints = None + det_stoch_constraints_gradients = ((0,) * self.dim,) # tuple of tuples – of sizes self.dim by self.dim, full of zeros + return det_stoch_constraints, det_stoch_constraints_gradients + + def deterministic_objectives_and_gradients(self, x): + """ + Compute deterministic components of objectives for a solution `x`. 
+ + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_objectives : tuple + vector of deterministic components of objectives + det_objectives_gradients : tuple + vector of gradients of deterministic components of objectives + """ + det_objectives = (np.sum(np.array(self.factors["c"]) / np.array(x))/len(x),) + det_objectives_gradients = (-np.array(self.factors["c"]) / np.array(x) ** 2 / len(x),) + return det_objectives, det_objectives_gradients + + def check_deterministic_constraints(self, x): + """ + Check if a solution `x` satisfies the problem's deterministic constraints. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + satisfies : bool + indicates if solution `x` satisfies the deterministic constraints. + """ + return np.all(np.array(x) >= 0) + + def get_random_solution(self, rand_sol_rng): + """ + Generate a random solution for starting or restarting solvers. + + Arguments + --------- + rand_sol_rng : mrg32k3a.mrg32k3a.MRG32k3a object + random-number generator used to sample a new random solution + + Returns + ------- + x : tuple + vector of decision variables· + """ + x = tuple([rand_sol_rng.lognormalvariate(lq=0.1, uq=10) for _ in range(self.dim)]) + return x + + +""" +Summary +------- +Minimize the duration of the longest path from a to i subject to some lower bounds in sum of arc_means. +""" + +class SANLongestPathConstr(Problem): + """ + Base class to implement simulation-optimization problems. 
+ + Attributes + ---------- + name : string + name of problem + dim : int + number of decision variables + n_objectives : int + number of objectives + n_stochastic_constraints : int + number of stochastic constraints + minmax : tuple of int (+/- 1) + indicator of maximization (+1) or minimization (-1) for each objective + constraint_type : string + description of constraints types: + "unconstrained", "box", "deterministic", "stochastic" + variable_type : string + description of variable types: + "discrete", "continuous", "mixed" + lower_bounds : tuple + lower bound for each decision variable + upper_bounds : tuple + upper bound for each decision variable + gradient_available : bool + indicates if gradient of objective function is available + optimal_value : float + optimal objective function value + optimal_solution : tuple + optimal solution + model : Model object + associated simulation model that generates replications + model_default_factors : dict + default values for overriding model-level default factors + model_fixed_factors : dict + combination of overriden model-level factors and defaults + model_decision_factors : set of str + set of keys for factors that are decision variables + rng_list : list of mrg32k3a.mrg32k3a.MRG32k3a objects + list of RNGs used to generate a random initial solution + or a random problem instance + factors : dict + changeable factors of the problem + initial_solution : list + default initial solution from which solvers start + budget : int > 0 + max number of replications (fn evals) for a solver to take + specifications : dict + details of each factor (for GUI, data validation, and defaults) + random: bool + indicator of whether user want to build a random problem or a deterministic model + n_rng: int + Number of random number generator needed to build a random problem instance + random_const: bool + indicator of whether to generate random constraints for the random problem instance or not + + Arguments + --------- + name : str + 
user-specified name for problem + fixed_factors : dict + dictionary of user-specified problem factors + model_fixed factors : dict + subset of user-specified non-decision factors to pass through to the model + + See also + -------- + base.Problem + """ + def __init__(self, name="SAN-2", fixed_factors=None, model_fixed_factors=None, random=False, random_rng=None): + if fixed_factors is None: + fixed_factors = {} + if model_fixed_factors is None: + model_fixed_factors = {} + self.name = name + self.n_objectives = 1 + self.n_stochastic_constraints = 0 + self.minmax = (-1,) + self.constraint_type = "box" + self.variable_type = "continuous" + self.gradient_available = True + self.optimal_value = None + self.optimal_solution = None + self.model_default_factors = {} + self.model_decision_factors = {"arc_means"} + self.factors = fixed_factors + self.random = random + self.random_const = False # Turn on if want to generate random constraints for random problem instance + if self.random_const: + self.num_con = 3 # Number of random constraints to generate + else: + self.num_con = 1 + self.n_rngs = 3 # Number of rngs used for the random instance + self.specifications = { + "initial_solution": { + "description": "initial solution", + "datatype": tuple, + "default": (15,) * 13 + }, + "budget": { + "description": "max # of replications for a solver to take", + "datatype": int, + "default": 100000 + }, + "arc_costs": { + "description": "cost associated to each arc", + "datatype": tuple, + "default": (1,) * 13 + }, + "r_const": { + "description": "random constraint for arc rates", + 'datatype': int, + "default": 0 + }, + "sum_lb": { + "description": "Lower bound for the sum of arc means", + "datatype": float, + "default": 100.0 + }, + "lbs":{ + "description": "Lower bounds for the selected sum of arc means", + "datatype": float, + "default": 0.0, + } + } + self.check_factor_list = { + "initial_solution": self.check_initial_solution, + "budget": self.check_budget, + "arc_costs": 
self.check_arc_costs, + "r_const": self.check_const, + "sum_lb": self.check_lb, + "lbs": self.check_lbs + } + super().__init__(fixed_factors, model_fixed_factors) + # Instantiate model with fixed factors and over-riden defaults. + self.model = SAN(self.model_fixed_factors, random) + if random==True and random_rng != None: + self.model.attach_rng(random_rng) + self.dim = len(self.model.factors["arcs"]) + self.factors["initial_solution"] = (15,) * self.dim + self.factors["arc_costs"] = (1,) * self.dim + self.lower_bounds = (1e-2,) * self.dim + self.upper_bounds = (np.inf,) * self.dim + self.Ci = -1 * np.ones(self.dim) + self.Ce = None + self.di = -1 * np.array([self.factors["sum_lb"]]) + self.de = None + + def check_arc_costs(self): + """ + Check if the arc costs are positive. + + Returns + ------- + bool + indicates if arc costs are positive + """ + positive = True + for x in list(self.factors["arc_costs"]): + positive = positive & x > 0 + return (len(self.factors["arc_costs"]) != self.dim) & positive + + def check_budget(self): + """ + Check if the budget is positive. + + Returns + ------- + bool + indicates if the budget is positive + """ + return self.factors["budget"] > 0 + + def check_const(self): + """ + Check if the random constraint is positive. + + Returns + ------- + bool + indicates if the random constraint is positive + """ + return self.factors["r_const"] >= 0 + + def check_lb(self): + """ + Check if the lower bound for sum of all arc rates is positive. + + Returns + ------- + bool + indicates if the lower bound is positive + """ + return self.factors["sum_lb"] >= 0 + + def check_lbs(self): + """ + Check if other potential lower bound is positive. 
+ + Returns + ------- + bool + indicates if other lower bound is positive + """ + return self.factors["lbs"] >= 0 + + def vector_to_factor_dict(self, vector): + """ + Convert a vector of variables to a dictionary with factor keys + + Arguments + --------- + vector : tuple + vector of values associated with decision variables + + Returns + ------- + factor_dict : dictionary + dictionary with factor keys and associated values + """ + factor_dict = { + "arc_means": vector[:] + } + return factor_dict + + def factor_dict_to_vector(self, factor_dict): + """ + Convert a dictionary with factor keys to a vector + of variables. + + Arguments + --------- + factor_dict : dictionary + dictionary with factor keys and associated values + + Returns + ------- + vector : tuple + vector of values associated with decision variables + """ + vector = tuple(factor_dict["arc_means"]) + return vector + + def get_coefficient(self, exp_rng): + """ + Generate random coefficients for each arc. + + Arguments + --------- + exp_rng : mrg32k3a.mrg32k3a.MRG32k3a object + random-number generator used to sample random coefficients + """ + if self.random == True: + c = [] + for i in range(len(self.factors["arc_costs"])): + ci = exp_rng.expovariate(1) + c.append(ci) + return c + else: + return self.factors['arc_costs'] + + def random_budget(self, random_rng): + """ + Generate random budget for the problem, proportional to the dimension. + + Arguments + --------- + uni_rng : mrg32k3a.mrg32k3a.MRG32k3a object + random-number generator used to sample random budget + """ + if self.random == True: + l = [10000, 20000] + budget = random_rng.choice(l) * self.dim + return budget + else: + return self.factors['budget'] + + def get_const(self, n, uni_rng): + """ + Generate random constraint for the problem, proportional to the dimension. 
+ + Arguments + --------- + n : int + number of constraints want to generate + uni_rng : mrg32k3a.mrg32k3a.MRG32k3a object + random-number generator used to sample random budget + """ + # Randomly choose a subset of arcs that have limited budget + C = [] + L = [] + for i in range(n): + if self.random_const == True: + const = uni_rng.sample(range(0, self.dim), int(self.dim/4)) + # lb = uni_rng.uniform(0, int(self.dim/4)) * uni_rng.uniform(1, 6) + lb = int(self.dim/4) * uni_rng.uniform(1, int(self.factors["sum_lb"]/self.dim)) + C.append(const) + L.append(lb) + else: + return [[i for i in range(self.dim)]], self.factors['sum_lb'] + return C, L + + def attach_rngs(self, random_rng): + """ + Attach random-number generators to the problem. + + Arguments + --------- + random_rng : list of mrg32k3a.mrg32k3a.MRG32k3a objects + list of rngs for problem to use when generating random instances + """ + # Attach rng for problem class and generate random problem factors for random instances + self.random_rng = random_rng + + if self.random: + self.factors["budget"] = self.random_budget(random_rng[0]) + + self.factors["arc_costs"] = self.get_coefficient(random_rng[1]) + print('c: ', self.factors["arc_costs"]) + + # Random constraint + if self.random_const: + self.factors["r_const"], self.factors['lbs'] = self.get_const(self.num_con, random_rng[2]) + self.factors["lbs"].append(self.factors["sum_lb"]) # Combine the sum_lb with the partial_lb + self.factors["r_const"].append([i for i in range(self.dim)]) # Combine the index related to sum_lb with the r_const + else: + self.factors["r_const"], self.factors['sum_lb'] = self.get_const(self.num_con, random_rng[2]) + self.factors["lbs"] = [self.factors["sum_lb"]] + else: + self.factors["r_const"] = [[i for i in range(self.dim)]] + self.factors["lbs"] = [self.factors["sum_lb"]] + + self.factors["lbs"] += [0 for i in range(self.dim)] # Require each arc means larger or equal to 0 + self.factors["r_const"] += [[i] for i in range(self.dim)] + 
print('r_const: ', self.factors["r_const"]) + print('lbs: ', self.factors['lbs']) + + return random_rng + + def response_dict_to_objectives(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of objectives. + + Arguments + --------- + response_dict : dictionary + dictionary with response keys and associated values + + Returns + ------- + objectives : tuple + vector of objectives + """ + objectives = (response_dict["longest_path_length"],) + return objectives + + def response_dict_to_stoch_constraints(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of left-hand sides of stochastic constraints: E[Y] <= 0 + + Arguments + --------- + response_dict : dictionary + dictionary with response keys and associated values + + Returns + ------- + stoch_constraints : tuple + vector of LHSs of stochastic constraint + """ + stoch_constraints = None + return stoch_constraints + + def deterministic_stochastic_constraints_and_gradients(self, x): + """ + Compute deterministic components of stochastic constraints for a solution `x`. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_stoch_constraints : tuple + vector of deterministic components of stochastic constraints + det_stoch_constraints_gradients : tuple + vector of gradients of deterministic components of stochastic constraints + """ + det_stoch_constraints = None + det_stoch_constraints_gradients = ((0,) * self.dim,) # tuple of tuples – of sizes self.dim by self.dim, full of zeros + return det_stoch_constraints, det_stoch_constraints_gradients + + def deterministic_objectives_and_gradients(self, x): + """ + Compute deterministic components of objectives for a solution `x`. 
+ + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_objectives : tuple + vector of deterministic components of objectives + det_objectives_gradients : tuple + vector of gradients of deterministic components of objectives + """ + + det_objectives = (0,) + det_objectives_gradients = ((0,) * self.dim,) + return det_objectives, det_objectives_gradients + + def check_deterministic_constraints(self, x): + """ + Check if a solution `x` satisfies the problem's deterministic constraints. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + satisfies : bool + indicates if solution `x` satisfies the deterministic constraints. + """ + + return np.all(np.array(x) >= 0) + + def find_feasible(self): + """ + Find an initial feasible solution (if not user-provided) + by interior point method. + + Arguments + --------- + problem : Problem object + simulation-optimization problem to solve + + Returns + ------- + x0 : ndarray + an initial feasible solution + tol: float + Floating point comparison tolerance + """ + c = [0 for i in range(self.dim)] + l1 = [-1 for i in range(self.dim)] + A = [l1] + b = [-self.factors["sum_lb"]] + if self.random_const: + b.extend([-plb for plb in self.factors["lbs"]]) + for idx in self.factors["r_const"]: + l2 = [-1 if i in idx else 0 for i in range(self.dim)] + A.append(l2) + + res = linprog(c, A_ub=A, b_ub=b, bounds=(0, None), method='interior-point') + + return res.x + + def check_feasible(self, x): + """ + Check whether a solution is feasible or not. + + Arguments + --------- + x : ndarray/list + current point + + Returns + ------- + feasible : bool + indicates if solution `x` satisfies the deterministic constraints. 
+ """ + if sum(x) >= self.factors['sum_lb']: + for i in range(len(self.factors["lbs"])): + if sum([x[j] for j in self.factors["r_const"][i]]) < self.factors["lbs"][i]: + return False + return True + else: + return False + + def get_random_solution(self, rand_sol_rng): + """ + Generate a random solution for starting or restarting solvers. + + Arguments + --------- + rand_sol_rng : mrg32k3a.mrg32k3a.MRG32k3a + random-number generator used to sample a new random solution + + Returns + ------- + x : tuple + vector of decision variables + """ + if self.check_feasible(self.factors["initial_solution"]): + x0 = self.factors["initial_solution"] + else: + x0 = self.find_feasible() + x = rand_sol_rng.hit_and_run(x0, [10 * self.factors['sum_lb']], [[i for i in range(self.dim)]], self.factors["lbs"], self.factors["r_const"], self.dim, 20) + + x = tuple(x) + return x \ No newline at end of file diff --git a/simopt/models/smf.py b/simopt/models/smf.py new file mode 100644 index 000000000..22cc9f472 --- /dev/null +++ b/simopt/models/smf.py @@ -0,0 +1,488 @@ +""" +Summary +------- +Simulate duration of a stochastic Max-Flow network (SMF). +A detailed description of the model/problem can be found +`here `_. 
+""" + +import numpy as np +from ortools.graph.python import max_flow +from ..base import Model, Problem + + +class SMF(Model): + """ + A model that simulates a stochastic Max-Flow problem with + capacities deducted with multivariate distributed noise distributed durations + + Attributes + ---------- + name : string + name of model + n_rngs : int + number of random-number generators used to run a simulation replication + n_responses : int + number of responses (performance measures) + factors : dict + changeable factors of the simulation model + specifications : dict + details of each factor (for GUI and data validation) + check_factor_list : dict + switch case for checking factor simulatability + + Arguments + --------- + fixed_factors : nested dict + fixed factors of the simulation model + + See also + -------- + base.Model + """ + def __init__(self, fixed_factors=None): + if fixed_factors is None: + fixed_factors = {} + self.name = "SMF" + self.n_rngs = 1 + self.n_random = 1 + self.n_responses = 1 + cov_fac = np.zeros((20, 20)) + np.fill_diagonal(cov_fac, 4) + cov_fac = cov_fac.tolist() + self.specifications = { + "num_nodes": { + "description": "number of nodes, 0 being the source, highest being the sink", + "datatype": int, + "default": 10 + }, + "source_index": { + "description": "source node index", + "datatype": int, + "default": 0 + }, + "sink_index": { + "description": "sink node index", + "datatype": int, + "default": 9 + }, + "arcs": { + "description": "list of arcs", + "datatype": list, + "default": [(0, 1), (0, 2), (0, 3), (1, 2), (1, 4), (2, 4), (4, 2), (3, 2), (2, 5), (4, 5), (3, 6), (3, 7), (6, 2), (6, 5), (6, 7), (5, 8), (6, 8), (6, 9), (7, 9), (8, 9)] + }, + "assigned_capacities": { + "description": "Assigned capacity of each arc", + "datatype": list, + "default": [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5] + }, + "mean_noise": { + "description": "The mean noise in reduction of arc capacities", + "datatype": list, + "default": 
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + }, + "cov_noise": { + "description": "Covariance matrix of noise", + "datatype": list, + "default": cov_fac + } + + } + self.check_factor_list = { + "num_nodes": self.check_num_nodes, + "arcs": self.check_arcs, + "assigned_capacities": self.check_capacities, + "mean_noise": self.check_mean, + "cov_noise": self.check_cov, + "source_index": self.check_s, + "sink_index": self.check_t + } + # Set factors of the simulation model. + super().__init__(fixed_factors) + + def check_num_nodes(self): + return self.factors["num_nodes"] > 0 + + def dfs(self, graph, start, visited=None): + if visited is None: + visited = set() + visited.add(start) + for next in graph[start] - visited: + self.dfs(graph, next, visited) + return visited + + def check_arcs(self): + if len(self.factors["arcs"]) <= 0: + return False + # Check source is connected to the sink. + graph = {node: set() for node in range(0, self.factors["num_nodes"])} + for a in self.factors["arcs"]: + graph[a[0]].add(a[1]) + visited = self.dfs(graph, self.factors["source_index"]) + if self.factors["source_index"] in visited and self.factors["sink_index"] in visited: + return True + return False + + def check_capacities(self): + positive = True + for x in list(self.factors["assigned_capacities"]): + positive = positive & (x > 0) + return (len(self.factors["assigned_capacities"]) == len(self.factors["arcs"])) & positive + + def check_mean(self): + return len(self.factors["mean_noise"]) == len(self.factors["arcs"]) + + def check_cov(self): + return np.array(self.factors["cov_noise"]).shape == (len(self.factors["arcs"]), len(self.factors["arcs"])) + + def check_s(self): + return self.factors["source_index"] >= 0 and self.factors["source_index"] <= self.factors["num_nodes"] + + def check_t(self): + return self.factors["sink_index"] >= 0 and self.factors["sink_index"] <= self.factors["num_nodes"] + + def replicate(self, rng_list): + """ + Simulate a single replication 
for the current model factors. + + Arguments + --------- + rng_list : list of mrg32k3a.mrg32k3a.MRG32k3a + rngs for model to use when simulating a replication + + Returns + ------- + responses : dict + performance measures of interest + "longest_path_length" = length/duration of longest path + gradients : dict of dicts + gradient estimates for each response + """ + # Designate separate random number generators. + solver = max_flow.SimpleMaxFlow() + exp_rng = rng_list[0] + # From input graph generate start end end nodes. + start_nodes = [] + end_nodes = [] + for i, j in self.factors["arcs"]: + start_nodes.append(i) + end_nodes.append(j) + # Generate actual capacity. + for i in range(len(self.factors["arcs"])): + noise = exp_rng.mvnormalvariate(self.factors["mean_noise"], np.array(self.factors["cov_noise"])) + capacities = [] + for i in range(len(noise)): + capacities.append(max(1000 * (self.factors["assigned_capacities"][i] - noise[i]), 0)) + # Add arcs in bulk. + solver.add_arcs_with_capacity(start_nodes, end_nodes, capacities) + status = solver.solve(self.factors["source_index"], self.factors["sink_index"]) + if status != solver.OPTIMAL: + print('There was an issue with the max flow input.') + print(f'Status: {status}') + exit(1) + + # Construct gradient vector (=1 if has a outflow from min-cut nodes). 
+ gradient = np.zeros(len(self.factors["arcs"])) + grad_arclist = [] + min_cut_nodes = solver.get_source_side_min_cut() + for i in min_cut_nodes: + for j in range(self.factors['num_nodes']): + if j not in min_cut_nodes: + grad_arc = (i, j) + if (i, j) in self.factors['arcs']: + grad_arclist.append(grad_arc) + for arc in grad_arclist: + gradient[self.factors['arcs'].index(arc)] = 1 + + responses = {"Max Flow": solver.optimal_flow() / 1000} + gradients = {response_key: {factor_key: np.nan for factor_key in self.specifications} for response_key in responses} + gradients["Max Flow"]["assigned_capacities"] = gradient + return responses, gradients + + +""" +Summary +------- +Maximize the expected max flow from the source node s to the sink node t. +""" + + +class SMF_Max(Problem): + """ + Base class to implement simulation-optimization problems. + + Attributes + ---------- + name : string + name of problem + dim : int + number of decision variables + n_objectives : int + number of objectives + n_stochastic_constraints : int + number of stochastic constraints + minmax : tuple of int (+/- 1) + indicator of maximization (+1) or minimization (-1) for each objective + constraint_type : string + description of constraints types: + "unconstrained", "box", "deterministic", "stochastic" + variable_type : string + description of variable types: + "discrete", "continuous", "mixed" + lower_bounds : tuple + lower bound for each decision variable + upper_bounds : tuple + upper bound for each decision variable + Ci : ndarray (or None) + Coefficient matrix for linear inequality constraints of the form Ci@x <= di + Ce : ndarray (or None) + Coefficient matrix for linear equality constraints of the form Ce@x = de + di : ndarray (or None) + Constraint vector for linear inequality constraints of the form Ci@x <= di + de : ndarray (or None) + Constraint vector for linear equality constraints of the form Ce@x = de + gradient_available : bool + indicates if gradient of objective function is 
available + optimal_value : tuple + optimal objective function value + optimal_solution : tuple + optimal solution + model : Model object + associated simulation model that generates replications + model_default_factors : dict + default values for overriding model-level default factors + model_fixed_factors : dict + combination of overriden model-level factors and defaults + model_decision_factors : set of str + set of keys for factors that are decision variables + rng_list : list of mrg32k3a.mrg32k3a.MRG32k3a objects + list of RNGs used to generate a random initial solution + or a random problem instance + factors : dict + changeable factors of the problem + initial_solution : list + default initial solution from which solvers start + budget : int > 0 + max number of replications (fn evals) for a solver to take + specifications : dict + details of each factor (for GUI, data validation, and defaults) + + Arguments + --------- + name : str + user-specified name for problem + fixed_factors : dict + dictionary of user-specified problem factors + model_fixed factors : dict + subset of user-specified non-decision factors to pass through to the model + + See also + -------- + base.Problem + """ + def __init__(self, name="SMF-1", fixed_factors=None, model_fixed_factors=None, random=False): + if fixed_factors is None: + fixed_factors = {} + if model_fixed_factors is None: + model_fixed_factors = {} + self.name = name + self.n_objectives = 1 + self.n_stochastic_constraints = 0 + self.minmax = (1, ) + self.constraint_type = "deterministic" + self.variable_type = "continuous" + self.gradient_available = True + self.optimal_value = None + self.optimal_solution = None + self.random = random + self.model_default_factors = {} + self.model_decision_factors = {"assigned_capacities"} + self.factors = fixed_factors + self.specifications = { + "initial_solution": { + "description": "initial solution", + "datatype": tuple, + "default": (1, ) * 20 + }, + "budget": { + "description": 
"max # of replications for a solver to take", + "datatype": int, + "default": 100000 + }, + "cap": { + "description": "total set-capacity to be allocated to arcs.", + "datatype": int, + "default": 100 + } + } + self.check_factor_list = { + "initial_solution": self.check_initial_solution, + "budget": self.check_budget, + "cap": self.check_cap + } + super().__init__(fixed_factors, model_fixed_factors) + # Instantiate model with fixed factors and over-riden defaults. + self.model = SMF(self.model_fixed_factors) + self.dim = len(self.model.factors["arcs"]) + self.lower_bounds = (0, ) * self.dim + # self.upper_bounds = (np.inf, ) * self.dim + self.upper_bounds = (self.factors["cap"], ) * self.dim + self.Ci = np.ones(20) + self.Ce = None + self.di = np.array([self.factors["cap"]]) + self.de = None + + def check_cap(self): + return self.factors["cap"] >= 0 + + def attach_rngs(self, random_rng): + self.random_rng = random_rng + self.model.attach_rng(random_rng) + return random_rng + + def vector_to_factor_dict(self, vector): + """ + Convert a vector of variables to a dictionary with factor keys + + Arguments + --------- + vector : tuple + vector of values associated with decision variables + + Returns + ------- + factor_dict : dictionary + dictionary with factor keys and associated values + """ + factor_dict = { + "assigned_capacities": vector[:] + } + return factor_dict + + def factor_dict_to_vector(self, factor_dict): + """ + Convert a dictionary with factor keys to a vector + of variables. + + Arguments + --------- + factor_dict : dictionary + dictionary with factor keys and associated values + + Returns + ------- + vector : tuple + vector of values associated with decision variables + """ + vector = tuple(factor_dict["assigned_capacities"]) + return vector + + def response_dict_to_objectives(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of objectives. 
+ + Arguments + --------- + response_dict : dictionary + dictionary with response keys and associated values + + Returns + ------- + objectives : tuple + vector of objectives + """ + objectives = (response_dict["Max Flow"], ) + return objectives + + def response_dict_to_stoch_constraints(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of left-hand sides of stochastic constraints: E[Y] <= 0 + + Arguments + --------- + response_dict : dictionary + dictionary with response keys and associated values + + Returns + ------- + stoch_constraints : tuple + vector of LHSs of stochastic constraint + """ + stoch_constraints = None + return stoch_constraints + + def deterministic_stochastic_constraints_and_gradients(self, x): + """ + Compute deterministic components of stochastic constraints for a solution `x`. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_stoch_constraints : tuple + vector of deterministic components of stochastic constraints + det_stoch_constraints_gradients : tuple + vector of gradients of deterministic components of stochastic constraints + """ + det_stoch_constraints = None + det_stoch_constraints_gradients = None + return det_stoch_constraints, det_stoch_constraints_gradients + + def deterministic_objectives_and_gradients(self, x): + """ + Compute deterministic components of objectives for a solution `x`. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_objectives : tuple + vector of deterministic components of objectives + det_objectives_gradients : tuple + vector of gradients of deterministic components of objectives + """ + det_objectives = (0, ) + det_objectives_gradients = ((0, ) * self.dim,) + return det_objectives, det_objectives_gradients + + def check_deterministic_constraints(self, x): + """ + Check if a solution `x` satisfies the problem's deterministic constraints. 
+ + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + satisfies : bool + indicates if solution `x` satisfies the deterministic constraints. + """ + + return sum(self.factors["assigned_capacities"]) <= self.factors["cap"] + + def get_random_solution(self, rand_sol_rng): + """ + Generate a random solution for starting or restarting solvers. + + Arguments + --------- + rand_sol_rng : mrg32k3a.mrg32k3a.MRG32k3a object + random-number generator used to sample a new random solution + + Returns + ------- + x : tuple + vector of decision variables + """ + x = rand_sol_rng.continuous_random_vector_from_simplex(len(self.model.factors["arcs"]), self.factors["cap"], False) + return x diff --git a/simopt/models/smfcvx.py b/simopt/models/smfcvx.py new file mode 100644 index 000000000..27daa23f0 --- /dev/null +++ b/simopt/models/smfcvx.py @@ -0,0 +1,510 @@ +""" +Summary +------- +Simulate duration of a stochastic Max-Flow network (SMF). +A detailed description of the model/problem can be found +`here `_. 
+""" + +import numpy as np +from ortools.graph.python import max_flow +from ..base import Model, Problem + + +class SMFCVX0(Model): + """ + A model that simulates a stochastic Max-Flow problem with + capacities deducted with multivariate distributed noise distributed durations + + Attributes + ---------- + name : string + name of model + n_rngs : int + number of random-number generators used to run a simulation replication + n_responses : int + number of responses (performance measures) + factors : dict + changeable factors of the simulation model + specifications : dict + details of each factor (for GUI and data validation) + check_factor_list : dict + switch case for checking factor simulatability + + Arguments + --------- + fixed_factors : nested dict + fixed factors of the simulation model + + See also + -------- + base.Model + """ + def __init__(self, fixed_factors=None): + if fixed_factors is None: + fixed_factors = {} + self.name = "SMFCVX" + self.n_rngs = 1 + self.n_responses = 1 + cov_fac = np.zeros((20, 20)) + np.fill_diagonal(cov_fac, 4) + cov_fac = cov_fac.tolist() + self.specifications = { + "num_nodes": { + "description": "number of nodes, 0 being the source, highest being the sink", + "datatype": int, + "default": 10 + }, + "source_index": { + "description": "source node index", + "datatype": int, + "default": 0 + }, + "sink_index": { + "description": "sink node index", + "datatype": int, + "default": 9 + }, + "arcs": { + "description": "list of arcs", + "datatype": list, + "default": [(0, 1), (0, 2), (0, 3), (1, 2), (1, 4), (2, 4), (4, 2), (3, 2), (2, 5), (4, 5), (3, 6), (3, 7), (6, 2), (6, 5), (6, 7), (5, 8), (6, 8), (6, 9), (7, 9), (8, 9)] + }, + "assigned_capacities": { + "description": "Assigned capacity of each arc", + "datatype": list, + "default": [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5] + }, + # "capacity_bound":{ + # "description": "upper bound capacity function", + # "datatype": 'function', + # "default": 
self.default_upper_fn + # }, + "mean_noise": { + "description": "The mean noise in reduction of arc capacities", + "datatype": list, + "default": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + }, + "cov_noise": { + "description": "Covariance matrix of noise", + "datatype": list, + "default": cov_fac + } + + } + self.check_factor_list = { + "num_nodes": self.check_num_nodes, + "arcs": self.check_arcs, + "assigned_capacities": self.check_capacities, + "mean_noise": self.check_mean, + "cov_noise": self.check_cov, + "source_index": self.check_s, + "sink_index": self.check_t + } + # Set factors of the simulation model. + super().__init__(fixed_factors) + self.num_arcs = len(self.factors["arcs"]) + + def check_num_nodes(self): + return self.factors["num_nodes"] > 0 + + def default_upper_fn(self,rng,k = 5,lamb = 1): + #return the upper bound of the capacities for a single edge x + #the multiplier X (xX) is an Erlang ~ (k,lamb) + + capacities = [] + for i in range(self.num_arcs): + capacities.append(1000*self.factors["assigned_capacities"][i]*sum([rng.expovariate(lamb) for j in range(k)])) + return capacities + + def pos_part_capacity(self): + #generate capacity of the form [x - noise]^{+} + for i in range(self.num_arcs): + noise = exp_rng.mvnormalvariate(self.factors["mean_noise"], np.array(self.factors["cov_noise"])) + capacities = [] + for i in range(self.num_arcs): + capacities.append(max(1000 * (self.factors["assigned_capacities"][i] - noise[i]), 0)) + + return capacities + + + def dfs(self, graph, start, visited=None): + if visited is None: + visited = set() + visited.add(start) + for next in graph[start] - visited: + self.dfs(graph, next, visited) + return visited + + def check_arcs(self): + if len(self.factors["arcs"]) <= 0: + return False + # Check source is connected to the sink. 
+ graph = {node: set() for node in range(0, self.factors["num_nodes"])} + for a in self.factors["arcs"]: + graph[a[0]].add(a[1]) + visited = self.dfs(graph, self.factors["source_index"]) + if self.factors["source_index"] in visited and self.factors["sink_index"] in visited: + return True + return False + + def check_capacities(self): + positive = True + for x in list(self.factors["assigned_capacities"]): + positive = positive & (x > 0) + return (len(self.factors["assigned_capacities"]) == len(self.factors["arcs"])) & positive + + def check_mean(self): + return len(self.factors["mean_noise"]) == len(self.factors["arcs"]) + + def check_cov(self): + return np.array(self.factors["cov_noise"]).shape == (len(self.factors["arcs"]), len(self.factors["arcs"])) + + def check_s(self): + return self.factors["source_index"] >= 0 and self.factors["source_index"] <= self.factors["num_nodes"] + + def check_t(self): + return self.factors["sink_index"] >= 0 and self.factors["sink_index"] <= self.factors["num_nodes"] + + def replicate(self, rng_list): + """ + Simulate a single replication for the current model factors. + + Arguments + --------- + rng_list : list of mrg32k3a.mrg32k3a.MRG32k3a + rngs for model to use when simulating a replication + + Returns + ------- + responses : dict + performance measures of interest + "longest_path_length" = length/duration of longest path + gradients : dict of dicts + gradient estimates for each response + """ + # Designate separate random number generators. + solver = max_flow.SimpleMaxFlow() + exp_rng = rng_list[0] + # From input graph generate start end end nodes. + start_nodes = [] + end_nodes = [] + for i, j in self.factors["arcs"]: + start_nodes.append(i) + end_nodes.append(j) + # Generate actual capacity. 
+ #for i in range(len(self.factors["arcs"])): + # noise = exp_rng.mvnormalvariate(self.factors["mean_noise"], np.array(self.factors["cov_noise"])) + #capacities = [] + #for i in range(len(noise)): + #capacities.append(max(1000 * (self.factors["assigned_capacities"][i] - noise[i]), 0)) + # capacities.append(self.default_upper_fn(self.factors["assigned_capacities"][i],exp_rng)) + capacities = self.default_upper_fn(exp_rng) + #print("capacities: ", capacities) + + # Add arcs in bulk. + solver.add_arcs_with_capacity(start_nodes, end_nodes, capacities) + status = solver.solve(self.factors["source_index"], self.factors["sink_index"]) + if status != solver.OPTIMAL: + print('There was an issue with the max flow input.') + print(f'Status: {status}') + exit(1) + + # Construct gradient vector (=1 if has a outflow from min-cut nodes). + gradient = np.zeros(len(self.factors["arcs"])) + grad_arclist = [] + min_cut_nodes = solver.get_source_side_min_cut() + for i in min_cut_nodes: + for j in range(self.factors['num_nodes']): + if j not in min_cut_nodes: + grad_arc = (i, j) + if (i, j) in self.factors['arcs']: + grad_arclist.append(grad_arc) + for arc in grad_arclist: + gradient[self.factors['arcs'].index(arc)] = 1 + + responses = {"Max Flow": solver.optimal_flow() / 1000} + gradients = {response_key: {factor_key: np.nan for factor_key in self.specifications} for response_key in responses} + gradients["Max Flow"]["assigned_capacities"] = gradient + return responses, gradients + + +""" +Summary +------- +Maximize the expected max flow from the source node s to the sink node t. +""" + + +class SMFCVX_Max0(Problem): + """ + Base class to implement simulation-optimization problems. 
+ + Attributes + ---------- + name : string + name of problem + dim : int + number of decision variables + n_objectives : int + number of objectives + n_stochastic_constraints : int + number of stochastic constraints + minmax : tuple of int (+/- 1) + indicator of maximization (+1) or minimization (-1) for each objective + constraint_type : string + description of constraints types: + "unconstrained", "box", "deterministic", "stochastic" + variable_type : string + description of variable types: + "discrete", "continuous", "mixed" + lower_bounds : tuple + lower bound for each decision variable + upper_bounds : tuple + upper bound for each decision variable + Ci : ndarray (or None) + Coefficient matrix for linear inequality constraints of the form Ci@x <= di + Ce : ndarray (or None) + Coefficient matrix for linear equality constraints of the form Ce@x = de + di : ndarray (or None) + Constraint vector for linear inequality constraints of the form Ci@x <= di + de : ndarray (or None) + Constraint vector for linear equality constraints of the form Ce@x = de + gradient_available : bool + indicates if gradient of objective function is available + optimal_value : tuple + optimal objective function value + optimal_solution : tuple + optimal solution + model : Model object + associated simulation model that generates replications + model_default_factors : dict + default values for overriding model-level default factors + model_fixed_factors : dict + combination of overriden model-level factors and defaults + model_decision_factors : set of str + set of keys for factors that are decision variables + rng_list : list of mrg32k3a.mrg32k3a.MRG32k3a objects + list of RNGs used to generate a random initial solution + or a random problem instance + factors : dict + changeable factors of the problem + initial_solution : list + default initial solution from which solvers start + budget : int > 0 + max number of replications (fn evals) for a solver to take + specifications : dict + 
details of each factor (for GUI, data validation, and defaults) + + Arguments + --------- + name : str + user-specified name for problem + fixed_factors : dict + dictionary of user-specified problem factors + model_fixed factors : dict + subset of user-specified non-decision factors to pass through to the model + + See also + -------- + base.Problem + """ + def __init__(self, name="SMFCVX-1", fixed_factors=None, model_fixed_factors=None): + if fixed_factors is None: + fixed_factors = {} + if model_fixed_factors is None: + model_fixed_factors = {} + self.name = name + self.n_objectives = 1 + self.n_stochastic_constraints = 0 + self.minmax = (1, ) + self.constraint_type = "deterministic" + self.variable_type = "continuous" + self.gradient_available = True + self.optimal_value = None + self.optimal_solution = None + self.model_default_factors = {} + self.model_decision_factors = {"assigned_capacities"} + self.factors = fixed_factors + self.specifications = { + "initial_solution": { + "description": "initial solution", + "datatype": tuple, + "default": (1, ) * 20 + }, + "budget": { + "description": "max # of replications for a solver to take", + "datatype": int, + "default": 30000 + }, + "cap": { + "description": "total set-capacity to be allocated to arcs.", + "datatype": int, + "default": 100 + } + } + self.check_factor_list = { + "initial_solution": self.check_initial_solution, + "budget": self.check_budget, + "cap": self.check_cap + } + super().__init__(fixed_factors, model_fixed_factors) + # Instantiate model with fixed factors and over-riden defaults. 
+ self.model = SMFCVX0(self.model_fixed_factors) + self.dim = len(self.model.factors["arcs"]) + self.lower_bounds = (0, ) * self.dim + self.upper_bounds = (1000000, ) * self.dim #np.inf + self.Ci = np.array([20*[1]])#np.ones(20) + self.Ce = None + self.di = np.array([self.factors["cap"]]) + self.de = None + + def check_cap(self): + return self.factors["cap"] >= 0 + + def vector_to_factor_dict(self, vector): + """ + Convert a vector of variables to a dictionary with factor keys + + Arguments + --------- + vector : tuple + vector of values associated with decision variables + + Returns + ------- + factor_dict : dictionary + dictionary with factor keys and associated values + """ + factor_dict = { + "assigned_capacities": vector[:] + } + return factor_dict + + def factor_dict_to_vector(self, factor_dict): + """ + Convert a dictionary with factor keys to a vector + of variables. + + Arguments + --------- + factor_dict : dictionary + dictionary with factor keys and associated values + + Returns + ------- + vector : tuple + vector of values associated with decision variables + """ + vector = tuple(factor_dict["assigned_capacities"]) + return vector + + def response_dict_to_objectives(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of objectives. 
+ + Arguments + --------- + response_dict : dictionary + dictionary with response keys and associated values + + Returns + ------- + objectives : tuple + vector of objectives + """ + objectives = (response_dict["Max Flow"], ) + return objectives + + def response_dict_to_stoch_constraints(self, response_dict): + """ + Convert a dictionary with response keys to a vector + of left-hand sides of stochastic constraints: E[Y] <= 0 + + Arguments + --------- + response_dict : dictionary + dictionary with response keys and associated values + + Returns + ------- + stoch_constraints : tuple + vector of LHSs of stochastic constraint + """ + stoch_constraints = None + return stoch_constraints + + def deterministic_stochastic_constraints_and_gradients(self, x): + """ + Compute deterministic components of stochastic constraints for a solution `x`. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_stoch_constraints : tuple + vector of deterministic components of stochastic constraints + det_stoch_constraints_gradients : tuple + vector of gradients of deterministic components of stochastic constraints + """ + det_stoch_constraints = None + det_stoch_constraints_gradients = None + return det_stoch_constraints, det_stoch_constraints_gradients + + def deterministic_objectives_and_gradients(self, x): + """ + Compute deterministic components of objectives for a solution `x`. + + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + det_objectives : tuple + vector of deterministic components of objectives + det_objectives_gradients : tuple + vector of gradients of deterministic components of objectives + """ + det_objectives = (0, ) + det_objectives_gradients = ((0, ) * self.dim,) + return det_objectives, det_objectives_gradients + + def check_deterministic_constraints(self, x): + """ + Check if a solution `x` satisfies the problem's deterministic constraints. 
+ + Arguments + --------- + x : tuple + vector of decision variables + + Returns + ------- + satisfies : bool + indicates if solution `x` satisfies the deterministic constraints. + """ + + return sum(self.factors["assigned_capacities"]) <= self.factors["cap"] + + def get_random_solution(self, rand_sol_rng): + """ + Generate a random solution for starting or restarting solvers. + + Arguments + --------- + rand_sol_rng : mrg32k3a.mrg32k3a.MRG32k3a object + random-number generator used to sample a new random solution + + Returns + ------- + x : tuple + vector of decision variables + """ + x = rand_sol_rng.continuous_random_vector_from_simplex(len(self.model.factors["arcs"]), self.factors["cap"], False) + return x diff --git a/simopt/solvers/Boom_FrankWolfe.py b/simopt/solvers/Boom_FrankWolfe.py new file mode 100644 index 000000000..be0511566 --- /dev/null +++ b/simopt/solvers/Boom_FrankWolfe.py @@ -0,0 +1,1547 @@ +import numpy as np +import cvxpy as cp +import gurobipy +import matplotlib.pyplot as plt +#import cdd + + +import warnings +warnings.filterwarnings("ignore") + +from ..base import Solver +#env = gurobipy.Env() +#env.setParam('FeasibilityTol', 1e-9) +#env.setParam('MIPGap',0) + + +class BoomFrankWolfe(Solver): + """ + """ + + def __init__(self, name="Boom-FW", fixed_factors={}): + self.name = name + self.objective_type = "single" + self.constraint_type = "deterministic" + self.variable_type = "continuous" + self.gradient_needed = False + self.specifications = { + "crn_across_solns": { + "description": "use CRN across solutions?", + "datatype": bool, + "default": True + }, + "r": { + "description": "number of replications taken at each solution", + "datatype": int, + "default": 30 + }, + "h": { + "description": "difference in finite difference gradient", + "datatype": float, + "default": 0.1 + }, + "step_f": { + "description": "step size function", + "datatype": "function", + "default": self.default_step_f + }, + "theta": { + "description": "constant in the 
line search condition", + "datatype": int, + "default": 0.2 + }, + "max_iters": { + "description": "maximum iterations", + "datatype": int, + "default": 300 + }, + "LSmethod":{ + "description": "methods for line search algorithm", + "datatype":str, + "default":self.backtrackLineSearch + }, + "line_search_max_iters": { + "description": "maximum iterations for line search", + "datatype": int, + "default": 20 + }, + "ratio": { + "description": "decay ratio in line search", + "datatype": float, + "default": 0.8 + }, + "curve_const": { + "description": "constant in curvature wolfe conditions, usually greater than theta", + "datatype": float, + "default": 0.3 + }, + "zoom_init_ratio": { + "description": "ratio of the max step size in Zoom lien search", + "datatype": float, + "default": 0.2 + }, + "zoom_inc_ratio": { + "description": "increment ratio in Zoom lien search", + "datatype": float, + "default": 1.1 + }, + "atom_vectors":{ + "description": "atom vectors for away/pairwise frank-wolfe", + "datatype": "matrix", + "default": None + }, + "max_gamma":{ + "description": "max distance to the next iteration", + "datatype": float, + "default": 1 + }, + "backtrack":{ + "description": "an indicator whether we do the backtrack", + "datatype": bool, + "default": 0 + }, + "algorithm":{ + "description": "type of FW algorithm", + "datatype": str, + "default": "normal" + #away, pairwise + } + + } + self.check_factor_list = { + "crn_across_solns": self.check_crn_across_solns, + "r": self.check_r, + "max_iters": self.check_alpha_max, + } + super().__init__(fixed_factors) + + def check_r(self): + return self.factors["r"] > 0 + + def check_alpha_max(self): + + return self.factors["alha_max"] > 0 + + def check_max_iters(self): + + return self.factors['max_iters'] > 0 + + def default_step_f(self,k): + """ + take in the current iteration k + """ + + return 1/(k+1) + + def is_feasible(self, x, Ci,di,Ce,de,lower, upper, tol = 1e-8): + """ + Check whether a solution x is feasible to the 
problem. + + Arguments + --------- + x : tuple + a solution vector + problem : Problem object + simulation-optimization problem to solve + tol: float + Floating point comparison tolerance + """ + x = np.asarray(x) + res = True + + if(lower is not None): + res = res & np.all(x >= lower) + if(upper is not None): + res = res & np.all(x <= upper) + + if (Ci is not None) and (di is not None): + res = res & np.all(Ci @ x <= di + tol) + if (Ce is not None) and (de is not None): + res = res & (np.allclose(np.dot(Ce, x), de)) + return res + + def get_max_gamma_ratio_test(self, cur_x, d, Ce, Ci, de, di, lower, upper): + ''' + perform a ratio test to find the max step size + ''' + #step = cp.Variable() + #objective = cp.Maximize(step) + #constraints = [step >= 0] + #ratio test: (bi - ai^Tx)/(ai^Td) + ratio_val = [] + denom = [] + dim = len(cur_x) + + if(lower is not None): + #constraints += [(cur_x + step*d) >= lower] + #vals += [(lower[0] - cur_x[i])/(d[i]) for i in range(dim)] + ratio_val += list((cur_x - lower)/-d) + denom += list(-d) + if(upper is not None): + #constraints += [(cur_x + step*d) <= upper] + #vals += [(upper[0] - cur_x[i])/(d[i]) for i in range(dim)] + ratio_val += list((upper - cur_x)/d) + denom += list(d) + if((Ci is not None) and (di is not None)): + #constraints += [Ci@(cur_x + step*d) <= di] + ratio_val += list((di - Ci@cur_x)/(Ci@d)) + denom += list(Ci@d) + + #print("ratio: ", ratio_val) + #print("denom: ", denom) + ratio_val = np.array(ratio_val) + denom = np.array(denom) + #print("denom: ", denom) + #print("ratio_val: ", ratio_val) + + return min(ratio_val[denom > 1e-6]) + #prob = cp.Problem(objective, constraints) + #prob.solve() + + def get_dir(self,g,Ce, Ci, de, di,lower, upper): + ''' + solve for the direction in each iteration + given a gradient vector g, find min_s{sg} + s.t. 
problem is feasible + ''' + + n = len(g) + s = cp.Variable(n) + + objective = cp.Minimize(s@g) + constraints = [] + + if(lower is not None): + constraints += [s >= lower] + if(upper is not None): + constraints += [s <= upper] + if((Ci is not None) and (di is not None)): + constraints += [Ci@s <= di] + if((Ce is not None) and (de is not None)): + constraints += [Ce@s == de] + + prob = cp.Problem(objective, constraints) + #prob.solve(solver=cp.GUROBI,env=env)#solver=cp.ECOS + prob.solve(solver=cp.SCIPY) + + return s.value + + def get_dir_unbd(self,g,Ce, Ci, de, di,lower, upper): + ''' + solve for the direction in each iteration + given a gradient vector g, find min_s{sg} + s.t. problem is feasible + ''' + + n = len(g) + s = cp.Variable(n) + + objective = cp.Minimize(s@g) + constraints = [] + + if(lower is not None): + constraints += [s >= lower] + if(upper is not None): + constraints += [s <= upper] + if((Ci is not None) and (di is not None)): + constraints += [Ci@s <= di] + if((Ce is not None) and (de is not None)): + constraints += [Ce@s == de] + + prob = cp.Problem(objective, constraints) + #prob.solve(solver=cp.GUROBI,env=env)#solver=cp.ECOS + prob.solve(solver=cp.GUROBI,InfUnbdInfo= 1) + + if('unbounded' in prob.status): + result = np.array([prob.solver_stats.extra_stats.getVars()[j].unbdray for j in range(n)]) + is_bounded = False + else: + result = s.value + is_bounded = True + + return result, is_bounded + + def get_FD_grad(self, x, problem, h, r): + """ + find a finite difference gradient from the problem at + the point x + """ + x = np.array(x) + d = len(x) + + if(d == 1): + #xsol = self.create_new_solution(tuple(x), problem) + x1 = x + h/2 + x2 = x - h/2 + + x1 = self.create_new_solution(tuple(x1), problem) + x2 = self.create_new_solution(tuple(x2), problem) + problem.simulate_up_to([x1], r) + problem.simulate_up_to([x2], r) + f1 = -1 * problem.minmax[0] * x1.objectives_mean + f2 = -1 * problem.minmax[0] * x2.objectives_mean + grad = (f1-f2)/h + else: + I 
= np.eye(d) + grad = 0 + + for i in range(d): + x1 = x + h*I[:,i]/2 + x2 = x - h*I[:,i]/2 + + x1 = self.create_new_solution(tuple(x1), problem) + x2 = self.create_new_solution(tuple(x2), problem) + problem.simulate_up_to([x1], r) + problem.simulate_up_to([x2], r) + + f1 = -1 * problem.minmax[0] * x1.objectives_mean + f2 = -1 * problem.minmax[0] * x2.objectives_mean + + grad += ((f1-f2)/h)*I[:,i] + + return grad, (2*d*r) + + #def min_quadratic(div0,f0,): + # """ + # find the (arg)minimum of the quadratic function from + # the given info q'(0), q(0), q(alpha) in the interval + # [a,b] where a < b + # """ + def get_gradient(self,problem,x,sol): + """ + getting the gradient of the function at point x where + sol is the solution data structure + """ + budget = 0 + #get the gradient of the new solution grad f(x + step*d) for curvature condition + if problem.gradient_available: + # Use IPA gradient if available. + gradient = -1 * problem.minmax[0] * sol.objectives_gradients_mean[0] + else: + gradient, budget_spent = self.get_FD_grad(x, problem, self.factors["h"], self.factors["r"]) + gradient = -1 * problem.minmax[0] * gradient + budget += budget_spent + + return gradient, budget + + def get_simulated_values(self,problem,x,value = 'both'): + """ + getting either sample path or gradient. The return "value" + can be specified to "val"|"gradient"|"both" + """ + r = self.factors["r"] + sol = self.create_new_solution(tuple(x), problem) + problem.simulate(sol, r) + budget = 0 + + #getting the function evaluation + if((value == "both") or (value == "val")): + budget += r + Fval = -1 * problem.minmax[0] * sol.objectives_mean + + if((value == "both") or (value == "gradient")): + if problem.gradient_available: + # Use IPA gradient if available. 
+ gradient = -1 * problem.minmax[0] * sol.objectives_gradients_mean[0] + else: + gradient, budget_spent = self.get_FD_grad(x, problem, self.factors["h"], self.factors["r"]) + budget += budget_spent + + if(value == "val"): + return Fval, budget + elif(value == "gradient"): + return gradient, budget + else: + return Fval, gradient, budget + + def full_min_quadratic(self, div_a,Fa,Fb,a,b,problem): + ''' + return the minimum point which is the + next step size usig the quadratic + interpolation with the information q(a), + q(b), q'(a) and q'(b) where a < b + ''' + #print("div: ",div_a) + #print("Fa,Fb: ", (Fa,Fb)) + #print("(a,b): ", (a,b)) + #numerator = (a**2 - b**2)*div_a - 2*a*(Fa - Fb) + #denominator = 2*((a-b)*div_a - (Fa - Fb)) + #result = numerator/denominator + A = div_a/(a - b) - (Fa - Fb)/((a-b)**2) + B = div_a - 2*A*a + result = -B/(2*A) + + if(-problem.minmax[0] == np.sign(A)): + #if A and problem have the same sign, i.e. min and A > 0 + if(result < a): + return a + elif(result > b): + return b + else: + return result + else: + if(problem.minmax[0] > 0): + #maximization but A > 0 + return [a,b][np.argmax([Fa,Fb])] + + else: + #minization but A < 0 + return [a,b][np.argmin([Fa,Fb])] + + def quadratic_interpolate(self,x1,x2,div_x1,div_x2,Fx1,Fx2,problem): + ''' + interpolate the quadratic polynomial using given points + and return the lowest (arg)point + ''' + + if(x2 > x1): + #we use div_x1,x1,x2 + #return min_quadratic(div_x1,Fx1,Fx2,x2) + return self.full_min_quadratic(div_x1,Fx1,Fx2,x1,x2,problem) + else: + #we use div_x2,x2,x1 + #return min_quadratic(div_x2,Fx2,Fx1,x1) + return self.full_min_quadratic(div_x2,Fx2,Fx1,x2,x1,problem) + + def backtrackLineSearch(self,cur_sol,grad,d,max_step,problem,expended_budget): + """ + carry out line search on the function F where we + min F(x + alpha*d) s.t. 
alpha >=0 + + cur_sol: starting point + d: direction + grad: gradient at the point cur_sol + max_step: literally max step + ratio: decay ratio if fails + """ + + r = self.factors["r"] + ratio = self.factors["ratio"] + max_iter = self.factors["line_search_max_iters"] + + cur_iter = 0 + step_size = max_step + added_budget = 0 + + cur_x = cur_sol.x + #print("cur_x: ", cur_x) + curF = -1 * problem.minmax[0] * cur_sol.objectives_mean + #print("FW-BT Line Search...") + #xrange = np.arange(0,max_step,0.01) + #nn = len(xrange) + #fval = np.zeros(nn) + #for i in range(nn): + # temp_x = cur_sol.x + xrange[i]*d + # temp_sol = self.create_new_solution(tuple(temp_x), problem) + # problem.simulate(temp_sol, r) + # fval[i] = temp_sol.objectives_mean + #plt.scatter(xrange,fval) + #plt.show() + while True: + #if(expended_budget + added_budget > problem.factors["budget"] or cur_iter >= max_iter): + if(cur_iter >= max_iter): + break; + + new_x = cur_x + step_size*d + new_sol = self.create_new_solution(tuple(new_x), problem) + problem.simulate(new_sol, r) + added_budget += r + + newF = -1 * problem.minmax[0] * new_sol.objectives_mean + #print("newF: ",newF) + + if(newF < curF + self.factors['theta'] * step_size * np.dot(grad, d)): + break; + + step_size = step_size*ratio + cur_iter += 1 + #print("---------------") + #print("step from backtrack: ",step_size) + return step_size, added_budget + + def interpolateLineSearch(self,cur_sol,grad,d,max_step,problem,expended_budget): + """ + carry out interpolation line search on the function F where we + min F(x + alpha*d) s.t. 
alpha >=0 where phi(a) = F(x + ad) + """ + #print("Interpolation LS") + r = self.factors["r"] + ratio = self.factors["ratio"] + max_iter = self.factors["line_search_max_iters"] + sign = -problem.minmax[0] + + cur_iter = 0 + step_size = max_step + added_budget = 0 + + cur_x = cur_sol.x + curF = -1 * problem.minmax[0] * cur_sol.objectives_mean + #print("max_step: ", max_step) + if(max_step == 0): + return max_step, added_budget + + #xrange = np.arange(0,max_step,0.01) + #nn = len(xrange) + #fval = np.zeros(nn) + #for i in range(nn): + # temp_x = cur_x + xrange[i]*d + # temp_sol = self.create_new_solution(tuple(temp_x), problem) + # problem.simulate(temp_sol, r) + # fval[i] = temp_sol.objectives_mean + #plt.scatter(xrange,fval) + #line = -1*problem.minmax[0]*curF + self.factors["theta"]*xrange*(-1*problem.minmax[0]* grad.dot(d)) + #plt.scatter(xrange,line,color='red') + #plt.show() + + while True: + #while(not suff dec and cur iter) + #while((newF >= curF + self.factors['theta'] * step_size * np.dot(grad, d)) and (cur_iter < max_iter)): + #if(expended_budget + added_budget > problem.factors["budget"] or cur_iter >= max_iter): + if(cur_iter >= max_iter): + break; + #print("cur step size: ", step_size) + new_x = cur_x + step_size*d + #print("LS new x: ",new_x) + new_sol = self.create_new_solution(tuple(new_x), problem) + problem.simulate(new_sol, r) + added_budget += r + + newF = -1 * problem.minmax[0] * new_sol.objectives_mean + + #sufficient decrease + if(newF < curF + self.factors['theta'] * step_size * np.dot(grad, d)): + break; + + #quadratic interpolation using phi(0), phi'(0), phi(step) + #new_step_size = ((-grad.dot(d))*(step_size**2))/(2*(newF-curF-step_size*(grad.dot(d)))) + new_step_size = self.full_min_quadratic(sign*grad.dot(d),sign*curF,sign*newF,0,step_size,problem) + #print("grad . 
d: ", grad.dot(d)) + #print("opt new step: ", new_step_size) + if(abs(new_step_size) >= 1e-4): + #if we can make some progress + #new_step_size = ((-grad.dot(d))*(step_size**2))/(2*(newF-curF-step_size*(grad.dot(d)))) + step_size = min(new_step_size,max_step) + #elif(new_step_size == 0): + # step_size = 0 + # break; + else: + #if we did not make progress, use the informaiton {step*ratio} + temp_x = cur_x + (step_size*ratio)*d + temp_sol = self.create_new_solution(tuple(temp_x), problem) + problem.simulate(temp_sol, r) + added_budget += r + newF = -1 * problem.minmax[0] * temp_sol.objectives_mean + #print("another newF: ", newF) + #new_step_size = ((-grad.dot(d))*((step_size*ratio)**2))/(2*(newF-curF-(step_size*ratio)*(grad.dot(d)))) + new_step_size = self.full_min_quadratic(sign*grad.dot(d),sign*curF,sign*newF,0,step_size*ratio,problem) + #check if it's in the interval + if(new_step_size <= 0): #outside interval (too small) + step_size = 0 + break; + elif(new_step_size > step_size*ratio): #outside interval (too big) + step_size = step_size*ratio + else: + step_size = new_step_size + + #print("new step: ", step_size) + cur_iter += 1 + #print("iteration: ", cur_iter) + #print("=============") + #print("Inter step: ",step_size) + #print("-------end of LS--------") + return step_size, added_budget + + def zoomLineSearch(self,cur_sol,grad,d,max_step,problem,expended_budget): + """ + carry out interpolation line search on the function F where we + min F(x + alpha*d) s.t. 
alpha >=0 where phi(a) = F(x + ad) + + NOTE: in this method, we increase the step size + """ + if(max_step == 0): + return 0,0 + r = self.factors["r"] + ratio = self.factors["ratio"] + max_iter = self.factors["line_search_max_iters"] + sign = -problem.minmax[0] + + cur_iter = 0 + #step_size = max_step + cur_step_size = max_step*self.factors["zoom_init_ratio"] + last_step_size = 0 + last_grad = grad + added_budget = 0 + + #xrange = np.arange(0,max_step,0.01) + #nn = len(xrange) + #fval = np.zeros(nn) + #for i in range(nn): + # temp_x = cur_sol.x + xrange[i]*d + # temp_sol = self.create_new_solution(tuple(temp_x), problem) + # problem.simulate(temp_sol, r) + #fval[i] = -1 * problem.minmax[0] * temp_sol.objectives_mean + # fval[i] = temp_sol.objectives_mean + #plt.scatter(xrange,fval) + #plt.show() + + cur_x = cur_sol.x + curF = -1 * problem.minmax[0] * cur_sol.objectives_mean + lastF = curF + nextF, next_grad, budget_spent = self.get_simulated_values(problem,cur_x + cur_step_size*d,value = 'both') + added_budget += budget_spent + + returned_steps = [0] + returned_vals = [curF] + #line = -curF + self.factors["theta"]*xrange*(-1*problem.minmax[0]* grad.dot(d)) + #plt.scatter(xrange,line,color='red') + #plt.show() + + while True: + #while(not suff dec and cur iter) + #if(expended_budget + added_budget > problem.factors["budget"] or cur_iter >= max_iter): + if(cur_iter >= max_iter): + break; + + #print("cur_grad: ", grad.dot(d)) + #print("next_grad: ", next_grad.dot(d)) + #sufficient decrease doesn't satisfy, zoom into an interval + if((nextF >= curF + self.factors['theta'] * cur_step_size * np.dot(grad, d))): + #zoom into the interval {last_step,cur_step} + #step_lo, step_hi, Flo, Fhi, div_lo, div_hi + #print("zooming, NO SF") + return self.zoomSearch(last_step_size,cur_step_size,lastF,nextF, + last_grad.dot(d),next_grad.dot(d),problem, + cur_x,curF,grad,d,added_budget,cur_iter) + + #last_grad = next_grad + #get the gradient of the new solution grad f(x + step*d) for 
curvature condition + #next_grad, B = self.get_gradient(problem,next_x,new_sol) + #added_budget += B + + #check curvature, if satisfies then return + if((abs(next_grad.dot(d)) <= self.factors['curve_const']*abs(grad.dot(d)))): + #print("Satisfied - upper") + step_size = cur_step_size + break; + if((next_grad.dot(d)) >= 0): + #zoom + #print("zooming, sign") + return self.zoomSearch(cur_step_size,last_step_size,nextF,lastF, + next_grad.dot(d),last_grad.dot(d),problem, + cur_x,curF,grad,d,added_budget,cur_iter) + + returned_steps.append(cur_step_size) + returned_vals.append(nextF) + #print("new step: ", cur_step_size) + #print("sign*Fval: ",nextF) + + last_step_size = cur_step_size + cur_step_size = min(max_step,cur_step_size*self.factors["zoom_inc_ratio"]) + + if(cur_step_size >= max_step): + break; + # return max_step, added_budget + + lastF = nextF + last_grad = next_grad + nextF, next_grad, budget_spent = self.get_simulated_values(problem,cur_x + cur_step_size*d,value = 'both') + added_budget += budget_spent + + cur_iter += 1 + #print("new step: ", cur_step_size) + #print("---------------") + if(cur_iter == self.factors["line_search_max_iters"] or (cur_step_size >= max_step)): + #if use all iterations, let's return the step which optimizes the sufficient decrease + return returned_steps[np.argmin(returned_vals)] ,added_budget + #return max_step*self.factors["zoom_init_ratio"], added_budget + else: + return cur_step_size, added_budget + + def zoomSearch(self,step_lo, step_hi, Flo, Fhi, div_lo, div_hi, problem,cur_x,curF, grad,d,added_budget,cur_iter): + """ + carry out the zoom search into the interval {} + *two of these are not ordered* + """ + max_iter = self.factors["line_search_max_iters"] + r = self.factors["r"] + sign = -1*problem.minmax[0] + + while(True): + if(cur_iter >= max_iter): + break; + + m1 = min([step_lo,step_hi]) + m2 = max([step_lo,step_hi]) + #print("zooming:: (",str(m1) + "," + str(m2) + ")") + #print("zooming:: (",str(step_lo) + "," + 
str(step_hi) + ")") + #use the actual value without the sign + new_step = self.quadratic_interpolate(step_lo,step_hi,sign*div_lo,sign*div_hi,sign*Flo,sign*Fhi,problem) + if(step_lo < step_hi): + left_dif = sign*div_lo;right_dif = sign*div_hi + left_val = sign*Flo;right_val = sign*Fhi + else: + left_dif = sign*div_hi;right_dif = sign*div_lo + left_val = sign*Fhi;right_val = sign*Flo + + #print("left div: ", left_dif) + #print("right div: ", right_dif) + #print("left val: ", left_val) + #print("right val: ", right_val) + + #print("new step: ", new_step) + + #xrange = np.arange(0,1,0.02) + #xrange = np.arange(m1,m2,(m2-m1)/20) + #nn = len(xrange) + #fval = np.zeros(nn) + #for i in range(nn): + # temp_x = cur_x + xrange[i]*d + # temp_sol = self.create_new_solution(tuple(temp_x), problem) + # problem.simulate(temp_sol, r) + #fval[i] = -1 * problem.minmax[0] * temp_sol.objectives_mean + # fval[i] = temp_sol.objectives_mean + #plt.scatter(xrange,fval) + #plt.show() + + if(abs(new_step - step_lo) < 1e-4 or abs(new_step - step_hi) < 1e-4): + return new_step, added_budget + + #new_grad = grad_f(cur_x + new_step*d).dot(d) + #newF = F(cur_x + new_step*d) + newF, new_grad, budget_spent = self.get_simulated_values(problem,cur_x + new_step*d,value = 'both') + added_budget += budget_spent + + #is_suff_decrese(nextF, curF, theta, cur_grad, cur_step_size, d) + #if(not is_suff_decrese(newF, curF, theta, grad.dot(d), new_step)): + if((newF >= curF + self.factors['theta'] * new_step * np.dot(grad, d))): + step_hi = new_step + #Fhi = F(cur_x + step_hi*d) + Fhi, div_hi, budget_spent = self.get_simulated_values(problem,cur_x + step_hi*d,value = 'both') + div_hi = div_hi.dot(d) + else: + #if(is_strong_curvature(new_grad, grad.dot(d), rho)): + #if(is_curvature(new_grad, grad.dot(d), rho)): + if((abs(new_grad.dot(d)) <= self.factors['curve_const']*abs(grad.dot(d)))): + return new_step, added_budget + if((new_grad.dot(d))*(step_hi - step_lo) >= 0): + step_hi = step_lo + + Fhi, div_hi, 
budget_spent = self.get_simulated_values(problem,cur_x + step_hi*d,value = 'both') + div_hi = div_hi.dot(d) + added_budget += budget_spent + + step_lo = new_step + #Flo = F(cur_x + step_lo*d) + #Fhi = F(cur_x + step_hi*d) + + Flo, div_lo, budget_spent = self.get_simulated_values(problem,cur_x + step_lo*d,value = 'both') + div_lo = div_lo.dot(d) + added_budget += budget_spent + #Fhi, budget_spent = self.get_simulated_values(problem,cur_x + step_hi*d,value = 'val') + #added_budget += budget_spent + + cur_iter += 1 + + return new_step, added_budget + + def find_feasible_initial(self, problem, Ce, Ci, de, di,lower, upper, tol = 1e-8): + ''' + Find an initial feasible solution (if not user-provided) + by solving phase one simplex. + + Arguments + --------- + problem : Problem object + simulation-optimization problem to solve + C: ndarray + constraint coefficient matrix + d: ndarray + constraint coefficient vector + + Returns + ------- + x0 : ndarray + an initial feasible solution + tol: float + Floating point comparison tolerance + ''' + # Define decision variables. + x = cp.Variable(problem.dim) + + # Define constraints. + constraints = [] + + if(lower is not None): + constraints += [x >= lower] + if(upper is not None): + constraints += [x <= upper] + if (Ce is not None) and (de is not None): + constraints += [Ce @ x == de] + if (Ci is not None) and (di is not None): + constraints += [Ci @ x <= di] + + # Define objective function. + obj = cp.Minimize(0) + + # Create problem. + model = cp.Problem(obj, constraints) + + # Solve problem. + model.solve() + + # Check for optimality. 
+ if model.status not in [cp.OPTIMAL, cp.OPTIMAL_INACCURATE] : + raise ValueError("Could not find feasible x0") + x0 = x.value + if not self.is_feasible(x0, problem, tol): + raise ValueError("Could not find feasible x0") + + return x0 + + def get_atom_vectors(self,Ci,di): + """ + get vertices of a polytope defined by the + constraints Ci <= di + """ + a,b = Ci.shape + mat = np.concatenate((di.reshape(a,1),-Ci),axis = 1) + mat = cdd.Matrix(mat,linear=False,number_type='float') + + P = cdd.Polyhedron(mat) + poly = cdd.Polyhedron(mat) + ext = poly.get_generators() + + return np.array(ext)[:,1:] + + def get_random_vertex(self,Ci,di,lower,upper): + + num_var = Ci.shape[1] + x = cp.Variable(num_var) + #objective = cp.Minimize(cp.norm(Ci@x - di,1)) + objective = cp.Maximize(cp.sum(x)) + constraints = [Ci@x <= di] + if(lower is not None): + constraints += [x >= lower] + if(upper is not None): + constraints += [x <= upper] + + problem = cp.Problem(objective, constraints) + #problem.solve(solver=cp.GUROBI,env=env) + problem.solve(solver=cp.SCIPY) + return x.value + + def get_alpha_vec(self,x0,atom_vectors): + """ + get the coefficients of convex combination of the x0 + """ + + m,n = atom_vectors.shape + x = cp.Variable(m) + + objective = cp.Minimize(cp.norm(atom_vectors.T @ x - x0) + cp.norm(x,1)) + constraints = [x >= 0, + x <= 1] + + prob = cp.Problem(objective, constraints) + prob.solve() + + return x.value + + def solve(self, problem): + + max_iters = self.factors['max_iters'] + ls_type = self.factors['LSmethod'] + #self.factors['problem'] = problem + #print(ls_type) + if(ls_type == 'backtracking'): + self.factors["LSfn"] = self.backtrackLineSearch + elif(ls_type == 'interpolation'): + self.factors["LSfn"] = self.interpolateLineSearch + else: + self.factors["LSfn"] = self.zoomLineSearch + + + #print("Solved by Frank Wolfe - " + self.factors["algorithm"]) + + if(self.factors["algorithm"] == "normal"): + return self.normal_FrankWolfe(problem) + 
elif(self.factors["algorithm"] == "away"): + return self.away_FrankWolfe(problem) + elif(self.factors["algorithm"] == "normal_unbd"): + return self.normal_FrankWolfe_unbd(problem) + else: + return self.pairwise_FrankWolfe(problem) + + def normal_FrankWolfe(self, problem): + #print("Starting Frank Wolfe") + recommended_solns = [] + intermediate_budgets = [] + expended_budget = 0 + dim = problem.dim + + Ci = problem.Ci + di = problem.di + Ce = problem.Ce + de = problem.de + + lower = np.array(problem.lower_bounds) + upper = np.array(problem.upper_bounds) + + r = self.factors["r"] + ratio = self.factors["ratio"] + LSmax_iter = self.factors["line_search_max_iters"] + + # Start with the initial solution. + new_solution = self.create_new_solution(problem.factors["initial_solution"], problem) + new_x = new_solution.x + + if(not self.is_feasible(new_x, Ci,di,Ce,de,lower,upper)): + new_x = self.find_feasible_initial(problem, Ce, Ci, de, di,lower,upper) + new_solution = self.create_new_solution(tuple(new_x), problem) + + # Use r simulated observations to estimate the objective value. + problem.simulate(new_solution, r) + expended_budget += r + best_solution = new_solution + recommended_solns.append(new_solution) + intermediate_budgets.append(expended_budget) + + k = 0 + + while expended_budget < problem.factors["budget"]: + cur_x = new_solution.x + + #getting the gradient + if problem.gradient_available: + # Use IPA gradient if available. + grad = -1 * problem.minmax[0] * new_solution.objectives_gradients_mean[0] + else: + # Use finite difference to estimate gradient if IPA gradient is not available. + #grad, budget_spent = self.finite_diff(new_solution, problem, r, stepsize = alpha) + grad, budget_spent = self.get_FD_grad(cur_x, problem, self.factors["h"], self.factors["r"]) + expended_budget += budget_spent + # A while loop to prevent zero gradient. 
+ #while np.all((grad == 0)): + # if expended_budget > problem.factors["budget"]: + # break + # grad, budget_spent = self.finite_diff(new_solution, problem, r) + # expended_budget += budget_spent + # Update r after each iteration. + # r = int(self.factors["lambda"] * r) + + v = self.get_dir(grad,Ce, Ci, de, di,lower,upper) + #direction = (v-cur_x)/np.linalg.norm(v-cur_x) + direction = (v-cur_x) + #print("grad: ", grad) + #print("dir: ", v) + #max_gamma = self.get_max_gamma_ratio_test(cur_x, direction, Ce, Ci, de, di, lower, upper) + #max_gamma = max_gamma*self.factors["max_gamma"] + max_gamma = 1 + + if(self.factors["backtrack"]): + #gamma, added_budget = self.LineSearch(new_solution,grad,direction,self.factors["max_gamma"],problem,expended_budget) + #gamma, added_budget = self.LineSearch(new_solution,grad,direction,max_gamma,problem,expended_budget) + gamma, added_budget = self.factors['LSfn'](new_solution,grad,direction,max_gamma,problem,expended_budget) + expended_budget += added_budget + else: + #gamma = min(self.factors["step_f"](k),self.factors["max_gamma"]) + gamma = min(self.factors["step_f"](k),max_gamma) + + #k = self.get_max_gamma_ratio_test(cur_x, direction, Ce, Ci, de, di, lower, upper) + #print("current max gamma: ", k) + #print("gamma: ", gamma) + #print("direction: ",direction) + #print("grad*direction: ", np.linalg.norm(grad.dot(direction))) + #new_x = (1 - gamma)*np.array(cur_x) + gamma*v + new_x = np.array(cur_x) + gamma*direction + #print("new x: ",new_x) + candidate_solution = self.create_new_solution(tuple(new_x), problem) + # Use r simulated observations to estimate the objective value. 
+ problem.simulate(candidate_solution, r) + expended_budget += r + + new_solution = candidate_solution + recommended_solns.append(candidate_solution) + intermediate_budgets.append(expended_budget) + + k += 1 + #print("obj: ",candidate_solution.objectives_mean) + #print("-----------------------------") + return recommended_solns, intermediate_budgets + + def normal_FrankWolfe_unbd(self, problem): + #print("Starting Frank Wolfe") + recommended_solns = [] + intermediate_budgets = [] + expended_budget = 0 + dim = problem.dim + + Ci = problem.Ci + di = problem.di + Ce = problem.Ce + de = problem.de + + lower = np.array(problem.lower_bounds) + upper = np.array(problem.upper_bounds) + + r = self.factors["r"] + ratio = self.factors["ratio"] + LSmax_iter = self.factors["line_search_max_iters"] + + # Start with the initial solution. + new_solution = self.create_new_solution(problem.factors["initial_solution"], problem) + new_x = new_solution.x + + if(not self.is_feasible(new_x, Ci,di,Ce,de,lower,upper)): + new_x = self.find_feasible_initial(problem, Ce, Ci, de, di,lower,upper) + new_solution = self.create_new_solution(tuple(new_x), problem) + + # Use r simulated observations to estimate the objective value. + problem.simulate(new_solution, r) + expended_budget += r + best_solution = new_solution + recommended_solns.append(new_solution) + intermediate_budgets.append(expended_budget) + + k = 0 + + while expended_budget < problem.factors["budget"]: + cur_x = new_solution.x + + #getting the gradient + if problem.gradient_available: + # Use IPA gradient if available. + grad = -1 * problem.minmax[0] * new_solution.objectives_gradients_mean[0] + else: + # Use finite difference to estimate gradient if IPA gradient is not available. 
+ #grad, budget_spent = self.finite_diff(new_solution, problem, r, stepsize = alpha) + grad, budget_spent = self.get_FD_grad(cur_x, problem, self.factors["h"], self.factors["r"]) + expended_budget += budget_spent + + v, is_bounded = self.get_dir_unbd(grad,Ce, Ci, de, di,lower,upper) + max_gamma = 1 + + if(is_bounded):#go to a vertex + direction = v - cur_x + else:#go to the open space + direction = v + #print("dir: ", direction) + + if(self.factors["backtrack"]): + #gamma, added_budget = self.LineSearch(new_solution,grad,direction,self.factors["max_gamma"],problem,expended_budget) + gamma, added_budget = self.LineSearch(new_solution,grad,direction,max_gamma,problem,expended_budget) + expended_budget += added_budget + else: + #gamma = min(self.factors["step_f"](k),self.factors["max_gamma"]) + gamma = min(self.factors["step_f"](k),max_gamma) + + #print("gamma: ", gamma) + new_x = np.array(cur_x) + gamma*direction + #print("new x: ",new_x) + candidate_solution = self.create_new_solution(tuple(new_x), problem) + # Use r simulated observations to estimate the objective value. + problem.simulate(candidate_solution, r) + expended_budget += r + + new_solution = candidate_solution + recommended_solns.append(candidate_solution) + intermediate_budgets.append(expended_budget) + + k += 1 + #print("obj: ",candidate_solution.objectives_mean) + #print("-----------------------------") + return recommended_solns, intermediate_budgets + + def away_FrankWolfe(self, problem): + + recommended_solns = [] + intermediate_budgets = [] + expended_budget = 0 + last_step = [] + last_gamma = [] + + Ci = problem.Ci + di = problem.di + Ce = problem.Ce + de = problem.de + + lower = np.array(problem.lower_bounds) + upper = np.array(problem.upper_bounds) + + scale_factor = self.factors["ratio"] + LSmax_iter = self.factors["line_search_max_iters"] + r = self.factors["r"] + + # Start with the initial solution. 
+ new_solution = self.create_new_solution(problem.factors["initial_solution"], problem) + new_x = new_solution.x + + if(not self.is_feasible(new_x, Ci,di,Ce,de,lower,upper)): + #new_x = self.find_feasible_initial(problem, Ce, Ci, de, di,lower,upper) + new_x = self.get_random_vertex(Ci,di,lower,upper) + new_solution = self.create_new_solution(tuple(new_x), problem) + + #new_x = self.get_random_vertex(Ci,di,lower,upper) + new_solution = self.create_new_solution(tuple(new_x), problem) + atom_vectors = np.array([new_x]) + problem.simulate(new_solution, r) + + #initializing active set and all alpha coefficients, contains only one vector here + #active_vectors = {0:[]} + active_vectors = [np.array(new_x)] + alphas = {tuple(new_x):1} + #store the "active" infinite search direction + active_dirs = [] + betas = {} + + # Use r simulated observations to estimate the objective value. + problem.simulate(new_solution, r) + expended_budget += r + best_solution = new_solution + recommended_solns.append(new_solution) + intermediate_budgets.append(expended_budget) + + k = 0 + + while expended_budget < problem.factors["budget"]: + cur_x = new_solution.x + + if problem.gradient_available: + # Use IPA gradient if available. + grad = -1 * problem.minmax[0] * new_solution.objectives_gradients_mean[0] + else: + # Use finite difference to estimate gradient if IPA gradient is not available. + #grad, budget_spent = self.finite_diff(new_solution, problem, r, stepsize = alpha) + grad, budget_spent = self.get_FD_grad(cur_x, problem, self.factors["h"], self.factors["r"]) + expended_budget += budget_spent + # A while loop to prevent zero gradient. + #while np.all((grad == 0)): + # if expended_budget > problem.factors["budget"]: + # break + # grad, budget_spent = self.finite_diff(new_solution, problem, r) + # expended_budget += budget_spent + # Update r after each iteration. 
+ # r = int(self.factors["lambda"] * r) + + #print("grad: ", grad) + #print("active set: ") + #the list dot product values [grad_f.a for a in atom] + #s = self.get_dir(grad,Ce, Ci, de, di,lower, upper) + s, is_bounded = self.get_dir_unbd(grad,Ce, Ci, de, di,lower, upper) + + #list of dot product of [grad_f.v for v in active set] + #gv = np.array([grad.dot(a) for a in active_vectors[k]]) + if(len(active_vectors) > 0): + gv = np.array([grad.dot(a) for a in active_vectors]) + #v = active_vectors[k][np.argmax(gv)] + v = active_vectors[np.argmax(gv)] + d_away = cur_x - v + else: + d_away = np.zeros(problem.dim) + v = None + + #compute the directions of normal Frank-Wolfe + if(is_bounded): + d_FW = s - cur_x + else:#go to the open space + d_FW = s + s = d_FW + + #d_FW = d_FW/np.linalg.norm(d_FW) + #d_FW = d_away/np.linalg.norm(d_away) + #print("dFW: ",d_FW) + #print("d_away: ",d_away) + #print("forward step direction: ",s) + #print("is bounded: ",is_bounded) + #print("away step direction: ", v) + #print("current point: ",cur_x) + #print("v: ",v) + + #there is no way to move further since we finished early + if((d_FW == 0).all() and (d_away == 0).all()): + direction = d_FW #by default since it has no effect anyway + gamma = 0 + + elif((-grad.dot(d_FW) >= -grad.dot(d_away)) or (d_away == 0).all() or (not is_bounded)): + #FW step + #print("foward") + #ind.append('FW') + direction = d_FW + #print("dir: ", direction) + #max_gamma = 1 + #max_gamma = self.get_max_gamma_ratio_test(cur_x, direction, Ce, Ci, de, di, lower, upper) + #max_gamma = max_gamma*self.factors["max_gamma"] + #print("gamma: ", gamma) + #if(self.factors["backtrack"]): + #gamma, added_budget = self.LineSearch(new_solution,grad,direction,max_gamma,problem,expended_budget) + # gamma, added_budget = self.factors['LSfn'](new_solution,grad,direction,max_gamma,problem,expended_budget) + # expended_budget += added_budget + #else: + # gamma = min(self.factors["step_f"](k),max_gamma) + + if(is_bounded): + 
#print("bounded") + max_gamma = 1 + if(self.factors["backtrack"]): + #gamma, added_budget = self.LineSearch(new_solution,grad,direction,max_gamma,problem,expended_budget) + gamma, added_budget = self.factors['LSfn'](new_solution,grad,direction,max_gamma,problem,expended_budget) + expended_budget += added_budget + else: + gamma = min(self.factors["step_f"](k),max_gamma) + #update the active set + if(gamma < 1): + add = 0 + #check whether we have added this vertex s before + for vec in active_vectors: + if((vec != s).any()): + add = 1 #if could not find it, we must add it + else: + add = 0 + break; + if(add): #adding the new vertex s + active_vectors.append(s) + alphas[tuple(s)] = 0 + else: + #go the vertex s + active_vectors = [s] + alphas = {tuple(s):0} + + #print("active set change in forward: ", active_vectors) + #for atom in active_vectors[k]: + for atom in active_vectors: + if((atom == s).all()): + alphas[tuple(atom)] = (1-gamma)*alphas[tuple(atom)] + gamma + else: + alphas[tuple(atom)] = (1-gamma)*alphas[tuple(atom)] + + for dirs in active_dirs: + betas[tuple(dirs)] = (1-gamma)*betas[tuple(dirs)] + last_step.append("bounded") + else: + #print("unbounded") + #if we have consecutive extreme search + if(k > 0 and last_step[-1] == 'unbounded'): + gamma = last_gamma[-1]/self.factors["ratio"] + else: + max_gamma = 1 + #gamma = 1 + #print("max step: ",max_gamma) + if(self.factors["backtrack"]): + #gamma, added_budget = self.LineSearch(new_solution,grad,direction,max_gamma,problem,expended_budget) + gamma, added_budget = self.factors['LSfn'](new_solution,grad,direction,max_gamma,problem,expended_budget) + expended_budget += added_budget + else: + gamma = min(self.factors["step_f"](k),max_gamma) + #searching in the infinite search direction + #add a new inf direction if not found yet + if(len(active_dirs) == 0): + #It's the first time we add the search direction + active_dirs.append(s) + betas[tuple(s)] = gamma + else: #we added some extreme direction before + diffs = 
np.array([sum(abs(vec - s)) for vec in active_dirs]) + if((diffs > 1e-6).all()):#s is a new inf direction + active_dirs.append(s) + betas[tuple(s)] = gamma + else: + betas[tuple(s)] += gamma + last_step.append("unbounded") + + else: + #away step + #print("away") + #ind.append('away') + direction = d_away #xt - v + #print("dir: ", direction) + #direction = d_away/np.linalg.norm(d_away) + #gamma = gamma_f(k) + #max_gamma = alphas[tuple(v)]/(1 - alphas[tuple(v)]) + gamma_star = self.get_max_gamma_ratio_test(cur_x, direction, Ce, Ci, de, di, lower, upper) + #print("gamma_star: ", gamma_star) + #gamma_star = gamma_star*self.factors["max_gamma"] + #print("the alpha in ratio: ",alphas[tuple(v)]) + #direction = direction*gamma_star #d' = gamma_star*d + #max_dist = 1 + #max_dist = min(1,alphas[tuple(v)]/(gamma_star*(1 - alphas[tuple(v)]))) + max_dist = min(gamma_star,alphas[tuple(v)]/((1 - alphas[tuple(v)]))) + #max_gamma = alphas[v]/(1 - alphas[v]) + #print("max_dist: ", max_dist) + #active_vectors[k+1] = active_vectors[k] + + if(self.factors["backtrack"]): + #gamma, added_budget = self.LineSearch(new_solution,grad,direction,max_dist,problem,expended_budget) + gamma, added_budget = self.factors['LSfn'](new_solution,grad,direction,max_dist,problem,expended_budget) + expended_budget += added_budget + else: + gamma = min(self.factors["step_f"](k),self.factors["max_gamma"]) + + #if gamma_max, then update St \ {vt} + if(gamma == 1 or gamma <= scale_factor**LSmax_iter): + #print("dropping: ", v) + #active_vectors[k+1] = [] + #for vec in active_vectors[k]: + new_active = [] + for vec in active_vectors: + if((vec != v).any()): + #if((np.linalg.norm(vec - v)) > 1e-4): + #active_vectors[k+1].append(vec) + new_active.append(vec) + active_vectors = new_active + removed_atom = alphas.pop(tuple(v)) + + for atom in active_vectors: + if((atom == v).all()): + #alphas[tuple(atom)] = (1+gamma)*alphas[tuple(atom)] - gamma + alphas[tuple(atom)] = (1+gamma*gamma_star)*alphas[tuple(atom)] - 
gamma*gamma_star + else: + alphas[tuple(atom)] = (1+gamma*gamma_star)*alphas[tuple(atom)] + last_step.append("away") + #print("alphas: ", alphas) + #print("Displaying Alphas:") + #for key,val in alphas.items(): + # print(key) + # print(val) + # print('**************') + + + #print("max_gamma: ", max_gamma) + #print("gamma: ", gamma) + #print("dir: ", tuple(direction)) + last_gamma.append(gamma) + new_x = cur_x + gamma*direction + #print("new_x: ",tuple(new_x)) + + candidate_solution = self.create_new_solution(tuple(new_x), problem) + # Use r simulated observations to estimate the objective value. + problem.simulate(candidate_solution, r) + #print("obj: ",candidate_solution.objectives_mean) + expended_budget += r + + new_solution = candidate_solution + recommended_solns.append(candidate_solution) + intermediate_budgets.append(expended_budget) + #print("obj: ",candidate_solution.objectives_mean) + + k += 1 + #print("--------------") + return recommended_solns, intermediate_budgets + + def pairwise_FrankWolfe(self, problem): + + recommended_solns = [] + intermediate_budgets = [] + expended_budget = 0 + + Ci = problem.Ci + di = problem.di + Ce = problem.Ce + de = problem.de + + r = self.factors["r"] + + lower = np.array(problem.lower_bounds) + upper = np.array(problem.upper_bounds) + + # Start with the initial solution. + #new_solution = self.create_new_solution(problem.factors["initial_solution"], problem) + #new_x = new_solution.x + + #if(not self.is_feasible(new_x, problem)): + # new_x = self.find_feasible_initial(problem, Ce, Ci, de, di) + # new_solution = self.create_new_solution(tuple(new_x), problem) + + # Start with the initial solution. 
+ #new_solution = self.create_new_solution(problem.factors["initial_solution"], problem) + #new_x = new_solution.x + + #if(not self.is_feasible(new_x, Ci,di,Ce,de,lower,upper)): + #new_x = self.find_feasible_initial(problem, Ce, Ci, de, di,lower,upper) + # new_x = self.get_random_vertex(Ci,di,lower,upper) + # new_solution = self.create_new_solution(tuple(new_x), problem) + + new_x = self.get_random_vertex(Ci,di,lower,upper) + new_solution = self.create_new_solution(tuple(new_x), problem) + #initiailizing a dictionary of atom vectors and their coefficients + #atom_vectors = self.factors["atom_vectors"] + #if(self.factors["atom_vectors"] is None): + # atom_vectors = self.get_atom_vectors(Ci,di) + # num_atoms = atom_vectors.shape[0] + # alpha_vec = np.zeros(num_atoms) + # alpha_vec[0] = 1 + + # new_x = atom_vectors[0] + # new_solution = self.create_new_solution(tuple(new_x), problem) + #else: + # atom_vectors = self.factors["atom_vectors"] + # num_atoms = atom_vectors.shape[0] + # alpha_vec = self.get_alpha_vec(new_x,atom_vectors) + + #initiailizing a dictionary of atom vectors and their coefficients + #atom_vectors = self.factors["atom_vectors"] + #atom_vectors = self.get_atom_vectors(Ci,di) + #num_atoms = atom_vectors.shape[0] + #active_vectors = {0:[]} + #alphas = {tuple(v):0 for v in atom_vectors} + + atom_vectors = np.array([new_x]) + active_vectors = [np.array(new_x)] + #alphas = {tuple(v):0 for v in atom_vectors} + alphas = {tuple(new_x):1} + + #new_x = atom_vectors[0] + #new_solution = self.create_new_solution(tuple(new_x), problem) + + #for i in range(num_atoms): + # alphas[tuple(atom_vectors[i])] = alpha_vec[i] + # if(alpha_vec[i] > 0): + # active_vectors[0].append(atom_vectors[i]) + + # Use r simulated observations to estimate the objective value. 
+ problem.simulate(new_solution, r) + expended_budget += r + best_solution = new_solution + recommended_solns.append(new_solution) + intermediate_budgets.append(expended_budget) + + k = 0 + + while expended_budget < problem.factors["budget"]: + cur_x = new_solution.x + + #print("cur_x: ", cur_x) + + if problem.gradient_available: + # Use IPA gradient if available. + grad = -1 * problem.minmax[0] * new_solution.objectives_gradients_mean[0] + else: + # Use finite difference to estimate gradient if IPA gradient is not available. + #grad, budget_spent = self.finite_diff(new_solution, problem, r, stepsize = alpha) + grad, budget_spent = self.get_FD_grad(cur_x, problem, self.factors["h"], self.factors["r"]) + expended_budget += budget_spent + # A while loop to prevent zero gradient. + #while np.all((grad == 0)): + # if expended_budget > problem.factors["budget"]: + # break + # grad, budget_spent = self.finite_diff(new_solution, problem, r) + # expended_budget += budget_spent + # Update r after each iteration. 
+ # r = int(self.factors["lambda"] * r) + + s = self.get_dir(grad,Ce, Ci, de, di,lower, upper) + + #compute the directions + if(len(active_vectors) > 0): + gv = np.array([grad.dot(a) for a in active_vectors]) + #v = active_vectors[k][np.argmax(gv)] + v = active_vectors[np.argmax(gv)] + d_pw = s-v + else: + d_pw = np.zeros(problem.dim) + #direction = s - v + d_FW = s - cur_x + + #print("s-v: ", s-v) + #print("foward direction: ", s) + #print("pairwise direction: ", s-v) + #print("grad :", grad) + #print("current point: ",cur_x) + #print("dir: ", direction) + #print("v: ", v) + #max_gamma = min(alphas[tuple(v)]*np.linalg.norm(s-v),self.factors["max_gamma"]) + #max_gamma = self.get_max_gamma_ratio_test(cur_x, direction, Ce, Ci, de, di, lower, upper) + #away vector v = 0 + if((-grad.dot(d_FW) >= -grad.dot(d_pw)) or (d_pw == 0).all()): + #print('Forward') + direction = d_FW + #print("direcition: ", direction) + max_gamma = 1 + + if(self.factors["backtrack"]): + #gamma = LineSearch(F=F,x=cur_x,d=d_away,max_step=max_gamma/2) + gamma, added_budget = self.LineSearch(new_solution,grad,direction,max_gamma,problem,expended_budget) + expended_budget += added_budget + else: + #gamma = self.factors["step_f"](k) + gamma = min(self.factors["step_f"](k),max_gamma) + + #update the active set + if(gamma < 1): + for vec in active_vectors: + if((s != vec).any()): + add = 1 + else: + add = 0 + break; + if(add): + #active_vectors[k+1].append(s) + active_vectors.append(s) + alphas[tuple(s)] = 0 + else: + active_vectors = [s] + alphas = {tuple(s):0} + + #updating weights/coefficients + for atom in active_vectors: + if((atom == s).all()): + alphas[tuple(atom)] = (1-gamma)*alphas[tuple(atom)] + gamma + else: + alphas[tuple(atom)] = (1-gamma)*alphas[tuple(atom)] + + else: + #print("pairwise") + direction = d_pw + #print("direcition: ", direction) + max_gamma = alphas[tuple(v)] + #print("max_step: ", max_gamma) + if(self.factors["backtrack"]): + #gamma = 
LineSearch(F=F,x=cur_x,d=d_away,max_step=max_gamma/2) + gamma, added_budget = self.LineSearch(new_solution,grad,direction,max_gamma,problem,expended_budget) + expended_budget += added_budget + else: + #gamma = self.factors["step_f"](k) + gamma = min(self.factors["step_f"](k),max_gamma) + #print("active set in pairwise: ", active_vectors) + #found a new vertex not in the past vertices + for vec in active_vectors: + #different/a new vertex + if((s != vec).any()): + #if(sum(abs(s-vec)/(problem.dim*(vec+1e-10))) > 1e-2): + add = 1 + else: + add = 0 + break; + if(add): + active_vectors.append(s) + alphas[tuple(s)] = 0 + #print("active set in pairwise: ", active_vectors) + alphas[tuple(s)] = alphas[tuple(s)] + gamma + alphas[tuple(v)] = alphas[tuple(v)] - gamma + + #print("Displaying Alphas:") + #for key,val in alphas.items(): + # print(key) + # print(val) + # print('**************') + + + #print("max_gamma: ", max_gamma) + #print("gamma: ", gamma) + new_x = cur_x + gamma*direction + #print("next x: ",new_x) + candidate_solution = self.create_new_solution(tuple(new_x), problem) + # Use r simulated observations to estimate the objective value. 
+ problem.simulate(candidate_solution, r) + expended_budget += r + + + new_solution = candidate_solution + recommended_solns.append(candidate_solution) + intermediate_budgets.append(expended_budget) + + k += 1 + #print("obj: ",candidate_solution.objectives_mean) + #print("------------------") + #print("------------------") + + return recommended_solns, intermediate_budgets + \ No newline at end of file diff --git a/simopt/solvers/Boom_ProxGD.py b/simopt/solvers/Boom_ProxGD.py new file mode 100644 index 000000000..2eda3a7e6 --- /dev/null +++ b/simopt/solvers/Boom_ProxGD.py @@ -0,0 +1,904 @@ +#https://github.com/bodono/apgpy +import numpy as np +import cvxpy as cp +import matplotlib.pyplot as plt +#from apgwrapper import NumpyWrapper +#from functools import partial + +import warnings +warnings.filterwarnings("ignore") + +from ..base import Solver + +class BoomProxGD(Solver): + """ + + """ + def __init__(self, name="Boom-PGD", fixed_factors={"max_iters": 300, "backtrack": 1, "curve_const": 0.3, "LSmethod": 'zoom', "algorithm": 'away'}): + self.name = name + self.objective_type = "single" + self.constraint_type = "deterministic" + self.variable_type = "continuous" + self.gradient_needed = False + self.specifications = { + "crn_across_solns": { + "description": "use CRN across solutions?", + "datatype": bool, + "default": True + }, + "LSmethod": { + "description": "method", + "datatype": 'zoom', + "default": 'zoom' + }, + "r": { + "description": "number of replications taken at each solution", + "datatype": int, + "default": 30 + }, + "h": { + "description": "difference in finite difference gradient", + "datatype": float, + "default": 0.1 + }, + "step_f": { + "description": "step size function", + "datatype": "function", + "default": self.default_step_f + }, + "max_step_size": { + "description": "maximum possible step size", + "datatype": float, + "default": 10 + }, + "max_iters": { + "description": "maximum iterations", + "datatype": int, + "default": 300 + }, + 
"theta": { + "description": "constant in the line search condition", + "datatype": int, + "default": 0.2 + }, + "line_search_max_iters": { + "description": "maximum iterations for line search", + "datatype": int, + "default": 20 + }, + "ratio": { + "description": "decay ratio in line search", + "datatype": float, + "default": 0.8 + }, + "curve_const": { + "description": "constant in curvature wolfe conditions, usually greater than theta", + "datatype": float, + "default": 0.3 + }, + "zoom_init_ratio": { + "description": "ratio of the max step size in Zoom lien search", + "datatype": float, + "default": 0.2 + }, + "zoom_inc_ratio": { + "description": "increment ratio in Zoom lien search", + "datatype": float, + "default": 1.1 + }, + "max_gamma":{ + "description": "max distance possible", + "datatype": float, + "default": 10 + }, + "backtrack":{ + "description": "an indicator whether we do the backtrack", + "datatype": bool, + "default": 0 + }, + "proj_thres":{ + "description": "proportion of the max iters to stop if have too many projections", + "datatype": float, + "default": 0.1 + }, + "algorithm":{ + "description": "type of FW algorithm", + "datatype": str, + "default": "normal" #away, pairwise + } + } + + self.check_factor_list = { + "crn_across_solns": self.check_crn_across_solns, + "r": self.check_r, + "max_iters": self.check_max_iters, + "proj_thres":self.check_proj_thres + } + super().__init__(fixed_factors) + + def default_step_f(self,k): + """ + take in the current iteration k + """ + + return 1/(k+1) + + def check_r(self): + + return self.factors['r'] > 0 + + def check_max_iters(self): + + return self.factors['max_iters'] > 0 + + def check_proj_thres(self): + + return self.factors["proj_thres"] > 0 and self.factors["proj_thres"] < 1 + + def proj(self,z,Ci,di,Ce,de,lower,upper): + ''' + project a point z onto the constraint + Ax <= b depending on the constraint type + ''' + n = len(z) + u = cp.Variable(n) + + objective = 
cp.Minimize(cp.square(cp.norm(u-z))) + constraints = [] + + if((lower is not None) and (lower > -np.inf).all()): + constraints += [u >= lower] + if((upper is not None) and (upper < np.inf).all()): + constraints += [u <= upper] + + if (Ci is not None) and (di is not None): + constraints += [Ci@u <= di] + if (Ce is not None) and (de is not None): + constraints += [Ce@u == de] + + #constraints = [A@u <= b] + prob = cp.Problem(objective, constraints) + prob.solve()#solver=cp.ECOS + + return u.value + + def is_feasible(self, x, Ci,di,Ce,de,lower,upper): + """ + Check whether a solution x is feasible to the problem. + + Arguments + --------- + x : tuple + a solution vector + problem : Problem object + simulation-optimization problem to solve + tol: float + Floating point comparison tolerance + """ + x = np.asarray(x) + res = True + + if(lower is not None): + res = res & np.all(x >= lower) + if(upper is not None): + res = res & np.all(x <= upper) + + if (Ci is not None) and (di is not None): + res = res & np.all(Ci @ x <= di) + if (Ce is not None) and (de is not None): + res = res & (np.allclose(np.dot(Ce, x), de)) + return res + + def get_max_gamma_ratio_test(self, cur_x, d, Ce, Ci, de, di, lower, upper): + ''' + perform a ratio test to find the max step size + ''' + #step = cp.Variable() + #objective = cp.Maximize(step) + #constraints = [step >= 0] + #ratio test: (bi - ai^Tx)/(ai^Td) + ratio_val = [] + denom = [] + dim = len(cur_x) + + if(lower is not None): + #constraints += [(cur_x + step*d) >= lower] + #vals += [(lower[0] - cur_x[i])/(d[i]) for i in range(dim)] + ratio_val += list((cur_x - lower)/-d) + denom += list(-d) + if(upper is not None): + #constraints += [(cur_x + step*d) <= upper] + #vals += [(upper[0] - cur_x[i])/(d[i]) for i in range(dim)] + ratio_val += list((upper - cur_x)/d) + denom += list(d) + if((Ci is not None) and (di is not None)): + #constraints += [Ci@(cur_x + step*d) <= di] + ratio_val += list((di - Ci@cur_x)/(Ci@d)) + denom += list(Ci@d) + + 
#print("ratio: ", ratio) + ratio_val = np.array(ratio_val) + denom = np.array(denom) + #print("denom: ", denom) + #print("ratio_val: ", ratio_val) + + #if(len(ratio_val[denom > 1e-6]) == 0): + + return min(ratio_val[denom > 1e-6]) + + def full_min_quadratic(self, div_a,Fa,Fb,a,b,problem): + ''' + return the minimum point which is the + next step size usig the quadratic + interpolation with the information q(a), + q(b), q'(a) and q'(b) where a < b + ''' + #print("div: ",div_a) + #print("Fa,Fb: ", (Fa,Fb)) + #print("(a,b): ", (a,b)) + #numerator = (a**2 - b**2)*div_a - 2*a*(Fa - Fb) + #denominator = 2*((a-b)*div_a - (Fa - Fb)) + #result = numerator/denominator + + #if(result < a): + # return a + #elif(result > b): + # return b + #else: + # return result + + #return numerator/denominator + A = div_a/(a - b) - (Fa - Fb)/((a-b)**2) + B = div_a - 2*A*a + result = -B/(2*A) + + if(-problem.minmax[0] == np.sign(A)): + #if A and problem have the same sign, i.e. min and A > 0 or max and A < 0 + if(result < a): + return a + elif(result > b): + return b + else: + return result + else: + if(problem.minmax[0] > 0): + #maximization but A > 0 + return [a,b][np.argmax([Fa,Fb])] + + else: + #minization but A < 0 + return [a,b][np.argmin([Fa,Fb])] + + def quadratic_interpolate(self,x1,x2,div_x1,div_x2,Fx1,Fx2,problem): + ''' + interpolate the quadratic polynomial using given points + and return the lowest (arg)point + ''' + + if(x2 > x1): + #we use div_x1,x1,x2 + #return min_quadratic(div_x1,Fx1,Fx2,x2) + return self.full_min_quadratic(div_x1,Fx1,Fx2,x1,x2,problem) + else: + #we use div_x2,x2,x1 + #return min_quadratic(div_x2,Fx2,Fx1,x1) + return self.full_min_quadratic(div_x2,Fx2,Fx1,x2,x1,problem) + + def get_simulated_values(self,problem,x,value = 'both'): + """ + getting either sample path or gradient. 
The return "value" + can be specified to "val"|"gradient"|"both" + """ + r = self.factors["r"] + sol = self.create_new_solution(tuple(x), problem) + problem.simulate(sol, r) + budget = 0 + + #getting the function evaluation + if((value == "both") or (value == "val")): + budget += r + Fval = -1 * problem.minmax[0] * sol.objectives_mean + + if((value == "both") or (value == "gradient")): + if problem.gradient_available: + # Use IPA gradient if available. + gradient = -1 * problem.minmax[0] * sol.objectives_gradients_mean[0] + else: + gradient, budget_spent = self.get_FD_grad(x, problem, self.factors["h"], self.factors["r"]) + budget += budget_spent + + if(value == "val"): + return Fval, budget + elif(value == "gradient"): + return gradient, budget + else: + return Fval, gradient, budget + + def LineSearch(self,cur_sol,grad,d,max_step,problem,expended_budget): + """ + carry out line search on the function F where we + min F(x + alpha*d) s.t. alpha >=0 + + cur_sol: starting point + d: direction + grad: gradient at the point cur_sol + max_step: literally max step + ratio: decay ratio if fails + """ + + r = self.factors["r"] + ratio = self.factors["ratio"] + max_iter = self.factors["line_search_max_iters"] + + cur_iter = 0 + step_size = max_step + added_budget = 0 + + cur_x = cur_sol.x + curF = -1 * problem.minmax[0] * cur_sol.objectives_mean + + while True: + if(expended_budget + added_budget > problem.factors["budget"] or cur_iter >= max_iter): + break + + new_x = cur_x + step_size*d + new_sol = self.create_new_solution(tuple(new_x), problem) + problem.simulate(new_sol, r) + added_budget += r + + newF = -1 * problem.minmax[0] * new_sol.objectives_mean + + if(newF < curF + self.factors['theta'] * step_size * np.dot(grad, d)): + #if(newF < curF): + break + step_size = step_size*ratio + cur_iter += 1 + #print("newF: ", newF) + #print("linear F: ", curF + self.factors['theta'] * step_size * np.dot(grad, d)) + #print("inner iter: ", cur_iter) + return step_size, 
added_budget + + def backtrackLineSearch(self,cur_sol,grad,d,max_step,problem,expended_budget): + """ + carry out line search on the function F where we + min F(x + alpha*d) s.t. alpha >=0 + + cur_sol: starting point + d: direction + grad: gradient at the point cur_sol + max_step: literally max step + ratio: decay ratio if fails + """ + #print("backtrack LS") + #print("max step: ", max_step) + r = self.factors["r"] + ratio = self.factors["ratio"] + max_iter = self.factors["line_search_max_iters"] + + cur_iter = 0 + step_size = max_step + added_budget = 0 + + cur_x = cur_sol.x + #print("cur_x: ", cur_x) + curF = -1 * problem.minmax[0] * cur_sol.objectives_mean + #print("Line Search...") + while True: + #if(expended_budget + added_budget > problem.factors["budget"] or cur_iter >= max_iter): + if(cur_iter >= max_iter): + break; + + new_x = cur_x + step_size*d + #print("next x: ",new_x) + new_sol = self.create_new_solution(tuple(new_x), problem) + problem.simulate(new_sol, r) + added_budget += r + + newF = -1 * problem.minmax[0] * new_sol.objectives_mean + + #newF, budget_spent = self.get_simulated_values(problem,cur_x + step_size*d,value = 'val') + #added_budget += budget_spent + #new_grad = -1 * problem.minmax[0] * new_sol.objectives_gradients_mean[0] + #print("newF: ",newF) + + if(newF < curF + self.factors['theta'] * step_size * np.dot(grad, d)): + break; + + step_size = step_size*ratio + cur_iter += 1 + #print("---------------") + return step_size, added_budget + + def interpolateLineSearch(self,cur_sol,grad,d,max_step,problem,expended_budget): + """ + carry out interpolation line search on the function F where we + min F(x + alpha*d) s.t. 
alpha >=0 where phi(a) = F(x + ad) + """ + #print("Interpolation LS") + r = self.factors["r"] + ratio = self.factors["ratio"] + max_iter = self.factors["line_search_max_iters"] + sign = -problem.minmax[0] + + cur_iter = 0 + step_size = max_step + added_budget = 0 + + cur_x = cur_sol.x + curF = -1 * problem.minmax[0] * cur_sol.objectives_mean + #print("max_step: ", max_step) + if(max_step == 0): + return max_step, added_budget + + while True: + #while(not suff dec and cur iter) + #while((newF >= curF + self.factors['theta'] * step_size * np.dot(grad, d)) and (cur_iter < max_iter)): + #if(expended_budget + added_budget > problem.factors["budget"] or cur_iter >= max_iter): + if(cur_iter >= max_iter): + break; + #print("cur step size: ", step_size) + new_x = cur_x + step_size*d + #print("LS new x: ",new_x) + new_sol = self.create_new_solution(tuple(new_x), problem) + problem.simulate(new_sol, r) + added_budget += r + + newF = -1 * problem.minmax[0] * new_sol.objectives_mean + + #xrange = np.arange(0,1,0.01) + #nn = len(xrange) + #fval = np.zeros(nn) + #for i in range(nn): + # temp_x = cur_x + xrange[i]*d + # temp_sol = self.create_new_solution(tuple(temp_x), problem) + # problem.simulate(temp_sol, r) + # fval[i] = -1 * problem.minmax[0] * temp_sol.objectives_mean + # fval[i] = temp_sol.objectives_mean + #plt.scatter(xrange,fval) + #line = -1*problem.minmax[0]*curF + self.factors["theta"]*xrange*(-1*problem.minmax[0]* grad.dot(d)) + #plt.scatter(xrange,line,color='red') + #plt.show() + + #sufficient decrease + if(newF < curF + self.factors['theta'] * step_size * np.dot(grad, d)): + break; + + #quadratic interpolation using phi(0), phi'(0), phi(step) + #new_step_size = ((-grad.dot(d))*(step_size**2))/(2*(newF-curF-step_size*(grad.dot(d)))) + new_step_size = self.full_min_quadratic(sign*grad.dot(d),sign*curF,sign*newF,0,step_size,problem) + #print("grad . 
d: ", grad.dot(d)) + #print("opt new step: ", new_step_size) + if(abs(new_step_size) >= 1e-4): + #if we can make some progress + #new_step_size = ((-grad.dot(d))*(step_size**2))/(2*(newF-curF-step_size*(grad.dot(d)))) + step_size = min(new_step_size,max_step) + #elif(new_step_size == 0): + # step_size = 0 + # break; + else: + #if we did not make progress, use the informaiton {step/2} + temp_x = cur_x + (step_size*ratio)*d + temp_sol = self.create_new_solution(tuple(temp_x), problem) + problem.simulate(temp_sol, r) + added_budget += r + newF = -1 * problem.minmax[0] * temp_sol.objectives_mean + #print("another newF: ", newF) + #new_step_size = ((-grad.dot(d))*((step_size/2)**2))/(2*(newF-curF-(step_size/2)*(grad.dot(d)))) + #new_step_size = ((-grad.dot(d))*((step_size*ratio)**2))/(2*(newF-curF-(step_size*ratio)*(grad.dot(d)))) + new_step_size = self.full_min_quadratic(sign*grad.dot(d),sign*curF,sign*newF,0,step_size*ratio,problem) + #check if it's in the interval + if(new_step_size <= 0): #outside interval (too small) + step_size = 0 + break; + elif(new_step_size > step_size*ratio): #outside interval (too big) + step_size = step_size*ratio + else: + step_size = new_step_size + + #print("new step: ", step_size) + cur_iter += 1 + #print("=============") + #print("determined step: ", step_size) + return step_size, added_budget + + def zoomLineSearch(self,cur_sol,grad,d,max_step,problem,expended_budget): + """ + carry out interpolation line search on the function F where we + min F(x + alpha*d) s.t. 
alpha >=0 where phi(a) = F(x + ad) + + NOTE: in this method, we increase the step size + """ + #print("ZOOM LS") + #print("max step: ",max_step) + if(max_step == 0): + return 0,0 + r = self.factors["r"] + ratio = self.factors["ratio"] + max_iter = self.factors["line_search_max_iters"] + + cur_iter = 0 + #step_size = max_step + cur_step_size = max_step*self.factors["zoom_init_ratio"] + last_step_size = 0 + last_grad = grad + added_budget = 0 + + #xrange = np.arange(0,max_step,0.01) + #nn = len(xrange) + #fval = np.zeros(nn) + #for i in range(nn): + # temp_x = cur_sol.x + xrange[i]*d + # temp_sol = self.create_new_solution(tuple(temp_x), problem) + # problem.simulate(temp_sol, r) + #fval[i] = -1 * problem.minmax[0] * temp_sol.objectives_mean + # fval[i] = temp_sol.objectives_mean + #plt.scatter(xrange,fval) + #plt.show() + + cur_x = cur_sol.x + curF = -1 * problem.minmax[0] * cur_sol.objectives_mean + lastF = curF + nextF, next_grad, budget_spent = self.get_simulated_values(problem,cur_x + cur_step_size*d,value = 'both') + added_budget += budget_spent + + #line = -curF + self.factors["theta"]*xrange*(-1*problem.minmax[0]* grad.dot(d)) + #plt.scatter(xrange,line,color='red') + #plt.show() + returned_steps = [] + returned_vals = [] + + while True: + #while(not suff dec and cur iter) + #if(expended_budget + added_budget > problem.factors["budget"] or cur_iter >= max_iter): + if(cur_iter >= max_iter): + break; + + #print("cur_grad: ", grad.dot(d)) + #print("next_grad: ", next_grad.dot(d)) + #sufficient decrease doesn't satisfy, zoom into an interval + if((nextF >= curF + self.factors['theta'] * cur_step_size * np.dot(grad, d))): + #zoom into the interval {last_step,cur_step} + #step_lo, step_hi, Flo, Fhi, div_lo, div_hi + #print("zooming, NO SF") + return self.zoomSearch(last_step_size,cur_step_size,lastF,nextF, + last_grad.dot(d),next_grad.dot(d),problem, + cur_x,curF,grad,d,added_budget,cur_iter) + + #check curvature, if satisfies then return + 
if((abs(next_grad.dot(d)) <= self.factors['curve_const']*abs(grad.dot(d)))): + #print("Satisfied - upper") + step_size = cur_step_size + break; + if((next_grad.dot(d)) >= 0): + #zoom + #print("zooming, sign") + return self.zoomSearch(cur_step_size,last_step_size,nextF,lastF, + next_grad.dot(d),last_grad.dot(d),problem, + cur_x,curF,grad,d,added_budget,cur_iter) + + returned_steps.append(cur_step_size) + returned_vals.append(nextF) + #print("new step: ", cur_step_size) + #print("sign*Fval: ",nextF) + + if(cur_step_size >= max_step): + break; + + last_step_size = cur_step_size + cur_step_size = min(max_step,cur_step_size*self.factors["zoom_inc_ratio"]) + + lastF = nextF + last_grad = next_grad + nextF, next_grad, budget_spent = self.get_simulated_values(problem,cur_x + cur_step_size*d,value = 'both') + added_budget += budget_spent + + cur_iter += 1 + #print("iter: ",cur_iter) + #print("added budget: ",added_budget) + #print("---------------") + #print("max step: ",max_step) + + if((cur_iter == max_iter) or (cur_step_size >= max_step)): + #return max_step*self.factors["zoom_init_ratio"], added_budget + #print("return from iteration or max step") + return returned_steps[np.argmin(returned_vals)] ,added_budget + #return max_step, added_budget + else: + return cur_step_size, added_budget + + def zoomSearch(self,step_lo, step_hi, Flo, Fhi, div_lo, div_hi, problem,cur_x,curF, grad,d,added_budget,cur_iter): + """ + carry out the zoom search into the interval {} + *two of these are not ordered* + """ + max_iter = self.factors["line_search_max_iters"] + r = self.factors["r"] + sign = -1*problem.minmax[0] + + while(True): + if(cur_iter >= max_iter): + break; + + #m1 = min([step_lo,step_hi]) + #m2 = max([step_lo,step_hi]) + #print("zooming:: (",str(m1) + "," + str(m2) + ")") + #use the actual value without the sign + new_step = self.quadratic_interpolate(step_lo,step_hi,sign*div_lo,sign*div_hi,sign*Flo,sign*Fhi,problem) + #print("left div: ", left_dif) + #print("right div: ", 
right_dif) + #print("left val: ", left_val) + #print("right val: ", right_val) + + #print("new step: ", new_step) + + #xrange = np.arange(m1,m2,(m2-m1)/20) + #nn = len(xrange) + #fval = np.zeros(nn) + #for i in range(nn): + # temp_x = cur_x + xrange[i]*d + # temp_sol = self.create_new_solution(tuple(temp_x), problem) + # problem.simulate(temp_sol, r) + #fval[i] = -1 * problem.minmax[0] * temp_sol.objectives_mean + # fval[i] = temp_sol.objectives_mean + #plt.scatter(xrange,fval) + #plt.show() + + if(abs(new_step - step_lo) < 1e-4 or abs(new_step - step_hi) < 1e-4): + return new_step, added_budget + + #new_grad = grad_f(cur_x + new_step*d).dot(d) + #newF = F(cur_x + new_step*d) + newF, new_grad, budget_spent = self.get_simulated_values(problem,cur_x + new_step*d,value = 'both') + added_budget += budget_spent + + #is_suff_decrese(nextF, curF, theta, cur_grad, cur_step_size, d) + #if(not is_suff_decrese(newF, curF, theta, grad.dot(d), new_step)): + if((newF >= curF + self.factors['theta'] * new_step * np.dot(grad, d))): + step_hi = new_step + #Fhi = F(cur_x + step_hi*d) + Fhi, div_hi, budget_spent = self.get_simulated_values(problem,cur_x + step_hi*d,value = 'both') + div_hi = div_hi.dot(d) + else: + #if(is_strong_curvature(new_grad, grad.dot(d), rho)): + #if(is_curvature(new_grad, grad.dot(d), rho)): + if((abs(new_grad.dot(d)) <= self.factors['curve_const']*abs(grad.dot(d)))): + return new_step, added_budget + if((new_grad.dot(d))*(step_hi - step_lo) >= 0): + step_hi = step_lo + + Fhi, div_hi, budget_spent = self.get_simulated_values(problem,cur_x + step_hi*d,value = 'both') + div_hi = div_hi.dot(d) + added_budget += budget_spent + + step_lo = new_step + #Flo = F(cur_x + step_lo*d) + #Fhi = F(cur_x + step_hi*d) + + Flo, div_lo, budget_spent = self.get_simulated_values(problem,cur_x + step_lo*d,value = 'both') + div_lo = div_lo.dot(d) + added_budget += budget_spent + #Fhi, budget_spent = self.get_simulated_values(problem,cur_x + step_hi*d,value = 'val') + #added_budget 
+= budget_spent + + cur_iter += 1 + + return new_step, added_budget + + def get_FD_grad(self, x, problem, h, r): + """ + find a finite difference gradient from the problem at + the point x + """ + x = np.array(x) + d = len(x) + + if(d == 1): + #xsol = self.create_new_solution(tuple(x), problem) + x1 = x + h/2 + x2 = x - h/2 + + x1 = self.create_new_solution(tuple(x1), problem) + problem.simulate(x1, r) + f1 = -1 * problem.minmax[0] * x1.objectives_mean + + x2 = self.create_new_solution(tuple(x2), problem) + problem.simulate(x2, r) + f2 = -1 * problem.minmax[0] * x2.objectives_mean + grad = (f1-f2)/h + else: + I = np.eye(d) + grad = 0 + + for i in range(d): + x1 = x + h*I[:,i]/2 + x2 = x - h*I[:,i]/2 + + x1 = self.create_new_solution(tuple(x1), problem) + problem.simulate_up_to([x1], r) + f1 = -1 * problem.minmax[0] * x1.objectives_mean + + x2 = self.create_new_solution(tuple(x2), problem) + problem.simulate_up_to([x2], r) + f2 = -1 * problem.minmax[0] * x2.objectives_mean + + grad += ((f1-f2)/h)*I[:,i] + + return grad, (2*d*r) + + def solve(self, problem): + + #print("Starting PGD") + + max_iters = self.factors['max_iters'] + proj_thres = self.factors['proj_thres'] + r = self.factors["r"] + max_gamma = self.factors["max_gamma"] + + ls_type = self.factors['LSmethod'] + #self.factors['problem'] = problem + #print(ls_type) + if(ls_type == 'backtracking'): + self.factors["LSfn"] = self.backtrackLineSearch + elif(ls_type == 'interpolation'): + self.factors["LSfn"] = self.interpolateLineSearch + else: + self.factors["LSfn"] = self.zoomLineSearch + + #t = 1 #first max step size + dim = problem.dim + + Ci = problem.Ci + di = problem.di + Ce = problem.Ce + de = problem.de + + lower = np.array(problem.lower_bounds) + upper = np.array(problem.upper_bounds) + + recommended_solns = [] + intermediate_budgets = [] + expended_budget = 0 + action = "normal" #storing whether we do the projection in each step + last_action = "normal" + + #store consecutive projections + consec_proj = 
0 + k = 0 + max_step = 1 #initial max step + last_normal_maxstep = 1 + last_proj_maxstep = 1 + + # Start with the initial solution. + new_solution = self.create_new_solution(problem.factors["initial_solution"], problem) + #cur_x = new_solution.x + + # Use r simulated observations to estimate the objective value. + problem.simulate(new_solution, r) + expended_budget += r + best_solution = new_solution + recommended_solns.append(new_solution) + intermediate_budgets.append(expended_budget) + + while expended_budget < problem.factors["budget"] and consec_proj < proj_thres*max_iters: + cur_x = new_solution.x + + proj_trace = int(proj_thres*max_iters) + + #computeing the gradients + if problem.gradient_available: + # Use IPA gradient if available. + grad = -1 * problem.minmax[0] * new_solution.objectives_gradients_mean[0] + else: + # Use finite difference to estimate gradient if IPA gradient is not available. + #grad, budget_spent = self.finite_diff(new_solution, problem, r, stepsize = alpha) + grad, budget_spent = self.get_FD_grad(cur_x, problem, self.factors["h"], self.factors["r"]) + expended_budget += budget_spent + # A while loop to prevent zero gradient. + #while np.all((grad == 0)): + # if expended_budget > problem.factors["budget"]: + # break + #grad, budget_spent = self.finite_diff(new_solution, problem, r) + #grad, budget_spent = self.get_FD_grad(self, x, problem, h, r) + # grad, budget_spent = self.get_FD_grad(cur_x, problem, self.factors["h"], self.factors["r"]) + # expended_budget += budget_spent + # Update r after each iteration. 
+ #r = int(self.factors["lambda"] * r) + + #print("max_step: ",max_step) + direction = -grad/np.linalg.norm(grad) + temp_x = cur_x + max_step * direction + #print("cur x: ",cur_x) + #print("temp x: ",temp_x) + + #if the new iterate is feasible, then no need to project + if(not self.is_feasible(temp_x, Ci,di,Ce,de,lower,upper)): + action = "project" + proj_x = self.proj(temp_x,Ci,di,Ce,de,lower,upper) + #print("proj x: ",proj_x) + direction = proj_x - cur_x #change direction to the projected point + max_step = 1 + + #if(last_action == "project"): + #consecutive projection: should increase max proj step + # max_step = min(self.factors["max_step_size"],last_proj_maxstep/self.factors["ratio"]) + #else: + #last step is normal + #max step is to go to the boundary + # max_step = 1 + consec_proj += 1 + else: + action = "normal" + #decrease consecutive projection if we don't have the projection + if(consec_proj > 0): + #consec_proj -= 1 + consec_proj = 0 + + #max_step_feas = self.get_max_gamma_ratio_test(cur_x, direction, Ce, Ci, de, di, lower, upper) + #print("max step: ", max_step) + #step sizes + if(self.factors["backtrack"]): + #t, added_budget = self.LineSearch(new_solution,grad,direction,self.factors["max_gamma"],problem,expended_budget) + #t, added_budget = self.LineSearch(new_solution,grad,direction,t,problem,expended_budget) + t, added_budget = self.factors['LSfn'](new_solution,grad,direction,max_step,problem,expended_budget) + expended_budget += added_budget + else: + #t = min(self.factors["step_f"](k),self.factors["max_gamma"]) + t = self.factors["step_f"](k)#*direction#np.linalg.norm(grad) + + if(action == "normal"): + #store the last max step size of the normal iteratopnm + last_normal_maxstep = max_step + if(t == max_step): + #if we reach max step, then next iteration should move further + #max_step = min(max_gamma,max_step/self.factors["ratio"]) + max_step = min(self.factors["max_step_size"],max_step/self.factors["ratio"]) + #t = min(t,max_step_feas) + else: 
+ max_step = max(1,max_step*self.factors["ratio"]) + else: + #we have the projection, next max step is the max step from + #the iteration before projection + #store the max step in the projection iteration + #last_proj_maxstep = max_step + if(t == max_step): + #print("full projection") + #max_step = min(self.factors["max_step_size"],last_proj_maxstep/self.factors["ratio"]) + last_proj_maxstep = min(self.factors["max_step_size"],last_proj_maxstep/self.factors["ratio"]) + max_step = last_proj_maxstep + else: + #use this for the next iteration, assume to be normal + last_proj_maxstep = max(1,last_proj_maxstep*self.factors["ratio"]) + max_step = last_normal_maxstep + #print("act: ", action) + #if(t == max_step): + # max_step = min(max_gamma,max_step/self.factors["ratio"]) + #print("grad: ", grad) + #print("step: ", t) + #t = self.factors['step_f'](k) + #new_x = cur_x - t * grad + new_x = cur_x + t * direction + last_action = action + #update the max step for the next iteration + #t = min(self.factors["max_step_size"],t/self.factors["ratio"]) + #print("new_x before proj: ", new_x) + + #print("new_x after proj: ", new_x) + #print("new x: ",new_x) + + candidate_solution = self.create_new_solution(tuple(new_x), problem) + # Use r simulated observations to estimate the objective value. 
+ problem.simulate(candidate_solution, r) + expended_budget += r + + new_solution = candidate_solution + recommended_solns.append(candidate_solution) + intermediate_budgets.append(expended_budget) + + #print("current budget: ",expended_budget) + #print("========================") + + k += 1 + #print("obj: ",candidate_solution.objectives_mean) + #print("------------------------------------") + return recommended_solns, intermediate_budgets + \ No newline at end of file diff --git a/simopt/solvers/active_set.py b/simopt/solvers/active_set.py new file mode 100644 index 000000000..f85a82e27 --- /dev/null +++ b/simopt/solvers/active_set.py @@ -0,0 +1,643 @@ +""" +Summary +------- +ACTIVESET: An active set algorithm for problems with linear constraints i.e., Ce@x = de, Ci@x <= di. +A detailed description of the solver can be found `here `_. +""" +import numpy as np +import cvxpy as cp +import warnings +warnings.filterwarnings("ignore") + +from ..base import Solver + + +class ACTIVESET(Solver): + """ + The Active Set solver. 
+ + Attributes + ---------- + name : string + name of solver + objective_type : string + description of objective types: + "single" or "multi" + constraint_type : string + description of constraints types: + "unconstrained", "box", "deterministic", "stochastic" + variable_type : string + description of variable types: + "discrete", "continuous", "mixed" + gradient_needed : bool + indicates if gradient of objective function is needed + factors : dict + changeable factors (i.e., parameters) of the solver + specifications : dict + details of each factor (for GUI, data validation, and defaults) + rng_list : list of rng.MRG32k3a objects + list of RNGs used for the solver's internal purposes + + Arguments + --------- + name : str + user-specified name for solver + fixed_factors : dict + fixed_factors of the solver + + See also + -------- + base.Solver + """ + def __init__(self, name="ACTIVESET", fixed_factors={}): + self.name = name + self.objective_type = "single" + self.constraint_type = "deterministic" + self.variable_type = "continuous" + self.gradient_needed = False + self.specifications = { + "crn_across_solns": { + "description": "use CRN across solutions?", + "datatype": bool, + "default": True + }, + "r": { + "description": "number of replications taken at each solution", + "datatype": int, + "default": 50 #30, 50 + }, + "alpha": { + "description": "tolerance for sufficient decrease condition.", + "datatype": float, + "default": 0.2 #0.2 + }, + "beta": { + "description": "step size reduction factor in line search.", + "datatype": float, + "default": 0.9 #0.9 + }, + "alpha_max": { + "description": "maximum step size.", + "datatype": float, + "default": 5 #10.0, 5 + }, + "lambda": { + "description": "magnifying factor for r inside the finite difference function", + "datatype": int, + "default": 2 #2 + }, + "tol": { + "description": "floating point tolerance for checking tightness of constraints", + "datatype": float, + "default": 1e-7 + }, + "tol2": { + 
"description": "floating point tolerance for checking closeness of dot product to zero", + "datatype": float, + "default": 1e-5 + }, + "finite_diff_step": { + "description": "step size for finite difference", + "datatype": float, + "default": 1e-5 + } + } + self.check_factor_list = { + "crn_across_solns": self.check_crn_across_solns, + "r": self.check_r, + "alpha": self.check_alpha, + "beta": self.check_beta, + "alpha_max": self.check_alpha_max, + "lambda": self.check_lambda, + "tol": self.check_tol, + "tol2": self.check_tol2, + "finite_diff_step": self.check_finite_diff_step + } + super().__init__(fixed_factors) + + def check_r(self): + return self.factors["r"] > 0 + + def check_alpha(self): + return self.factors["alpha"] > 0 + + def check_beta(self): + return self.factors["beta"] > 0 & self.factors["beta"] < 1 + + def check_alpha_max(self): + return self.factors["alpha_max"] > 0 + + def check_lambda(self): + return self.factors["lambda"] > 0 + + def check_tol(self): + return self.factors["tol"] > 0 + + def check_tol2(self): + return self.factors["tol2"] > 0 + + def check_finite_diff_step(self): + return self.factors["finite_diff_step"] > 0 + + def solve(self, problem): + """ + Run a single macroreplication of a solver on a problem. + + Arguments + --------- + problem : Problem object + simulation-optimization problem to solve + crn_across_solns : bool + indicates if CRN are used when simulating different solutions + + Returns + ------- + recommended_solns : list of Solution objects + list of solutions recommended throughout the budget + intermediate_budgets : list of ints + list of intermediate budgets when recommended solutions changes + """ + recommended_solns = [] + intermediate_budgets = [] + expended_budget = 0 + + # Default values. 
+ r = self.factors["r"] + alpha = self.factors["alpha"] + beta = self.factors["beta"] + tol = self.factors["tol"] + tol2 = self.factors["tol2"] + max_step = self.factors["alpha_max"] # Maximum step size + + # Upper bound and lower bound. + lower_bound = np.array(problem.lower_bounds) + upper_bound = np.array(problem.upper_bounds) + + # Input inequality and equlaity constraint matrix and vector. + # Cix <= di + # Cex = de + Ci = problem.Ci + di = problem.di + Ce = problem.Ce + de = problem.de + + # Remove redundant upper/lower bounds. + ub_inf_idx = np.where(~np.isinf(upper_bound))[0] + lb_inf_idx = np.where(~np.isinf(lower_bound))[0] + + # Form a constraint coefficient matrix where all the equality constraints are put on top and + # all the bound constraints in the bottom and a constraint coefficient vector. + if (Ce is not None) and (de is not None) and (Ci is not None) and (di is not None): + C = np.vstack((Ce, Ci)) + d = np.vstack((de.T, di.T)) + elif (Ce is not None) and (de is not None): + C = Ce + d = de.T + elif (Ci is not None) and (di is not None): + C = Ci + d = di.T + else: + C = np.empty([1, problem.dim]) + d = np.empty([1, 1]) + + if len(ub_inf_idx) > 0: + C = np.vstack((C, np.identity(upper_bound.shape[0]))) + d = np.vstack((d, upper_bound[np.newaxis].T)) + if len(lb_inf_idx) > 0: + C = np.vstack((C, -np.identity(lower_bound.shape[0]))) + d = np.vstack((d, -lower_bound[np.newaxis].T)) + + # Number of equality constraints. + if (Ce is not None) and (de is not None): + neq = len(de) + else: + neq = 0 + + # Checker for whether the problem is unconstrained. + unconstr_flag = (Ce is None) & (Ci is None) & (di is None) & (de is None) & (all(np.isinf(lower_bound))) & (all(np.isinf(upper_bound))) + + # Start with the initial solution. + new_solution = self.create_new_solution(problem.factors["initial_solution"], problem) + new_x = new_solution.x + + # If the initial solution is not feasible, generate one using phase one simplex. 
+ if (not unconstr_flag) & (not self._feasible(new_x, problem, tol)): + new_x = self.find_feasible_initial(problem, Ce, Ci, de, di, tol) + new_solution = self.create_new_solution(tuple(new_x), problem) + + # Use r simulated observations to estimate the objective value. + problem.simulate(new_solution, r) + expended_budget += r + best_solution = new_solution + recommended_solns.append(new_solution) + intermediate_budgets.append(expended_budget) + + # Active constraint index vector. + acidx = [] + if not unconstr_flag: + # Initialize the active set to be the set of indices of the tight constraints. + cx = np.dot(C, new_x) + for j in range(cx.shape[0]): + if j < neq or np.isclose(cx[j], d[j], rtol=0, atol= tol): + acidx.append(j) + + while expended_budget < problem.factors["budget"]: + new_x = new_solution.x + # # Check variable bounds. + # forward = np.isclose(new_x, lower_bound, atol = tol).astype(int) + # backward = np.isclose(new_x, upper_bound, atol = tol).astype(int) + # # BdsCheck: 1 stands for forward, -1 stands for backward, 0 means central diff. + # BdsCheck = np.subtract(forward, backward) + + if problem.gradient_available: + # Use IPA gradient if available. + grad = -1 * problem.minmax[0] * new_solution.objectives_gradients_mean[0] + else: + # Use finite difference to estimate gradient if IPA gradient is not available. + grad, budget_spent = self.finite_diff(new_solution, problem, r, C, d, stepsize = alpha) + expended_budget += budget_spent + # A while loop to prevent zero gradient. + while np.all((grad == 0)): + if expended_budget > problem.factors["budget"]: + break + grad, budget_spent = self.finite_diff(new_solution, problem, r, C, d) + expended_budget += budget_spent + # Update r after each iteration. + r = int(self.factors["lambda"] * r) + + # If the active set is empty, search on negative gradient. + if len(acidx) == 0: + dir = -grad + else: + # Find the search direction and Lagrange multipliers of the direction-finding problem. 
+ dir, lmbd, = self.compute_search_direction(acidx, grad, problem, C) + # If the optimal search direction is 0 + if (np.isclose(np.linalg.norm(dir), 0, rtol=0, atol=tol2)): + print('dir: ', dir) + # Terminate if Lagrange multipliers of the inequality constraints in the active set are all nonnegative. + if unconstr_flag or np.all(lmbd[neq:] >= 0): + print('break') + break + # Otherwise, drop the inequality constraint in the active set with the most negative Lagrange multiplier. + else: + # q = acidx[neq + np.argmin(lmbd[neq:][lmbd[neq:] < 0])] + q = acidx[neq + np.argmin(lmbd[neq:])] + print('q: ', q) + acidx.remove(q) + else: + if not unconstr_flag: + idx = list(set(np.arange(C.shape[0])) - set(acidx)) # Constraints that are not in the active set. + # If all constraints are feasible. + if unconstr_flag or np.all(C[idx,:] @ dir <= 0): + # Line search to determine a step_size. + print('line search 1') + new_solution, step_size, expended_budget, _ = self.line_search(problem, expended_budget, r, grad, new_solution, max_step, dir, alpha, beta) + print('budget: ', expended_budget) + # Update maximum step size for the next iteration. + max_step = step_size + + # Ratio test to determine the maximum step size possible + else: + # Get all indices not in the active set such that Ai^Td>0 + r_idx = list(set(idx).intersection(set((C @ dir > 0).nonzero()[0]))) + # Compute the ratio test + ra = d[r_idx,:].flatten() - C[r_idx, :] @ new_x + ra_d = C[r_idx, :] @ dir + # Initialize maximum step size. + s_star = np.inf + # Initialize blocking constraint index. + q = -1 + # Perform ratio test. + for i in range(len(ra)): + if ra_d[i] - tol > 0: + s = ra[i]/ra_d[i] + if s < s_star: + s_star = s + q = r_idx[i] + # If there is no blocking constraint (i.e., s_star >= 1) + if s_star >= 1: + # print('no blocking c') + # Line search to determine a step_size. 
+ print('line search 2') + new_solution, step_size, expended_budget, _ = self.line_search(problem, expended_budget, r, grad, new_solution, s_star, dir, alpha, beta) + print('budget: ', expended_budget) + # If there is a blocking constraint (i.e., s_star < 1) + else: + # Add blocking constraint to the active set. + if q not in acidx: + acidx.append(q) + # No need to do line search if s_star is 0. + if s_star > 0: + # Line search to determine a step_size. + print('line search 3') + new_solution, step_size, expended_budget, count = self.line_search(problem, expended_budget, r, grad, new_solution, s_star, dir, alpha, beta) + print('budget: ', expended_budget) + # Append new solution. + if (problem.minmax[0] * new_solution.objectives_mean > problem.minmax[0] * best_solution.objectives_mean): + best_solution = new_solution + recommended_solns.append(new_solution) + intermediate_budgets.append(expended_budget) + print(problem.minmax[0] * new_solution.objectives_mean) + return recommended_solns, intermediate_budgets + + + def compute_search_direction(self, acidx, grad, problem, C): + ''' + Compute a search direction by solving a direction-finding quadratic subproblem at solution x. + + Arguments + --------- + acidx: list + list of indices of active constraints + grad : ndarray + the estimated objective gradient at new_solution + problem : Problem object + simulation-optimization problem to solve + C : ndarray + constraint matrix + + Returns + ------- + d : ndarray + search direction + lmbd : ndarray + Lagrange multipliers for this LP + ''' + # Define variables. + d = cp.Variable(problem.dim) + + # Define constraints. + constraints = [C[acidx, :] @ d == 0] + + # Define objective. 
+ obj = cp.Minimize(grad @ d + 0.5 * cp.quad_form(d, np.identity(problem.dim))) + prob = cp.Problem(obj, constraints) + prob.solve() + # Get Lagrange multipliers + lmbd = prob.constraints[0].dual_value + + dir = np.array(d.value) + dir[np.abs(dir) < self.factors["tol"]] = 0 + + return dir, lmbd + + + def finite_diff(self, new_solution, problem, r, C, d, stepsize = 1e-5, tol = 1e-7): + ''' + Finite difference for approximating objective gradient at new_solution. + + Arguments + --------- + new_solution : Solution object + a solution to the problem + problem : Problem object + simulation-optimization problem to solve + r : int + number of replications taken at each solution + C : ndarray + constraint matrix + d : ndarray + constraint vector + stepsize: float + step size for finite differences + + Returns + ------- + grad : ndarray + the estimated objective gradient at new_solution + budget_spent : int + budget spent in finite difference + ''' + + BdsCheck = np.zeros(problem.dim) + fn = -1 * problem.minmax[0] * new_solution.objectives_mean + new_x = new_solution.x + # Store values for each dimension. + FnPlusMinus = np.zeros((problem.dim, 3)) + grad = np.zeros(problem.dim) + + for i in range(problem.dim): + # Initialization. + x1 = list(new_x) + x2 = list(new_x) + # Forward stepsize. + steph1 = stepsize + # Backward stepsize. + steph2 = stepsize + + dir1 = np.zeros(problem.dim) + dir1[i] = 1 + dir2 = np.zeros(problem.dim) + dir2[i] = -1 + + ra = d.flatten() - C @ new_x + ra_d = C @ dir1 + # Initialize maximum step size. + temp_steph1 = np.inf + # Perform ratio test. + for j in range(len(ra)): + if ra_d[j] - tol > 0: + s = ra[j]/ra_d[j] + if s < steph1: + temp_steph1 = s + steph1 = min(temp_steph1, steph1) + + ra_d = C @ dir2 + # Initialize maximum step size. + temp_steph2 = np.inf + # Perform ratio test. 
+ for j in range(len(ra)): + if ra_d[j] - tol > 0: + s = ra[j]/ra_d[j] + if s < steph2: + temp_steph2 = s + steph2 = min(temp_steph2, steph2) + + if (steph1 != 0) & (steph2 != 0): + BdsCheck[i] = 0 + elif steph1 == 0: + BdsCheck[i] = -1 + else: + BdsCheck[i] = 1 + + # Decide stepsize. + # Central diff. + if BdsCheck[i] == 0: + FnPlusMinus[i, 2] = min(steph1, steph2) + x1[i] = x1[i] + FnPlusMinus[i, 2] + x2[i] = x2[i] - FnPlusMinus[i, 2] + # Forward diff. + elif BdsCheck[i] == 1: + FnPlusMinus[i, 2] = steph1 + x1[i] = x1[i] + FnPlusMinus[i, 2] + # Backward diff. + else: + FnPlusMinus[i, 2] = steph2 + x2[i] = x2[i] - FnPlusMinus[i, 2] + + x1_solution = self.create_new_solution(tuple(x1), problem) + if BdsCheck[i] != -1: + problem.simulate_up_to([x1_solution], r) + fn1 = -1 * problem.minmax[0] * x1_solution.objectives_mean + # First column is f(x+h,y). + FnPlusMinus[i, 0] = fn1 + x2_solution = self.create_new_solution(tuple(x2), problem) + if BdsCheck[i] != 1: + problem.simulate_up_to([x2_solution], r) + fn2 = -1 * problem.minmax[0] * x2_solution.objectives_mean + # Second column is f(x-h,y). + FnPlusMinus[i, 1] = fn2 + # Calculate gradient. + if BdsCheck[i] == 0: + grad[i] = (fn1 - fn2) / (2 * FnPlusMinus[i, 2]) + elif BdsCheck[i] == 1: + grad[i] = (fn1 - fn) / FnPlusMinus[i, 2] + elif BdsCheck[i] == -1: + grad[i] = (fn - fn2) / FnPlusMinus[i, 2] + budget_spent = (2 * problem.dim - np.sum(BdsCheck != 0)) * r + return grad, budget_spent + + def line_search(self, problem, expended_budget, r, grad, cur_sol, alpha_0, d, alpha, beta): + """ + A backtracking line-search along [x, x + rd] assuming all solution on the line are feasible. 
+ + Arguments + --------- + problem : Problem object + simulation-optimization problem to solve + expended_budget: int + current expended budget + r : int + number of replications taken at each solution + grad : ndarray + objective gradient of cur_sol + cur_sol : Solution object + current solution + alpha_0 : float + maximum step size allowed + d : ndarray + search direction + alpha: float + tolerance for sufficient decrease condition + beta: float + step size reduction factor + + Returns + ------- + x_new_solution : Solution + a solution obtained by line search + step_size : float + computed step size + expended_budget : int + updated expended budget + """ + x = cur_sol.x + fx = -1 * problem.minmax[0] * cur_sol.objectives_mean + step_size = alpha_0 + count = 0 + x_new_solution = cur_sol + while True: + if expended_budget > problem.factors["budget"]: + break + x_new = x + step_size * d + # Create a solution object for x_new. + x_new_solution = self.create_new_solution(tuple(x_new), problem) + # Use r simulated observations to estimate the objective value. + problem.simulate(x_new_solution, r) + expended_budget += r + # Check the sufficient decrease condition. + f_new = -1 * problem.minmax[0] * x_new_solution.objectives_mean + if f_new < fx + alpha * step_size * np.dot(grad, d): + break + step_size *= beta + count += 1 + if count >= 50: + break + + return x_new_solution, step_size, expended_budget, count + + def find_feasible_initial(self, problem, Ae, Ai, be, bi, tol): + ''' + Find an initial feasible solution (if not user-provided) + by solving phase one simplex. 
+ + Arguments + --------- + problem : Problem object + simulation-optimization problem to solve + C: ndarray + constraint coefficient matrix + d: ndarray + constraint coefficient vector + + Returns + ------- + x0 : ndarray + an initial feasible solution + tol: float + Floating point comparison tolerance + ''' + upper_bound = np.array(problem.upper_bounds) + lower_bound = np.array(problem.lower_bounds) + + # Define decision variables. + x = cp.Variable(problem.dim) + + # Define constraints. + constraints = [] + + if (Ae is not None) and (be is not None): + constraints.append(Ae @ x == be.ravel()) + if (Ai is not None) and (bi is not None): + constraints.append(Ai @ x <= bi.ravel()) + + # Removing redundant bound constraints. + ub_inf_idx = np.where(~np.isinf(upper_bound))[0] + if len(ub_inf_idx) > 0: + for i in ub_inf_idx: + constraints.append(x[i] <= upper_bound[i]) + lb_inf_idx = np.where(~np.isinf(lower_bound)) + if len(lb_inf_idx) > 0: + for i in lb_inf_idx: + constraints.append(x[i] >= lower_bound[i]) + + # Define objective function. + obj = cp.Minimize(0) + + # Create problem. + model = cp.Problem(obj, constraints) + + # Solve problem. + model.solve(solver = cp.SCIPY) + + # Check for optimality. + if model.status not in [cp.OPTIMAL, cp.OPTIMAL_INACCURATE] : + raise ValueError("Could not find feasible x0") + x0 = x.value + if not self._feasible(x0, problem, tol): + raise ValueError("Could not find feasible x0") + + return x0 + + def _feasible(self, x, problem, tol): + """ + Check whether a solution x is feasible to the problem. 
+ + Arguments + --------- + x : tuple + a solution vector + problem : Problem object + simulation-optimization problem to solve + tol: float + Floating point comparison tolerance + """ + x = np.asarray(x) + lb = np.asarray(problem.lower_bounds) + ub = np.asarray(problem.upper_bounds) + res = True + if (problem.Ci is not None) and (problem.di is not None): + res = res & np.all(problem.Ci @ x <= problem.di + tol) + if (problem.Ce is not None) and (problem.de is not None): + res = res & (np.allclose(np.dot(problem.Ce, x), problem.de, rtol=0, atol=tol)) + return res & (np.all(x >= lb)) & (np.all(x <= ub)) \ No newline at end of file diff --git a/simopt/solvers/gasso.py b/simopt/solvers/gasso.py new file mode 100644 index 000000000..504555341 --- /dev/null +++ b/simopt/solvers/gasso.py @@ -0,0 +1,276 @@ +""" +Summary +------- +Iteratively generates population of candidate solutions from a sample distribution and use its performance +to update the sample distribution. +A detailed description of the solver can be found `here `_. +""" +from ..base import Solver +import numpy as np +import warnings +warnings.filterwarnings("ignore") + +class GASSO(Solver): + """ + A solver that iteratively generates population of candidate solutions from a sample distribution + and use its performance to update the sample distribution. 
+ + Attributes + ---------- + name : string + name of solver + objective_type : string + description of objective types: + "single" or "multi" + constraint_type : string + description of constraints types: + "unconstrained", "box", "deterministic", "stochastic" + variable_type : string + description of variable types: + "discrete", "continuous", "mixed" + gradient_needed : bool + indicates if gradient of objective function is needed + factors : dict + changeable factors (i.e., parameters) of the solver + specifications : dict + details of each factor (for GUI, data validation, and defaults) + rng_list : list of mrg32k3a.mrg32k3a.MRG32k3a objects + list of RNGs used for the solver's internal purposes + + Arguments + --------- + name : str + user-specified name for solver + fixed_factors : dict + fixed_factors of the solver + + See also + -------- + base.Solver + """ + def __init__(self, name="GASSO", fixed_factors=None): + if fixed_factors is None: + fixed_factors = {} + self.name = name + self.objective_type = "single" + self.constraint_type = "box" + self.variable_type = "continuous" + self.gradient_needed = True + self.specifications = { + "crn_across_solns": { + "description": "use CRN across solutions?", + "datatype": bool, + "default": True + }, + "sample_size": { + "description": "sample size per solution", + "datatype": int, + "default": 10 + }, + "max_iter": { + "description": "maximum number of iterations", + "datatype": int, + "default": 10000 + }, + "rho": { + "description": "quantile parameter", + "datatype": float, + "default": 0.15 + }, + "M": { + "description": "times of simulations for each candidate solution", + "datatype": int, + "default": 15 + }, + "alpha_0": { + "description": "step size numerator", + "datatype": int, + "default": 15 + }, + "alpha_c": { + "description": "step size denominator constant", + "datatype": int, + "default": 150 + }, + "alpha_p": { + "description": "step size denominator exponent", + "datatype": float, + "default": 
0.6 + }, + "MaxNumSoln": { + "description": "maximum number of solutions that can be reported within max budget", + "datatype": int, + "default": 10002 + } + } + self.check_factor_list = { + "crn_across_solns": self.check_crn_across_solns, + "sample_size": self.check_sample_size, + "max_iter": self.check_max_iter, + "rho": self.check_rho, + "M": self.check_M, + "alpha_0": self.check_alpha_0, + "alpha_c": self.check_alpha_c, + "alpha_p": self.check_alpha_p, + "MaxNumSoln": self.check_MaxNumSoln + } + super().__init__(fixed_factors) + + def check_sample_size(self): + return self.factors["sample_size"] > 0 + + def check_max_iter(self): + return self.factors["max_iter"] > 0 + + def check_rho(self): + return 0 < self.factors["rho"] < 1 + + def check_M(self): + return self.factors["M"] > 0 + + def check_alpha_0(self): + return self.factors["alpha_0"] > 0 + + def check_alpha_c(self): + return self.factors["alpha_c"] > 0 + + def check_alpha_p(self): + return 0 < self.factors["alpha_p"] < 1 + + def check_MaxNumSoln(self): + return self.factors["MaxNumSoln"] > 0 + + def solve(self, problem): + """ + Run a single macroreplication of a solver on a problem. 
+ + Arguments + --------- + problem : Problem object + simulation-optimization problem to solve + + Returns + ------- + recommended_solns : list of Solution objects + list of solutions recommended throughout the budget + intermediate_budgets : list of ints + list of intermediate budgets when recommended solutions changes + """ + dim = problem.dim + rand_sol_rng = self.rng_list[0] + x_ini = [problem.get_random_solution(rand_sol_rng) for i in range(9)] + x_ini.append(problem.factors["initial_solution"]) + + # Initialize sampling distribution based on the initial solution population + mu_k = np.mean(x_ini, axis = 0) # The mean for each dim + var_k = np.var(x_ini, axis = 0) + theta1_k = mu_k/var_k + theta2_k = -0.5 * np.ones(problem.dim) / var_k + theta_k = np.append(theta1_k, theta2_k) + N = int(50 * np.sqrt(dim)) + K = int(np.floor(problem.factors['budget']/(N * self.factors['M']))) + MaxNumSoln = K + 2 + + recommended_solns = [] + intermediate_budgets = [] + expended_budget = 0 + # Designate random number generator for random sampling. 
+ find_next_soln_rng = self.rng_list[1] + + # Get random solutions from normal distribution (truncated) + x = np.zeros((N, dim)) + kk = 0 + while kk < N: + normal_vec = np.array([find_next_soln_rng.normalvariate() for i in range(N * dim)]).reshape((N,dim)) + X_k = np.multiply(normal_vec, np.sqrt(var_k)) + np.ones((N, dim)) * mu_k + for i in range(N): + if all(X_k[i, :] >= problem.lower_bounds) and all(X_k[i, :] <= problem.upper_bounds) and kk < N: + x[kk, :] = X_k[i, :] + kk += 1 + X_k = x + + # Create the initial solution based on the truncated normal + new_solution = self.create_new_solution(problem.factors['initial_solution'], problem) + recommended_solns.append(new_solution) + intermediate_budgets.append(expended_budget) + problem.simulate(new_solution, self.factors['M']) + expended_budget += self.factors['M'] + + # Track the internal updates + Hbar = np.zeros(K) + xbar = np.zeros((K, dim)) + hvar = np.zeros(K) + k = 0 + + # Sequentially generate random solutions and simulate them. + while expended_budget < problem.factors['budget'] and k < K: + # Update alpha + alpha_k = self.factors['alpha_0'] / (k + self.factors['alpha_c']) ** self.factors['alpha_p'] + H = np.zeros(N) + H_var = np.zeros(N) + # Sample N new solution candidates by updated distribution + for i in range(N): + new_solution = self.create_new_solution(X_k[i, :], problem) + X_k[i, :] = new_solution.x + problem.simulate(new_solution, self.factors['M']) + expended_budget += self.factors['M'] + H[i] = problem.minmax * new_solution.objectives_mean + H_var[i] = np.var(new_solution.objectives) + # Find the best one among the N candidates + Hbar[k], idx = np.max(H), np.argmax(H) + hvar[k] = H_var[idx] + xbar[k, :] = X_k[idx, :] + new_solution = X_k[idx, :] + + if k >= 1: + # Compare the new best candidate to previous best solution, if better then update + if Hbar[k] < Hbar[k-1] and Hbar[k] != None: + Hbar[k] = Hbar[k-1] + xbar[k, :] = xbar[k-1, :] + hvar[k] = hvar[k-1] + new_solution = xbar[k, :] + + # 
Track the new candidate and update recommended_sols and budgets + new_solution = self.create_new_solution(tuple(xbar[k, :]), problem) + recommended_solns.append(new_solution) + intermediate_budgets.append(expended_budget) + # Shape function + G_sort = np.sort(H)[::-1] + gm = G_sort[int(np.ceil(N * self.factors['rho']))] + S_theta = H > gm + + # Estimate gradient and hessian + w_k = S_theta/sum(S_theta) + CX_k = np.hstack((X_k, X_k * X_k)).T + grad_k = np.matmul(w_k.T, CX_k.T) - np.vstack((mu_k, var_k + mu_k * mu_k)).reshape(1, -1) + Hes_k = -np.cov(CX_k) + Hes_k_inv = np.linalg.inv(Hes_k + 1e-5 * np.eye(2*dim)) @ np.diag(np.ones(2*dim)) + # Update the parameter using an SA iteration + theta_k -= (alpha_k * (Hes_k_inv @ grad_k.T)).reshape(1, -1)[0] + theta1_k = theta_k[:dim] + theta2_k = theta_k[dim: 2 * dim] + var_k = -0.5/theta2_k + mu_k = theta1_k * var_k + + # Project mu_k and var_k to feasible parameter space + for i in range(dim): + if mu_k[i] < problem.lower_bounds[i]: + mu_k[i] = problem.lower_bounds[i] + if mu_k[i] > problem.upper_bounds[i]: + mu_k[i] = problem.upper_bounds[i] + var_k = abs(var_k) + + # Generate new candidate solutions from the truncated normal distribution + x = np.zeros((N, dim)) + kk = 0 + while kk < N: + normal_vec = np.array([find_next_soln_rng.normalvariate() for i in range(N * dim)]).reshape((N,dim)) + X_k = np.multiply(normal_vec, np.sqrt(var_k)) + np.ones((N, dim)) * mu_k + for i in range(N): + if all(X_k[i, :] >= problem.lower_bounds) and all(X_k[i, :] <= problem.upper_bounds) and kk < N: + x[kk, :] = X_k[i, :] + kk += 1 + k += 1 + X_k = x + + return recommended_solns, intermediate_budgets \ No newline at end of file diff --git a/simopt/solvers/pgdss.py b/simopt/solvers/pgdss.py new file mode 100644 index 000000000..2c985c9f1 --- /dev/null +++ b/simopt/solvers/pgdss.py @@ -0,0 +1,576 @@ +""" +Summary +------- +PGD-SS: A projected gradient descent algorithm with adaptive step search +for problems with linear constraints, i.e., Ce@x 
= de, Ci@x <= di. +A detailed description of the solver can be found `here `_. +""" +import numpy as np +import cvxpy as cp +import warnings +warnings.filterwarnings("ignore") + +from ..base import Solver + + +class PGDSS(Solver): + """ + The PGD solver with adaptive step search. + + Attributes + ---------- + name : string + name of solver + objective_type : string + description of objective types: + "single" or "multi" + constraint_type : string + description of constraints types: + "unconstrained", "box", "deterministic", "stochastic" + variable_type : string + description of variable types: + "discrete", "continuous", "mixed" + gradient_needed : bool + indicates if gradient of objective function is needed + factors : dict + changeable factors (i.e., parameters) of the solver + specifications : dict + details of each factor (for GUI, data validation, and defaults) + rng_list : list of rng.MRG32k3a objects + list of RNGs used for the solver's internal purposes + + Arguments + --------- + name : str + user-specified name for solver + fixed_factors : dict + fixed_factors of the solver + + See also + -------- + base.Solver + """ + def __init__(self, name="PGD-SS", fixed_factors={}): + self.name = name + self.objective_type = "single" + self.constraint_type = "deterministic" + self.variable_type = "continuous" + self.gradient_needed = False + self.specifications = { + "crn_across_solns": { + "description": "use CRN across solutions?", + "datatype": bool, + "default": True + }, + "r": { + "description": "number of replications taken at each solution", + "datatype": int, + "default": 30 + }, + "theta": { + "description": "constant in the Armijo condition", + "datatype": int, + "default": 0.2 + }, + "gamma": { + "description": "constant for shrinking the step size", + "datatype": int, + "default": 0.8 + }, + "alpha_max": { + "description": "maximum step size", + "datatype": int, + "default": 10 + }, + "alpha_0": { + "description": "initial step size", + "datatype": int, 
+ "default": 1 + }, + "epsilon_f": { + "description": "additive constant in the Armijo condition", + "datatype": int, + "default": 1e-3 # In the paper, this value is estimated for every epoch but a value > 0 is justified in practice. + }, + "lambda": { + "description": "magnifying factor for r inside the finite difference function", + "datatype": int, + "default": 2 + }, + "tol": { + "description": "floating point comparison tolerance", + "datatype": float, + "default": 1e-7 + }, + "finite_diff_step": { + "description": "step size for finite difference", + "datatype": float, + "default": 1e-5 + } + + } + self.check_factor_list = { + "crn_across_solns": self.check_crn_across_solns, + "r": self.check_r, + "theta": self.check_theta, + "gamma": self.check_gamma, + "alpha_max": self.check_alpha_max, + "alpha_0": self.check_alpha_0, + "epsilon_f": self.check_epsilon_f, + "lambda": self.check_lambda, + "tol": self.check_tol, + "finite_diff_step": self.check_finite_diff_step + } + super().__init__(fixed_factors) + + def check_r(self): + return self.factors["r"] > 0 + + def check_theta(self): + return self.factors["theta"] > 0 & self.factors["theta"] < 1 + + def check_gamma(self): + return self.factors["gamma"] > 0 & self.factors["gamma"] < 1 + + def check_alpha_max(self): + return self.factors["alpha_max"] > 0 + + def check_alpha_0(self): + return self.factors["alpha_0"] > 0 + + def check_epsilon_f(self): + return self.factors["epsilon_f"] > 0 + + def check_tol(self): + return self.factors["tol"] > 0 + + def check_lambda(self): + return self.factors["lambda"] > 0 + + def check_finite_diff_step(self): + return self.factors["finite_diff_step"] > 0 + + def solve(self, problem): + """ + Run a single macroreplication of a solver on a problem. 
+ + Arguments + --------- + problem : Problem object + simulation-optimization problem to solve + crn_across_solns : bool + indicates if CRN are used when simulating different solutions + + Returns + ------- + recommended_solns : list of Solution objects + list of solutions recommended throughout the budget + intermediate_budgets : list of ints + list of intermediate budgets when recommended solutions changes + """ + recommended_solns = [] + intermediate_budgets = [] + expended_budget = 0 + + # Default values. + r = self.factors["r"] + tol = self.factors["tol"] + theta = self.factors["theta"] + gamma = self.factors["gamma"] + alpha_max = self.factors["alpha_max"] + alpha_0 = self.factors["alpha_0"] + epsilon_f = self.factors["epsilon_f"] + + # Upper bound and lower bound. + lower_bound = np.array(problem.lower_bounds) + upper_bound = np.array(problem.upper_bounds) + + # Initialize stepsize. + alpha = alpha_0 + + # Input inequality and equlaity constraint matrix and vector. + # Cix <= di + # Cex = de + Ci = problem.Ci + di = problem.di + Ce = problem.Ce + de = problem.de + + # Checker for whether the problem is unconstrained. + unconstr_flag = (Ce is None) & (Ci is None) & (di is None) & (de is None) & (all(np.isinf(lower_bound))) & (all(np.isinf(upper_bound))) + + # Start with the initial solution. + new_solution = self.create_new_solution(problem.factors["initial_solution"], problem) + new_x = new_solution.x + + # If the initial solution is not feasible, generate one using phase one simplex. + if (not unconstr_flag) & (not self._feasible(new_x, problem, tol)): + new_x = self.find_feasible_initial(problem, Ce, Ci, de, di, tol) + new_solution = self.create_new_solution(tuple(new_x), problem) + + # Use r simulated observations to estimate the objective value. 
+ problem.simulate(new_solution, r) + expended_budget += r + best_solution = new_solution + recommended_solns.append(new_solution) + intermediate_budgets.append(expended_budget) + + while expended_budget < problem.factors["budget"]: + new_x = new_solution.x + # Check variable bounds. + # forward = np.isclose(new_x, lower_bound, atol = tol).astype(int) + # backward = np.isclose(new_x, upper_bound, atol = tol).astype(int) + # # BdsCheck: 1 stands for forward, -1 stands for backward, 0 means central diff. + # BdsCheck = np.subtract(forward, backward) + + if problem.gradient_available: + # Use IPA gradient if available. + grad = -1 * problem.minmax[0] * new_solution.objectives_gradients_mean[0] + else: + # Use finite difference to estimate gradient if IPA gradient is not available. + grad, budget_spent = self.finite_diff(new_solution, problem, r, stepsize=alpha) + expended_budget += budget_spent + # A while loop to prevent zero gradient. + while np.all((grad == 0)): + if expended_budget > problem.factors["budget"]: + break + grad, budget_spent = self.finite_diff(new_solution, problem, r) + expended_budget += budget_spent + # Update r after each iteration. + r = int(self.factors["lambda"] * r) + + # Get search direction by taking negative normalized gradient. + dir = -grad / np.linalg.norm(grad) + + # Get a temp solution. + temp_x = new_x + alpha * dir + + if unconstr_flag or self._feasible(temp_x, problem, tol): + candidate_solution = self.create_new_solution(tuple(temp_x), problem) + else: + # If not feasible, project temp_x back to the feasible set. + proj_x = self.project_grad(problem, temp_x, Ce, Ci, de, di) + candidate_solution = self.create_new_solution(tuple(proj_x), problem) + # Get new search direction based on projection. + dir = proj_x - new_x + + # Use r simulated observations to estimate the objective value. + problem.simulate(candidate_solution, r) + expended_budget += r + + # Check the modified Armijo condition for sufficient decrease. 
+ if (-1 * problem.minmax[0] * candidate_solution.objectives_mean) <= ( + -1 * problem.minmax[0] * new_solution.objectives_mean + alpha * theta * np.dot(grad, dir) + 2 * epsilon_f): + # Successful step + new_solution = candidate_solution + # Enlarge step size. + alpha = min(alpha_max, alpha / gamma) + else: + # Unsuccessful step - reduce step size. + alpha = gamma * alpha + # Append new solution. + if (problem.minmax[0] * new_solution.objectives_mean > problem.minmax[0] * best_solution.objectives_mean): + best_solution = new_solution + recommended_solns.append(new_solution) + intermediate_budgets.append(expended_budget) + + return recommended_solns, intermediate_budgets + + + def finite_diff(self, new_solution, problem, r, stepsize = 1e-5, tol = 1e-7): + ''' + Finite difference for approximating objective gradient at new_solution. + + Arguments + --------- + new_solution : Solution object + a solution to the problem + problem : Problem object + simulation-optimization problem to solve + r : int + number of replications taken at each solution + stepsize: float + step size for finite differences + + Returns + ------- + grad : ndarray + the estimated objective gradient at new_solution + budget_spent : int + budget spent in finite difference + ''' + Ci = problem.Ci + di = problem.di + Ce = problem.Ce + de = problem.de + + # Upper bound and lower bound. + lower_bound = np.array(problem.lower_bounds) + upper_bound = np.array(problem.upper_bounds) + + # Remove redundant upper/lower bounds. + ub_inf_idx = np.where(~np.isinf(upper_bound))[0] + lb_inf_idx = np.where(~np.isinf(lower_bound))[0] + + # Form a constraint coefficient matrix where all the equality constraints are put on top and + # all the bound constraints in the bottom and a constraint coefficient vector. 
+ if (Ce is not None) and (de is not None) and (Ci is not None) and (di is not None): + C = np.vstack((Ce, Ci)) + d = np.vstack((de.T, di.T)) + elif (Ce is not None) and (de is not None): + C = Ce + d = de.T + elif (Ci is not None) and (di is not None): + C = Ci + d = di.T + else: + C = np.empty([1, problem.dim]) + d = np.empty([1, 1]) + + if len(ub_inf_idx) > 0: + C = np.vstack((C, np.identity(upper_bound.shape[0]))) + d = np.vstack((d, upper_bound[np.newaxis].T)) + if len(lb_inf_idx) > 0: + C = np.vstack((C, -np.identity(lower_bound.shape[0]))) + d = np.vstack((d, -lower_bound[np.newaxis].T)) + + BdsCheck = np.zeros(problem.dim) + fn = -1 * problem.minmax[0] * new_solution.objectives_mean + new_x = new_solution.x + # Store values for each dimension. + FnPlusMinus = np.zeros((problem.dim, 3)) + grad = np.zeros(problem.dim) + + for i in range(problem.dim): + # Initialization. + x1 = list(new_x) + x2 = list(new_x) + # Forward stepsize. + steph1 = stepsize + # Backward stepsize. + steph2 = stepsize + + dir1 = np.zeros(problem.dim) + dir1[i] = 1 + dir2 = np.zeros(problem.dim) + dir2[i] = -1 + + ra = d.flatten() - C @ new_x + ra_d = C @ dir1 + # Initialize maximum step size. + temp_steph1 = np.inf + # Perform ratio test. + for j in range(len(ra)): + if ra_d[j] - tol > 0: + s = ra[j]/ra_d[j] + if s < steph1: + temp_steph1 = s + steph1 = min(temp_steph1, steph1) + + ra_d = C @ dir2 + # Initialize maximum step size. + temp_steph2 = np.inf + # Perform ratio test. + for j in range(len(ra)): + if ra_d[j] - tol > 0: + s = ra[j]/ra_d[j] + if s < steph2: + temp_steph2 = s + steph2 = min(temp_steph2, steph2) + + if (steph1 != 0) & (steph2 != 0): + BdsCheck[i] = 0 + elif steph1 == 0: + BdsCheck[i] = -1 + else: + BdsCheck[i] = 1 + + # Decide stepsize. + # Central diff. + if BdsCheck[i] == 0: + FnPlusMinus[i, 2] = min(steph1, steph2) + x1[i] = x1[i] + FnPlusMinus[i, 2] + x2[i] = x2[i] - FnPlusMinus[i, 2] + # Forward diff. 
+ elif BdsCheck[i] == 1: + FnPlusMinus[i, 2] = steph1 + x1[i] = x1[i] + FnPlusMinus[i, 2] + # Backward diff. + else: + FnPlusMinus[i, 2] = steph2 + x2[i] = x2[i] - FnPlusMinus[i, 2] + + x1_solution = self.create_new_solution(tuple(x1), problem) + if BdsCheck[i] != -1: + problem.simulate_up_to([x1_solution], r) + fn1 = -1 * problem.minmax[0] * x1_solution.objectives_mean + # First column is f(x+h,y). + FnPlusMinus[i, 0] = fn1 + x2_solution = self.create_new_solution(tuple(x2), problem) + if BdsCheck[i] != 1: + problem.simulate_up_to([x2_solution], r) + fn2 = -1 * problem.minmax[0] * x2_solution.objectives_mean + # Second column is f(x-h,y). + FnPlusMinus[i, 1] = fn2 + + # Calculate gradient. + if BdsCheck[i] == 0: + grad[i] = (fn1 - fn2) / (2 * FnPlusMinus[i, 2]) + elif BdsCheck[i] == 1: + grad[i] = (fn1 - fn) / FnPlusMinus[i, 2] + elif BdsCheck[i] == -1: + grad[i] = (fn - fn2) / FnPlusMinus[i, 2] + budget_spent = (2 * problem.dim - np.sum(BdsCheck != 0)) * r + return grad, budget_spent + + def _feasible(self, x, problem, tol): + """ + Check whether a solution x is feasible to the problem. + + Arguments + --------- + x : tuple + a solution vector + problem : Problem object + simulation-optimization problem to solve + tol: float + Floating point comparison tolerance + """ + x = np.asarray(x) + lb = np.asarray(problem.lower_bounds) + ub = np.asarray(problem.upper_bounds) + res = True + if (problem.Ci is not None) and (problem.di is not None): + res = res & np.all(problem.Ci @ x <= problem.di + tol) + if (problem.Ce is not None) and (problem.de is not None): + res = res & (np.allclose(np.dot(problem.Ce, x), problem.de, rtol=0, atol=tol)) + return res & (np.all(x >= lb)) & (np.all(x <= ub)) + + def project_grad(self, problem, x, Ae, Ai, be, bi): + """ + Project the vector x onto the hyperplane H: Ae x = be, Ai x <= bi by solving a quadratic projection problem: + + min d^Td + s.t. 
Ae(x + d) = be + Ai(x + d) <= bi + (x + d) >= lb + (x + d) <= ub + + Arguments + --------- + problem : Problem object + simulation-optimization problem to solve + x : ndarray + vector to be projected + Ae: ndarray + equality constraint coefficient matrix + be: ndarray + equality constraint coefficient vector + Ai: ndarray + inequality constraint coefficient matrix + bi: ndarray + inequality constraint coefficient vector + Returns + ------- + x_new : ndarray + the projected vector + """ + # Define variables. + d = cp.Variable(problem.dim) + + # Define objective. + obj = cp.Minimize(cp.quad_form(d, np.identity(problem.dim))) + + # Define constraints. + constraints = [] + if (Ae is not None) and (be is not None): + constraints.append(Ae @ (x + d) == be.ravel()) + if (Ai is not None) and (bi is not None): + constraints.append(Ai @ (x + d) <= bi.ravel()) + + upper_bound = np.array(problem.upper_bounds) + lower_bound = np.array(problem.lower_bounds) + # Removing redundant bound constraints. + ub_inf_idx = np.where(~np.isinf(upper_bound))[0] + if len(ub_inf_idx) > 0: + for i in ub_inf_idx: + constraints.append((x + d)[i] <= upper_bound[i]) + lb_inf_idx = np.where(~np.isinf(lower_bound))[0] + if len(lb_inf_idx) > 0: + for i in lb_inf_idx: + constraints.append((x + d)[i] >= lower_bound[i]) + + # Form and solve problem. + prob = cp.Problem(obj, constraints) + prob.solve() + + # Get the projected vector. + x_new = x + d.value + + # Avoid floating point error + x_new[np.abs(x_new) < self.factors["tol"]] = 0 + + return x_new + + def find_feasible_initial(self, problem, Ae, Ai, be, bi, tol): + ''' + Find an initial feasible solution (if not user-provided) + by solving phase one simplex. 
+ + Arguments + --------- + problem : Problem object + simulation-optimization problem to solve + C: ndarray + constraint coefficient matrix + d: ndarray + constraint coefficient vector + + Returns + ------- + x0 : ndarray + an initial feasible solution + tol: float + Floating point comparison tolerance + ''' + upper_bound = np.array(problem.upper_bounds) + lower_bound = np.array(problem.lower_bounds) + + # Define decision variables. + x = cp.Variable(problem.dim) + + # Define constraints. + constraints = [] + + if (Ae is not None) and (be is not None): + constraints.append(Ae @ x == be.ravel()) + if (Ai is not None) and (bi is not None): + constraints.append(Ai @ x <= bi.ravel()) + + # Removing redundant bound constraints. + ub_inf_idx = np.where(~np.isinf(upper_bound))[0] + if len(ub_inf_idx) > 0: + for i in ub_inf_idx: + constraints.append(x[i] <= upper_bound[i]) + lb_inf_idx = np.where(~np.isinf(lower_bound))[0] + if len(lb_inf_idx) > 0: + for i in lb_inf_idx: + constraints.append(x[i] >= lower_bound[i]) + + # Define objective function. + obj = cp.Minimize(0) + + # Create problem. + model = cp.Problem(obj, constraints) + + # Solve problem. + model.solve(solver = cp.SCIPY) + + # Check for optimality. + if model.status not in [cp.OPTIMAL, cp.OPTIMAL_INACCURATE] : + raise ValueError("Could not find feasible x0") + x0 = x.value + if not self._feasible(x0, problem, tol): + raise ValueError("Could not find feasible x0") + + return x0