From 5eeffde0b36302a6ab073bd5e3e6a5de2b6f6655 Mon Sep 17 00:00:00 2001 From: "Philipp Christophel (phchri)" Date: Fri, 23 Jun 2023 09:42:08 -0400 Subject: [PATCH 001/113] Initial version of the SAS solver interfaces and unit tests. --- pyomo/solvers/plugins/solvers/SAS.py | 700 ++++++++++++++++++++++ pyomo/solvers/plugins/solvers/__init__.py | 1 + pyomo/solvers/tests/checks/test_SAS.py | 462 ++++++++++++++ 3 files changed, 1163 insertions(+) create mode 100644 pyomo/solvers/plugins/solvers/SAS.py create mode 100644 pyomo/solvers/tests/checks/test_SAS.py diff --git a/pyomo/solvers/plugins/solvers/SAS.py b/pyomo/solvers/plugins/solvers/SAS.py new file mode 100644 index 00000000000..7f50b7a2970 --- /dev/null +++ b/pyomo/solvers/plugins/solvers/SAS.py @@ -0,0 +1,700 @@ +__all__ = ['SAS'] + +import logging +import sys +import os + +from io import StringIO +from abc import ABC, abstractmethod +from contextlib import redirect_stdout + +from pyomo.opt.base import ProblemFormat, ResultsFormat, OptSolver +from pyomo.opt.base.solvers import SolverFactory +from pyomo.common.collections import Bunch +from pyomo.opt.results import ( + SolverResults, + SolverStatus, + TerminationCondition, + SolutionStatus, + ProblemSense, +) +from pyomo.common.tempfiles import TempfileManager +from pyomo.core.base import Var +from pyomo.core.base.block import _BlockData +from pyomo.core.kernel.block import IBlock + + +logger = logging.getLogger('pyomo.solvers') + + +STATUS_TO_SOLVERSTATUS = { + "OK": SolverStatus.ok, + "SYNTAX_ERROR": SolverStatus.error, + "DATA_ERROR": SolverStatus.error, + "OUT_OF_MEMORY": SolverStatus.aborted, + "IO_ERROR": SolverStatus.error, + "ERROR": SolverStatus.error, +} + +# This combines all status codes from OPTLP/solvelp and OPTMILP/solvemilp +SOLSTATUS_TO_TERMINATIONCOND = { + "OPTIMAL": TerminationCondition.optimal, + "OPTIMAL_AGAP": TerminationCondition.optimal, + "OPTIMAL_RGAP": TerminationCondition.optimal, + "OPTIMAL_COND": TerminationCondition.optimal, + "TARGET": TerminationCondition.optimal, + "CONDITIONAL_OPTIMAL": TerminationCondition.optimal, + "FEASIBLE": TerminationCondition.feasible, + "INFEASIBLE": TerminationCondition.infeasible, + "UNBOUNDED": TerminationCondition.unbounded, + "INFEASIBLE_OR_UNBOUNDED": TerminationCondition.infeasibleOrUnbounded, + "SOLUTION_LIM": TerminationCondition.maxEvaluations, + "NODE_LIM_SOL": TerminationCondition.maxEvaluations, + "NODE_LIM_NOSOL": TerminationCondition.maxEvaluations, + "ITERATION_LIMIT_REACHED": TerminationCondition.maxIterations, + "TIME_LIM_SOL": TerminationCondition.maxTimeLimit, + "TIME_LIM_NOSOL": TerminationCondition.maxTimeLimit, + "TIME_LIMIT_REACHED": TerminationCondition.maxTimeLimit, + "ABORTED": TerminationCondition.userInterrupt, + "ABORT_SOL": TerminationCondition.userInterrupt, + "ABORT_NOSOL": TerminationCondition.userInterrupt, + "OUTMEM_SOL": TerminationCondition.solverFailure, + "OUTMEM_NOSOL": TerminationCondition.solverFailure, + "FAILED": TerminationCondition.solverFailure, + "FAIL_SOL": TerminationCondition.solverFailure, + "FAIL_NOSOL": TerminationCondition.solverFailure, +} + + +SOLSTATUS_TO_MESSAGE = { + "OPTIMAL": "The solution is optimal.", + "OPTIMAL_AGAP": "The solution is optimal within the absolute gap specified by the ABSOBJGAP= option.", + "OPTIMAL_RGAP": "The solution is optimal within the relative gap specified by the RELOBJGAP= option.", + "OPTIMAL_COND": "The solution is optimal, but some infeasibilities (primal, bound, or integer) exceed tolerances due to scaling or choice of a small INTTOL= 
value.", + "TARGET": "The solution is not worse than the target specified by the TARGET= option.", + "CONDITIONAL_OPTIMAL": "The solution is optimal, but some infeasibilities (primal, dual or bound) exceed tolerances due to scaling or preprocessing.", + "FEASIBLE": "The problem is feasible. This status is displayed when the IIS=TRUE option is specified and the problem is feasible.", + "INFEASIBLE": "The problem is infeasible.", + "UNBOUNDED": "The problem is unbounded.", + "INFEASIBLE_OR_UNBOUNDED": "The problem is infeasible or unbounded.", + "SOLUTION_LIM": "The solver reached the maximum number of solutions specified by the MAXSOLS= option.", + "NODE_LIM_SOL": "The solver reached the maximum number of nodes specified by the MAXNODES= option and found a solution.", + "NODE_LIM_NOSOL": "The solver reached the maximum number of nodes specified by the MAXNODES= option and did not find a solution.", + "ITERATION_LIMIT_REACHED": "The maximum allowable number of iterations was reached.", + "TIME_LIM_SOL": "The solver reached the execution time limit specified by the MAXTIME= option and found a solution.", + "TIME_LIM_NOSOL": "The solver reached the execution time limit specified by the MAXTIME= option and did not find a solution.", + "TIME_LIMIT_REACHED": "The solver reached its execution time limit.", + "ABORTED": "The solver was interrupted externally.", + "ABORT_SOL": "The solver was stopped by the user but still found a solution.", + "ABORT_NOSOL": "The solver was stopped by the user and did not find a solution.", + "OUTMEM_SOL": "The solver ran out of memory but still found a solution.", + "OUTMEM_NOSOL": "The solver ran out of memory and either did not find a solution or failed to output the solution due to insufficient memory.", + "FAILED": "The solver failed to converge, possibly due to numerical issues.", + "FAIL_SOL": "The solver stopped due to errors but still found a solution.", + "FAIL_NOSOL": "The solver stopped due to errors and did not find a solution.", +} + + +CAS_OPTION_NAMES = [ + "hostname", + "port", + "username", + "password", + "session", + "locale", + "name", + "nworkers", + "authinfo", + "protocol", + "path", + "ssl_ca_list", + "authcode", +] + + +@SolverFactory.register('sas', doc='The SAS LP/MIP solver') +class SAS(OptSolver): + """The SAS optimization solver""" + + def __new__(cls, *args, **kwds): + mode = kwds.pop('solver_io', None) + if mode != None: + return SolverFactory(mode) + else: + # Choose solver factory automatically + # bassed on what can be loaded. + s = SolverFactory('_sas94', **kwds) + if not s.available(): + s = SolverFactory('_sascas', **kwds) + return s + + +class SASAbc(ABC, OptSolver): + """Abstract base class for the SAS solver interfaces. Simply to avoid code duplication.""" + + def __init__(self, **kwds): + """Initialize the SAS solver interfaces.""" + kwds['type'] = 'sas' + super(SASAbc, self).__init__(**kwds) + + # + # Set up valid problem formats and valid results for each + # problem format + # + self._valid_problem_formats = [ProblemFormat.mps] + self._valid_result_formats = {ProblemFormat.mps: [ResultsFormat.soln]} + + self._keepfiles = False + self._capabilities.linear = True + self._capabilities.integer = True + + super(SASAbc, self).set_problem_format(ProblemFormat.mps) + + def _presolve(self, *args, **kwds): + """ "Set things up for the actual solve.""" + # create a context in the temporary file manager for + # this plugin - is "pop"ed in the _postsolve method. 
+ TempfileManager.push() + + # Get the warmstart flag + self.warmstart_flag = kwds.pop('warmstart', False) + + # Call parent presolve function + super(SASAbc, self)._presolve(*args, **kwds) + + # Store the model, too bad this is not done in the base class + for arg in args: + if isinstance(arg, (_BlockData, IBlock)): + # Store the instance + self._instance = arg + self._vars = [] + for block in self._instance.block_data_objects(active=True): + for vardata in block.component_data_objects( + Var, active=True, descend_into=False + ): + self._vars.append(vardata) + # Store the symbal map, we need this for example when writing the warmstart file + if isinstance(self._instance, IBlock): + self._smap = getattr(self._instance, "._symbol_maps")[self._smap_id] + else: + self._smap = self._instance.solutions.symbol_map[self._smap_id] + + # Create the primalin data + if self.warmstart_flag: + filename = self._warm_start_file_name = TempfileManager.create_tempfile( + ".sol", text=True + ) + smap = self._smap + numWritten = 0 + with open(filename, 'w') as file: + file.write('_VAR_,_VALUE_\n') + for var in self._vars: + if (var.value is not None) and (id(var) in smap.byObject): + name = smap.byObject[id(var)] + file.write( + "{name},{value}\n".format(name=name, value=var.value) + ) + numWritten += 1 + if numWritten == 0: + # No solution available, disable warmstart + self.warmstart_flag = False + + def available(self, exception_flag=False): + """True if the solver is available""" + return self._python_api_exists + + def _has_integer_variables(self): + """True if the problem has integer variables.""" + for vardata in self._vars: + if vardata.is_binary() or vardata.is_integer(): + return True + return False + + def _create_results_from_status(self, status, solution_status): + """Create a results object and set the status code and messages.""" + results = SolverResults() + results.solver.name = "SAS" + results.solver.status = STATUS_TO_SOLVERSTATUS[status] + if results.solver.status == SolverStatus.ok: + results.solver.termination_condition = SOLSTATUS_TO_TERMINATIONCOND[ + solution_status + ] + results.solver.message = ( + results.solver.termination_message + ) = SOLSTATUS_TO_MESSAGE[solution_status] + results.solver.status = TerminationCondition.to_solver_status( + results.solver.termination_condition + ) + elif results.solver.status == SolverStatus.aborted: + results.solver.termination_condition = TerminationCondition.userInterrupt + results.solver.message = ( + results.solver.termination_message + ) = SOLSTATUS_TO_MESSAGE["ABORTED"] + else: + results.solver.termination_condition = TerminationCondition.error + results.solver.message = ( + results.solver.termination_message + ) = SOLSTATUS_TO_MESSAGE["FAILED"] + return results + + @abstractmethod + def _apply_solver(self): + """The routine that performs the solve""" + raise NotImplemented("This is an abstract function and thus not implemented!") + + def _postsolve(self): + """Clean up at the end, especially the temp files.""" + # Let the base class deal with returning results. + results = super(SASAbc, self)._postsolve() + + # Finally, clean any temporary files registered with the temp file + # manager, created populated *directly* by this plugin. does not + # include, for example, the execution script. but does include + # the warm-start file. 
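        # For reference, the warm-start file mentioned above is the small CSV
        # that _presolve writes when solve() is called with warmstart=True on
        # a MILP; the names are the MPS symbols from the symbol map.  A sketch
        # of its contents (variable names and values are hypothetical):
        #
        #     _VAR_,_VALUE_
        #     x1,0.0
        #     x2,3.0
        #
        # and of the corresponding user-side call (opt and model are assumed
        # to be a SAS solver object and a Pyomo model):
        #
        #     results = opt.solve(model, warmstart=True)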
+ TempfileManager.pop(remove=not self._keepfiles) + + return results + + def warm_start_capable(self): + """True if the solver interface supports MILP warmstarting.""" + return True + + +@SolverFactory.register('_sas94', doc='SAS 9.4 interface') +class SAS94(SASAbc): + """ + Solver interface for SAS 9.4 using saspy. See the saspy documentation about + how to create a connection. + """ + + def __init__(self, **kwds): + """Initialize the solver interface and see if the saspy package is available.""" + super(SAS94, self).__init__(**kwds) + + try: + import saspy + + self._sas = saspy + except ImportError: + self._python_api_exists = False + except Exception as e: + self._python_api_exists = False + # For other exceptions, raise it so that it does not get lost + raise e + else: + self._python_api_exists = True + self._sas.logger.setLevel(logger.level) + + def _create_statement_str(self, statement): + """Helper function to create the strings for the statements of the proc OPTLP/OPTMILP code.""" + stmt = self.options.pop(statement, None) + if stmt: + return ( + statement.strip() + + " " + + " ".join(option + "=" + str(value) for option, value in stmt.items()) + + ";" + ) + else: + return "" + + def _apply_solver(self): + """ "Prepare the options and run the solver. Then store the data to be returned.""" + logger.debug("Running SAS") + + # Set return code to issue an error if we get interrupted + self._rc = -1 + + # Figure out if the problem has integer variables + with_opt = self.options.pop("with", None) + if with_opt == "lp": + proc = "OPTLP" + elif with_opt == "milp": + proc = "OPTMILP" + else: + # Check if there are integer variables, this might be slow + proc = "OPTMILP" if self._has_integer_variables() else "OPTLP" + + # Remove CAS options in case they were specified + for opt in CAS_OPTION_NAMES: + self.options.pop(opt, None) + + # Get the rootnode options + decomp_str = self._create_statement_str("decomp") + decompmaster_str = self._create_statement_str("decompmaster") + decompmasterip_str = self._create_statement_str("decompmasterip") + decompsubprob_str = self._create_statement_str("decompsubprob") + rootnode_str = self._create_statement_str("rootnode") + + # Handle warmstart + warmstart_str = "" + if self.warmstart_flag: + # Set the warmstart basis option + if proc != "OPTLP": + warmstart_str = """ + proc import datafile='{primalin}' + out=primalin + dbms=csv + replace; + getnames=yes; + run; + """.format( + primalin=self._warm_start_file_name + ) + self.options["primalin"] = "primalin" + + # Convert options to string + opt_str = " ".join( + option + "=" + str(value) for option, value in self.options.items() + ) + + # Start a SAS session, submit the code and return the results`` + with self._sas.SASsession() as sas: + # Find the version of 9.4 we are using + if sas.sasver.startswith("9.04.01M5"): + # In 9.4M5 we have to create an MPS data set from an MPS file first + # Earlier versions will not work because the MPS format in incompatible + res = sas.submit( + """ + {warmstart} + %MPS2SASD(MPSFILE="{mpsfile}", OUTDATA=mpsdata, MAXLEN=256, FORMAT=FREE); + proc {proc} data=mpsdata {options} primalout=primalout dualout=dualout; + {decomp} + {decompmaster} + {decompmasterip} + {decompsubprob} + {rootnode} + run; + """.format( + warmstart=warmstart_str, + proc=proc, + mpsfile=self._problem_files[0], + options=opt_str, + decomp=decomp_str, + decompmaster=decompmaster_str, + decompmasterip=decompmasterip_str, + decompsubprob=decompsubprob_str, + rootnode=rootnode_str, + ), + results="TEXT", + 
) + else: + # Since 9.4M6+ optlp/optmilp can read mps files directly + res = sas.submit( + """ + {warmstart} + proc {proc} mpsfile=\"{mpsfile}\" {options} primalout=primalout dualout=dualout; + {decomp} + {decompmaster} + {decompmasterip} + {decompsubprob} + {rootnode} + run; + """.format( + warmstart=warmstart_str, + proc=proc, + mpsfile=self._problem_files[0], + options=opt_str, + decomp=decomp_str, + decompmaster=decompmaster_str, + decompmasterip=decompmasterip_str, + decompsubprob=decompsubprob_str, + rootnode=rootnode_str, + ), + results="TEXT", + ) + + # Store log and ODS output + self._log = res["LOG"] + self._lst = res["LST"] + # Print log if requested by the user + if self._tee: + print(self._log) + if "ERROR 22-322: Syntax error" in self._log: + raise ValueError( + "An option passed to the SAS solver caused a syntax error: {log}".format( + log=self._log + ) + ) + self._macro = dict( + (key.strip(), value.strip()) + for key, value in ( + pair.split("=") for pair in sas.symget("_OR" + proc + "_").split() + ) + ) + primal_out = sas.sd2df("primalout") + dual_out = sas.sd2df("dualout") + + # Prepare the solver results + results = self.results = self._create_results_from_status( + self._macro.get("STATUS", "ERROR"), self._macro.get("SOLUTION_STATUS","ERROR") + ) + + if "Objective Sense Maximization" in self._lst: + results.problem.sense = ProblemSense.maximize + else: + results.problem.sense = ProblemSense.minimize + + # Prepare the solution information + if results.solver.termination_condition == TerminationCondition.optimal: + sol = results.solution.add() + + # Store status in solution + sol.status = SolutionStatus.feasible + sol.termination_condition = TerminationCondition.optimal + + # Store objective value in solution + sol.objective['__default_objective__'] = {'Value': self._macro["OBJECTIVE"]} + + if proc == "OPTLP": + # Convert primal out data set to variable dictionary + # Use panda functions for efficiency + primal_out = primal_out[['_VAR_', '_VALUE_', '_STATUS_', '_R_COST_']] + primal_out = primal_out.set_index('_VAR_', drop=True) + primal_out = primal_out.rename( + {'_VALUE_': 'Value', '_STATUS_': 'Status', '_R_COST_': 'rc'}, + axis='columns', + ) + sol.variable = primal_out.to_dict('index') + + # Convert dual out data set to constraint dictionary + # Use pandas functions for efficiency + dual_out = dual_out[['_ROW_', '_VALUE_', '_STATUS_', '_ACTIVITY_']] + dual_out = dual_out.set_index('_ROW_', drop=True) + dual_out = dual_out.rename( + {'_VALUE_': 'dual', '_STATUS_': 'Status', '_ACTIVITY_': 'slack'}, + axis='columns', + ) + sol.constraint = dual_out.to_dict('index') + else: + # Convert primal out data set to variable dictionary + # Use pandas functions for efficiency + primal_out = primal_out[['_VAR_', '_VALUE_']] + primal_out = primal_out.set_index('_VAR_', drop=True) + primal_out = primal_out.rename({'_VALUE_': 'Value'}, axis='columns') + sol.variable = primal_out.to_dict('index') + + self._rc = 0 + return Bunch(rc=self._rc, log=self._log) + + +class SASLogWriter: + """Helper class to take the log from stdout and put it also in a StringIO.""" + + def __init__(self, tee): + """Set up the two outputs.""" + self.tee = tee + self._log = StringIO() + self.stdout = sys.stdout + + def write(self, message): + """If the tee options is specified, write to both outputs.""" + if self.tee: + self.stdout.write(message) + self._log.write(message) + + def flush(self): + """Nothing to do, just here for compatibility reasons.""" + # Do nothing since we flush right away + pass + + def 
log(self): + """ "Get the log as a string.""" + return self._log.getvalue() + + +@SolverFactory.register('_sascas', doc='SAS Viya CAS Server interface') +class SASCAS(SASAbc): + """ + Solver interface connection to a SAS Viya CAS server using swat. + See the documentation for the swat package about how to create a connection. + The swat connection options can be passed as options to the solve function. + """ + + def __init__(self, **kwds): + """Initialize and try to load the swat package.""" + super(SASCAS, self).__init__(**kwds) + + try: + import swat + + self._sas = swat + except ImportError: + self._python_api_exists = False + except Exception as e: + self._python_api_exists = False + # For other exceptions, raise it so that it does not get lost + raise e + else: + self._python_api_exists = True + + def _apply_solver(self): + """ "Prepare the options and run the solver. Then store the data to be returned.""" + logger.debug("Running SAS Viya") + + # Set return code to issue an error if we get interrupted + self._rc = -1 + + # Extract CAS connection options + cas_opts = {} + for opt in CAS_OPTION_NAMES: + val = self.options.pop(opt, None) + if val != None: + cas_opts[opt] = val + + # Figure out if the problem has integer variables + with_opt = self.options.pop("with", None) + if with_opt == "lp": + action = "solveLp" + elif with_opt == "milp": + action = "solveMilp" + else: + # Check if there are integer variables, this might be slow + action = "solveMilp" if self._has_integer_variables() else "solveLp" + + # Connect to CAS server + with redirect_stdout(SASLogWriter(self._tee)) as self._log_writer: + s = self._sas.CAS(**cas_opts) + try: + # Load the optimization action set + s.loadactionset('optimization') + + # Upload mps file to CAS + if os.stat(self._problem_files[0]).st_size >= 2 * 1024**3: + # For large files, use convertMPS, first create file for upload + mpsWithIdFileName = TempfileManager.create_tempfile( + ".mps.csv", text=True + ) + with open(mpsWithIdFileName, 'w') as mpsWithId: + mpsWithId.write('_ID_\tText\n') + with open(self._problem_files[0], 'r') as f: + id = 0 + for line in f: + id += 1 + mpsWithId.write(str(id) + '\t' + line.rstrip() + '\n') + + # Upload .mps.csv file + s.upload_file( + mpsWithIdFileName, + casout={"name": "mpscsv", "replace": True}, + importoptions={"filetype": "CSV", "delimiter": "\t"}, + ) + + # Convert .mps.csv file to .mps + s.optimization.convertMps( + data="mpscsv", + casOut={"name": "mpsdata", "replace": True}, + format="FREE", + ) + else: + # For small files, use loadMPS + with open(self._problem_files[0], 'r') as mps_file: + s.optimization.loadMps( + mpsFileString=mps_file.read(), + casout={"name": "mpsdata", "replace": True}, + format="FREE", + ) + + if self.warmstart_flag: + # Upload warmstart file to CAS + s.upload_file( + self._warm_start_file_name, + casout={"name": "primalin", "replace": True}, + importoptions={"filetype": "CSV"}, + ) + self.options["primalin"] = "primalin" + + # Solve the problem in CAS + if action == "solveMilp": + r = s.optimization.solveMilp( + data={"name": "mpsdata"}, + primalOut={"name": "primalout", "replace": True}, + **self.options + ) + else: + r = s.optimization.solveLp( + data={"name": "mpsdata"}, + primalOut={"name": "primalout", "replace": True}, + dualOut={"name": "dualout", "replace": True}, + **self.options + ) + + # Prepare the solver results + if r: + # Get back the primal and dual solution data sets + results = self.results = self._create_results_from_status( + r.get("status", "ERROR"), 
r.get("solutionStatus", "ERROR") + ) + + if r.ProblemSummary["cValue1"][1] == "Maximization": + results.problem.sense = ProblemSense.maximize + else: + results.problem.sense = ProblemSense.minimize + + # Prepare the solution information + if ( + results.solver.termination_condition + == TerminationCondition.optimal + ): + sol = results.solution.add() + + # Store status in solution + sol.status = SolutionStatus.feasible + sol.termination_condition = TerminationCondition.optimal + + # Store objective value in solution + sol.objective['__default_objective__'] = { + 'Value': r["objective"] + } + + if action == "solveMilp": + primal_out = s.CASTable(name="primalout") + # Use pandas functions for efficiency + primal_out = primal_out[['_VAR_', '_VALUE_']] + sol.variable = {} + for row in primal_out.itertuples(index=False): + sol.variable[row[0]] = {'Value': row[1]} + else: + # Convert primal out data set to variable dictionary + # Use panda functions for efficiency + primal_out = s.CASTable(name="primalout") + primal_out = primal_out[ + ['_VAR_', '_VALUE_', '_STATUS_', '_R_COST_'] + ] + sol.variable = {} + for row in primal_out.itertuples(index=False): + sol.variable[row[0]] = { + 'Value': row[1], + 'Status': row[2], + 'rc': row[3], + } + + # Convert dual out data set to constraint dictionary + # Use pandas functions for efficiency + dual_out = s.CASTable(name="dualout") + dual_out = dual_out[ + ['_ROW_', '_VALUE_', '_STATUS_', '_ACTIVITY_'] + ] + sol.constraint = {} + for row in dual_out.itertuples(index=False): + sol.constraint[row[0]] = { + 'dual': row[1], + 'Status': row[2], + 'slack': row[3], + } + else: + results = self.results = SolverResults() + results.solver.name = "SAS" + results.solver.status = SolverStatus.error + raise ValueError( + "An option passed to the SAS solver caused a syntax error." 
+ ) + + finally: + s.close() + + self._log = self._log_writer.log() + if self._tee: + print(self._log) + self._rc = 0 + return Bunch(rc=self._rc, log=self._log) diff --git a/pyomo/solvers/plugins/solvers/__init__.py b/pyomo/solvers/plugins/solvers/__init__.py index c5fbfa97e42..23b7fe06526 100644 --- a/pyomo/solvers/plugins/solvers/__init__.py +++ b/pyomo/solvers/plugins/solvers/__init__.py @@ -30,3 +30,4 @@ import pyomo.solvers.plugins.solvers.mosek_persistent import pyomo.solvers.plugins.solvers.xpress_direct import pyomo.solvers.plugins.solvers.xpress_persistent +import pyomo.solvers.plugins.solvers.SAS diff --git a/pyomo/solvers/tests/checks/test_SAS.py b/pyomo/solvers/tests/checks/test_SAS.py new file mode 100644 index 00000000000..4592343b17f --- /dev/null +++ b/pyomo/solvers/tests/checks/test_SAS.py @@ -0,0 +1,462 @@ +import os +import pyomo.common.unittest as unittest +from pyomo.environ import ( + ConcreteModel, + Var, + Objective, + Constraint, + NonNegativeIntegers, + NonNegativeReals, + Reals, + Integers, + maximize, + minimize, + Suffix, +) +from pyomo.opt.results import ( + SolverStatus, + TerminationCondition, + ProblemSense, +) +from pyomo.opt import ( + SolverFactory, + check_available_solvers, +) + + +CAS_OPTIONS = { + "hostname": os.environ.get('CAS_SERVER', None), + "port": os.environ.get('CAS_PORT', None), + "authinfo": os.environ.get('CAS_AUTHINFO', None), +} + + +sas_available = check_available_solvers('sas') + + +class SASTestAbc: + solver_io = '_sas94' + base_options = {} + + def setObj(self): + X = self.instance.X + self.instance.Obj = Objective( + expr=2 * X[1] - 3 * X[2] - 4 * X[3], sense=minimize + ) + + def setX(self): + self.instance.X = Var([1, 2, 3], within=NonNegativeReals) + + def setUp(self): + instance = self.instance = ConcreteModel() + self.setX() + X = instance.X + instance.R1 = Constraint(expr=-2 * X[2] - 3 * X[3] >= -5) + instance.R2 = Constraint(expr=X[1] + X[2] + 2 * X[3] <= 4) + instance.R3 = Constraint(expr=X[1] + 2 * X[2] + 3 * X[3] <= 7) + self.setObj() + + # Declare suffixes for solution information + instance.status = Suffix(direction=Suffix.IMPORT) + instance.slack = Suffix(direction=Suffix.IMPORT) + instance.rc = Suffix(direction=Suffix.IMPORT) + instance.dual = Suffix(direction=Suffix.IMPORT) + + self.opt_sas = SolverFactory('sas', solver_io=self.solver_io) + + def tearDown(self): + del self.opt_sas + del self.instance + + def run_solver(self, **kwargs): + opt_sas = self.opt_sas + instance = self.instance + + # Add base options for connection data etc. 
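        # The connection data merged in below comes from the CAS_OPTIONS block
        # at the top of this file; the _sascas interface pops those keys (see
        # CAS_OPTION_NAMES in SAS.py) and hands them to swat.CAS().  A sketch
        # of an equivalent direct call, with hypothetical server values:
        #
        #     opts = {
        #         "hostname": os.environ.get("CAS_SERVER", "cas.example.com"),
        #         "port": os.environ.get("CAS_PORT", "5570"),
        #         "authinfo": os.environ.get("CAS_AUTHINFO", None),
        #         "with": "milp",  # skip the integer-variable auto-detection
        #     }
        #     results = opt_sas.solve(instance, options=opts)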
+ options = kwargs.get("options", {}) + if self.base_options: + kwargs["options"] = {**options, **self.base_options} + + # Call the solver + self.results = opt_sas.solve(instance, **kwargs) + + +class SASTestLP(SASTestAbc, unittest.TestCase): + def checkSolution(self): + instance = self.instance + results = self.results + # Get the objective sense, we use the same code for minimization and maximization tests + sense = instance.Obj.sense + + # Check status + self.assertEqual(results.solver.status, SolverStatus.ok) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) + + # Check objective value + self.assertAlmostEqual(instance.Obj(), sense * -7.5) + + # Check primal solution values + self.assertAlmostEqual(instance.X[1].value, 0.0) + self.assertAlmostEqual(instance.X[2].value, 2.5) + self.assertAlmostEqual(instance.X[3].value, 0.0) + + # Check reduced cost + self.assertAlmostEqual(instance.rc[instance.X[1]], sense * 2.0) + self.assertAlmostEqual(instance.rc[instance.X[2]], sense * 0.0) + self.assertAlmostEqual(instance.rc[instance.X[3]], sense * 0.5) + + # Check slack + self.assertAlmostEqual(instance.slack[instance.R1], -5.0) + self.assertAlmostEqual(instance.slack[instance.R2], 2.5) + self.assertAlmostEqual(instance.slack[instance.R3], 5.0) + + # Check dual solution + self.assertAlmostEqual(instance.dual[instance.R1], sense * 1.5) + self.assertAlmostEqual(instance.dual[instance.R2], sense * 0.0) + self.assertAlmostEqual(instance.dual[instance.R3], sense * 0.0) + + # Check basis status + self.assertEqual(instance.status[instance.X[1]], 'L') + self.assertEqual(instance.status[instance.X[2]], 'B') + self.assertEqual(instance.status[instance.X[3]], 'L') + self.assertEqual(instance.status[instance.R1], 'U') + self.assertEqual(instance.status[instance.R2], 'B') + self.assertEqual(instance.status[instance.R3], 'B') + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_default(self): + self.run_solver() + self.checkSolution() + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_primal(self): + self.run_solver(options={"algorithm": "ps"}) + self.assertIn("NOTE: The Primal Simplex algorithm is used.", self.opt_sas._log) + self.checkSolution() + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_ipm(self): + self.run_solver(options={"algorithm": "ip"}) + self.assertIn("NOTE: The Interior Point algorithm is used.", self.opt_sas._log) + self.checkSolution() + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_intoption(self): + self.run_solver(options={"maxiter": 20}) + self.checkSolution() + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_invalidoption(self): + with self.assertRaisesRegex(ValueError, "syntax error"): + self.run_solver(options={"foo": "bar"}) + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_max(self): + X = self.instance.X + self.instance.Obj.set_value(expr=-2 * X[1] + 3 * X[2] + 4 * X[3]) + self.instance.Obj.sense = maximize + self.run_solver() + self.checkSolution() + self.assertEqual(self.results.problem.sense, ProblemSense.maximize) + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_infeasible(self): + instance = self.instance + X = instance.X + instance.R4 = Constraint(expr=-2 * X[2] - 3 * X[3] <= -6) + self.run_solver() + results = self.results + 
self.assertEqual(results.solver.status, SolverStatus.warning) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.infeasible + ) + self.assertEqual(results.solver.message, "The problem is infeasible.") + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_infeasible_or_unbounded(self): + self.instance.X.domain = Reals + self.run_solver() + results = self.results + self.assertEqual(results.solver.status, SolverStatus.warning) + self.assertEqual( + results.solver.termination_condition, + TerminationCondition.infeasibleOrUnbounded, + ) + self.assertEqual( + results.solver.message, "The problem is infeasible or unbounded." + ) + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_unbounded(self): + self.instance.X.domain = Reals + self.run_solver(options={"presolver": "none", "algorithm": "primal"}) + results = self.results + self.assertEqual(results.solver.status, SolverStatus.warning) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.unbounded + ) + self.assertEqual(results.solver.message, "The problem is unbounded.") + + def checkSolutionDecomp(self): + instance = self.instance + results = self.results + # Get the objective sense, we use the same code for minimization and maximization tests + sense = instance.Obj.sense + + # Check status + self.assertEqual(results.solver.status, SolverStatus.ok) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) + + # Check objective value + self.assertAlmostEqual(instance.Obj(), sense * -7.5) + + # Check primal solution values + self.assertAlmostEqual(instance.X[1].value, 0.0) + self.assertAlmostEqual(instance.X[2].value, 2.5) + self.assertAlmostEqual(instance.X[3].value, 0.0) + + # Check reduced cost + self.assertAlmostEqual(instance.rc[instance.X[1]], sense * 2.0) + self.assertAlmostEqual(instance.rc[instance.X[2]], sense * 0.0) + self.assertAlmostEqual(instance.rc[instance.X[3]], sense * 0.5) + + # Check slack + self.assertAlmostEqual(instance.slack[instance.R1], -5.0) + self.assertAlmostEqual(instance.slack[instance.R2], 2.5) + self.assertAlmostEqual(instance.slack[instance.R3], 5.0) + + # Check dual solution + self.assertAlmostEqual(instance.dual[instance.R1], sense * 1.5) + self.assertAlmostEqual(instance.dual[instance.R2], sense * 0.0) + self.assertAlmostEqual(instance.dual[instance.R3], sense * 0.0) + + # Don't check basis status for decomp + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_decomp(self): + self.run_solver( + options={ + "decomp": {"absobjgap": 0.0}, + "decompmaster": {"algorithm": "dual"}, + "decompsubprob": {"presolver": "none"}, + } + ) + self.assertIn( + "NOTE: The DECOMP method value DEFAULT is applied.", self.opt_sas._log + ) + self.checkSolutionDecomp() + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_iis(self): + self.run_solver(options={"iis": "true"}) + results = self.results + self.assertEqual(results.solver.status, SolverStatus.ok) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.feasible + ) + self.assertIn("NOTE: The IIS= option is enabled.", self.opt_sas._log) + self.assertEqual( + results.solver.message, + "The problem is feasible. 
This status is displayed when the IIS=TRUE option is specified and the problem is feasible.", + ) + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_maxiter(self): + self.run_solver(options={"maxiter": 1}) + results = self.results + self.assertEqual(results.solver.status, SolverStatus.ok) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.maxIterations + ) + self.assertEqual( + results.solver.message, + "The maximum allowable number of iterations was reached.", + ) + + +class SASTestLPCAS(SASTestLP): + solver_io = '_sascas' + base_options = CAS_OPTIONS + + +class SASTestMILP(SASTestAbc, unittest.TestCase): + def setX(self): + self.instance.X = Var([1, 2, 3], within=NonNegativeIntegers) + + def checkSolution(self): + instance = self.instance + results = self.results + + # Get the objective sense, we use the same code for minimization and maximization tests + sense = instance.Obj.sense + + # Check status + self.assertEqual(results.solver.status, SolverStatus.ok) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) + + # Check objective value + self.assertAlmostEqual(instance.Obj(), sense * -7) + + # Check primal solution values + self.assertAlmostEqual(instance.X[1].value, 0.0) + self.assertAlmostEqual(instance.X[2].value, 1.0) + self.assertAlmostEqual(instance.X[3].value, 1.0) + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_default(self): + self.run_solver(options={}) + self.checkSolution() + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_presolve(self): + self.run_solver(options={"presolver": "none"}) + self.assertIn( + "NOTE: The MILP presolver value NONE is applied.", self.opt_sas._log + ) + self.checkSolution() + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_intoption(self): + self.run_solver(options={"maxnodes": 20}) + self.checkSolution() + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_invalidoption(self): + with self.assertRaisesRegex(ValueError, "syntax error"): + self.run_solver(options={"foo": "bar"}) + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_max(self): + X = self.instance.X + self.instance.Obj.set_value(expr=-2 * X[1] + 3 * X[2] + 4 * X[3]) + self.instance.Obj.sense = maximize + self.run_solver() + self.checkSolution() + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_infeasible(self): + instance = self.instance + X = instance.X + instance.R4 = Constraint(expr=-2 * X[2] - 3 * X[3] <= -6) + self.run_solver() + results = self.results + self.assertEqual(results.solver.status, SolverStatus.warning) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.infeasible + ) + self.assertEqual(results.solver.message, "The problem is infeasible.") + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + @unittest.skip("Returns wrong status for some versions.") + def test_solver_infeasible_or_unbounded(self): + self.instance.X.domain = Integers + self.run_solver() + results = self.results + self.assertEqual(results.solver.status, SolverStatus.warning) + self.assertEqual( + results.solver.termination_condition, + TerminationCondition.infeasibleOrUnbounded, + ) + self.assertEqual( + results.solver.message, "The problem is infeasible or unbounded." 
+ ) + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_unbounded(self): + self.instance.X.domain = Integers + self.run_solver( + options={"presolver": "none", "rootnode": {"algorithm": "primal"}} + ) + results = self.results + self.assertEqual(results.solver.status, SolverStatus.warning) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.unbounded + ) + self.assertEqual(results.solver.message, "The problem is unbounded.") + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_decomp(self): + self.run_solver( + options={ + "decomp": {"hybrid": "off"}, + "decompmaster": {"algorithm": "dual"}, + "decompmasterip": {"presolver": "none"}, + "decompsubprob": {"presolver": "none"}, + } + ) + self.assertIn( + "NOTE: The DECOMP method value DEFAULT is applied.", self.opt_sas._log + ) + self.checkSolution() + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_rootnode(self): + self.run_solver(options={"rootnode": {"presolver": "automatic"}}) + self.checkSolution() + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_maxnodes(self): + self.run_solver(options={"maxnodes": 0}) + results = self.results + self.assertEqual(results.solver.status, SolverStatus.ok) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.maxEvaluations + ) + self.assertEqual( + results.solver.message, + "The solver reached the maximum number of nodes specified by the MAXNODES= option and found a solution.", + ) + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_maxsols(self): + self.run_solver(options={"maxsols": 1}) + results = self.results + self.assertEqual(results.solver.status, SolverStatus.ok) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.maxEvaluations + ) + self.assertEqual( + results.solver.message, + "The solver reached the maximum number of solutions specified by the MAXSOLS= option.", + ) + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_target(self): + self.run_solver(options={"target": -6.0}) + results = self.results + self.assertEqual(results.solver.status, SolverStatus.ok) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) + self.assertEqual( + results.solver.message, + "The solution is not worse than the target specified by the TARGET= option.", + ) + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_primalin(self): + X = self.instance.X + X[1] = None + X[2] = 3 + X[3] = 7 + self.run_solver(warmstart=True) + self.checkSolution() + self.assertIn( + "NOTE: The input solution is infeasible or incomplete. 
Repair heuristics are applied.", + self.opt_sas._log, + ) + + +class SASTestMILPCAS(SASTestMILP): + solver_io = '_sascas' + base_options = CAS_OPTIONS + + +if __name__ == '__main__': + unittest.main() From be423b99adfa97a6cc3d3cb20985398891acecbe Mon Sep 17 00:00:00 2001 From: "Philipp Christophel (phchri)" Date: Fri, 23 Jun 2023 09:56:54 -0400 Subject: [PATCH 002/113] Just some black adjustments --- pyomo/solvers/plugins/solvers/SAS.py | 3 ++- pyomo/solvers/tests/checks/test_SAS.py | 11 ++--------- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/pyomo/solvers/plugins/solvers/SAS.py b/pyomo/solvers/plugins/solvers/SAS.py index 7f50b7a2970..ed0e63d44d6 100644 --- a/pyomo/solvers/plugins/solvers/SAS.py +++ b/pyomo/solvers/plugins/solvers/SAS.py @@ -427,7 +427,8 @@ def _apply_solver(self): # Prepare the solver results results = self.results = self._create_results_from_status( - self._macro.get("STATUS", "ERROR"), self._macro.get("SOLUTION_STATUS","ERROR") + self._macro.get("STATUS", "ERROR"), + self._macro.get("SOLUTION_STATUS", "ERROR"), ) if "Objective Sense Maximization" in self._lst: diff --git a/pyomo/solvers/tests/checks/test_SAS.py b/pyomo/solvers/tests/checks/test_SAS.py index 4592343b17f..654820f5060 100644 --- a/pyomo/solvers/tests/checks/test_SAS.py +++ b/pyomo/solvers/tests/checks/test_SAS.py @@ -13,15 +13,8 @@ minimize, Suffix, ) -from pyomo.opt.results import ( - SolverStatus, - TerminationCondition, - ProblemSense, -) -from pyomo.opt import ( - SolverFactory, - check_available_solvers, -) +from pyomo.opt.results import SolverStatus, TerminationCondition, ProblemSense +from pyomo.opt import SolverFactory, check_available_solvers CAS_OPTIONS = { From 284ab98e470cf67c5273837a28c11afbb2b153a5 Mon Sep 17 00:00:00 2001 From: "Philipp Christophel (phchri)" Date: Fri, 23 Jun 2023 09:42:08 -0400 Subject: [PATCH 003/113] Initial version of the SAS solver interfaces and unit tests. 
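A note on how the options used in the unit tests above reach the SAS
procedures: nested option dictionaries are rendered into separate procedure
statements by SAS94._create_statement_str, while plain options are appended to
the PROC statement itself.  The model below is hypothetical and assumed to
contain integer variables (so OPTMILP is selected); the generated SAS code is
only a rough sketch of what the 9.4M5 branch of _apply_solver submits.  A call
such as

    opt = SolverFactory('sas')
    results = opt.solve(
        model,
        options={
            "maxtime": 600,
            "decomp": {"absobjgap": 0.0},
            "decompmaster": {"algorithm": "dual"},
            "rootnode": {"presolver": "automatic"},
        },
    )

produces, roughly,

    proc OPTMILP data=mpsdata maxtime=600 primalout=primalout dualout=dualout;
        decomp absobjgap=0.0;
        decompmaster algorithm=dual;
        rootnode presolver=automatic;
    run;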
--- pyomo/solvers/plugins/solvers/SAS.py | 700 ++++++++++++++++++++++ pyomo/solvers/plugins/solvers/__init__.py | 1 + pyomo/solvers/tests/checks/test_SAS.py | 462 ++++++++++++++ 3 files changed, 1163 insertions(+) create mode 100644 pyomo/solvers/plugins/solvers/SAS.py create mode 100644 pyomo/solvers/tests/checks/test_SAS.py diff --git a/pyomo/solvers/plugins/solvers/SAS.py b/pyomo/solvers/plugins/solvers/SAS.py new file mode 100644 index 00000000000..7f50b7a2970 --- /dev/null +++ b/pyomo/solvers/plugins/solvers/SAS.py @@ -0,0 +1,700 @@ +__all__ = ['SAS'] + +import logging +import sys +import os + +from io import StringIO +from abc import ABC, abstractmethod +from contextlib import redirect_stdout + +from pyomo.opt.base import ProblemFormat, ResultsFormat, OptSolver +from pyomo.opt.base.solvers import SolverFactory +from pyomo.common.collections import Bunch +from pyomo.opt.results import ( + SolverResults, + SolverStatus, + TerminationCondition, + SolutionStatus, + ProblemSense, +) +from pyomo.common.tempfiles import TempfileManager +from pyomo.core.base import Var +from pyomo.core.base.block import _BlockData +from pyomo.core.kernel.block import IBlock + + +logger = logging.getLogger('pyomo.solvers') + + +STATUS_TO_SOLVERSTATUS = { + "OK": SolverStatus.ok, + "SYNTAX_ERROR": SolverStatus.error, + "DATA_ERROR": SolverStatus.error, + "OUT_OF_MEMORY": SolverStatus.aborted, + "IO_ERROR": SolverStatus.error, + "ERROR": SolverStatus.error, +} + +# This combines all status codes from OPTLP/solvelp and OPTMILP/solvemilp +SOLSTATUS_TO_TERMINATIONCOND = { + "OPTIMAL": TerminationCondition.optimal, + "OPTIMAL_AGAP": TerminationCondition.optimal, + "OPTIMAL_RGAP": TerminationCondition.optimal, + "OPTIMAL_COND": TerminationCondition.optimal, + "TARGET": TerminationCondition.optimal, + "CONDITIONAL_OPTIMAL": TerminationCondition.optimal, + "FEASIBLE": TerminationCondition.feasible, + "INFEASIBLE": TerminationCondition.infeasible, + "UNBOUNDED": TerminationCondition.unbounded, + "INFEASIBLE_OR_UNBOUNDED": TerminationCondition.infeasibleOrUnbounded, + "SOLUTION_LIM": TerminationCondition.maxEvaluations, + "NODE_LIM_SOL": TerminationCondition.maxEvaluations, + "NODE_LIM_NOSOL": TerminationCondition.maxEvaluations, + "ITERATION_LIMIT_REACHED": TerminationCondition.maxIterations, + "TIME_LIM_SOL": TerminationCondition.maxTimeLimit, + "TIME_LIM_NOSOL": TerminationCondition.maxTimeLimit, + "TIME_LIMIT_REACHED": TerminationCondition.maxTimeLimit, + "ABORTED": TerminationCondition.userInterrupt, + "ABORT_SOL": TerminationCondition.userInterrupt, + "ABORT_NOSOL": TerminationCondition.userInterrupt, + "OUTMEM_SOL": TerminationCondition.solverFailure, + "OUTMEM_NOSOL": TerminationCondition.solverFailure, + "FAILED": TerminationCondition.solverFailure, + "FAIL_SOL": TerminationCondition.solverFailure, + "FAIL_NOSOL": TerminationCondition.solverFailure, +} + + +SOLSTATUS_TO_MESSAGE = { + "OPTIMAL": "The solution is optimal.", + "OPTIMAL_AGAP": "The solution is optimal within the absolute gap specified by the ABSOBJGAP= option.", + "OPTIMAL_RGAP": "The solution is optimal within the relative gap specified by the RELOBJGAP= option.", + "OPTIMAL_COND": "The solution is optimal, but some infeasibilities (primal, bound, or integer) exceed tolerances due to scaling or choice of a small INTTOL= value.", + "TARGET": "The solution is not worse than the target specified by the TARGET= option.", + "CONDITIONAL_OPTIMAL": "The solution is optimal, but some infeasibilities (primal, dual or bound) exceed tolerances due to scaling 
or preprocessing.", + "FEASIBLE": "The problem is feasible. This status is displayed when the IIS=TRUE option is specified and the problem is feasible.", + "INFEASIBLE": "The problem is infeasible.", + "UNBOUNDED": "The problem is unbounded.", + "INFEASIBLE_OR_UNBOUNDED": "The problem is infeasible or unbounded.", + "SOLUTION_LIM": "The solver reached the maximum number of solutions specified by the MAXSOLS= option.", + "NODE_LIM_SOL": "The solver reached the maximum number of nodes specified by the MAXNODES= option and found a solution.", + "NODE_LIM_NOSOL": "The solver reached the maximum number of nodes specified by the MAXNODES= option and did not find a solution.", + "ITERATION_LIMIT_REACHED": "The maximum allowable number of iterations was reached.", + "TIME_LIM_SOL": "The solver reached the execution time limit specified by the MAXTIME= option and found a solution.", + "TIME_LIM_NOSOL": "The solver reached the execution time limit specified by the MAXTIME= option and did not find a solution.", + "TIME_LIMIT_REACHED": "The solver reached its execution time limit.", + "ABORTED": "The solver was interrupted externally.", + "ABORT_SOL": "The solver was stopped by the user but still found a solution.", + "ABORT_NOSOL": "The solver was stopped by the user and did not find a solution.", + "OUTMEM_SOL": "The solver ran out of memory but still found a solution.", + "OUTMEM_NOSOL": "The solver ran out of memory and either did not find a solution or failed to output the solution due to insufficient memory.", + "FAILED": "The solver failed to converge, possibly due to numerical issues.", + "FAIL_SOL": "The solver stopped due to errors but still found a solution.", + "FAIL_NOSOL": "The solver stopped due to errors and did not find a solution.", +} + + +CAS_OPTION_NAMES = [ + "hostname", + "port", + "username", + "password", + "session", + "locale", + "name", + "nworkers", + "authinfo", + "protocol", + "path", + "ssl_ca_list", + "authcode", +] + + +@SolverFactory.register('sas', doc='The SAS LP/MIP solver') +class SAS(OptSolver): + """The SAS optimization solver""" + + def __new__(cls, *args, **kwds): + mode = kwds.pop('solver_io', None) + if mode != None: + return SolverFactory(mode) + else: + # Choose solver factory automatically + # bassed on what can be loaded. + s = SolverFactory('_sas94', **kwds) + if not s.available(): + s = SolverFactory('_sascas', **kwds) + return s + + +class SASAbc(ABC, OptSolver): + """Abstract base class for the SAS solver interfaces. Simply to avoid code duplication.""" + + def __init__(self, **kwds): + """Initialize the SAS solver interfaces.""" + kwds['type'] = 'sas' + super(SASAbc, self).__init__(**kwds) + + # + # Set up valid problem formats and valid results for each + # problem format + # + self._valid_problem_formats = [ProblemFormat.mps] + self._valid_result_formats = {ProblemFormat.mps: [ResultsFormat.soln]} + + self._keepfiles = False + self._capabilities.linear = True + self._capabilities.integer = True + + super(SASAbc, self).set_problem_format(ProblemFormat.mps) + + def _presolve(self, *args, **kwds): + """ "Set things up for the actual solve.""" + # create a context in the temporary file manager for + # this plugin - is "pop"ed in the _postsolve method. 
+ TempfileManager.push() + + # Get the warmstart flag + self.warmstart_flag = kwds.pop('warmstart', False) + + # Call parent presolve function + super(SASAbc, self)._presolve(*args, **kwds) + + # Store the model, too bad this is not done in the base class + for arg in args: + if isinstance(arg, (_BlockData, IBlock)): + # Store the instance + self._instance = arg + self._vars = [] + for block in self._instance.block_data_objects(active=True): + for vardata in block.component_data_objects( + Var, active=True, descend_into=False + ): + self._vars.append(vardata) + # Store the symbal map, we need this for example when writing the warmstart file + if isinstance(self._instance, IBlock): + self._smap = getattr(self._instance, "._symbol_maps")[self._smap_id] + else: + self._smap = self._instance.solutions.symbol_map[self._smap_id] + + # Create the primalin data + if self.warmstart_flag: + filename = self._warm_start_file_name = TempfileManager.create_tempfile( + ".sol", text=True + ) + smap = self._smap + numWritten = 0 + with open(filename, 'w') as file: + file.write('_VAR_,_VALUE_\n') + for var in self._vars: + if (var.value is not None) and (id(var) in smap.byObject): + name = smap.byObject[id(var)] + file.write( + "{name},{value}\n".format(name=name, value=var.value) + ) + numWritten += 1 + if numWritten == 0: + # No solution available, disable warmstart + self.warmstart_flag = False + + def available(self, exception_flag=False): + """True if the solver is available""" + return self._python_api_exists + + def _has_integer_variables(self): + """True if the problem has integer variables.""" + for vardata in self._vars: + if vardata.is_binary() or vardata.is_integer(): + return True + return False + + def _create_results_from_status(self, status, solution_status): + """Create a results object and set the status code and messages.""" + results = SolverResults() + results.solver.name = "SAS" + results.solver.status = STATUS_TO_SOLVERSTATUS[status] + if results.solver.status == SolverStatus.ok: + results.solver.termination_condition = SOLSTATUS_TO_TERMINATIONCOND[ + solution_status + ] + results.solver.message = ( + results.solver.termination_message + ) = SOLSTATUS_TO_MESSAGE[solution_status] + results.solver.status = TerminationCondition.to_solver_status( + results.solver.termination_condition + ) + elif results.solver.status == SolverStatus.aborted: + results.solver.termination_condition = TerminationCondition.userInterrupt + results.solver.message = ( + results.solver.termination_message + ) = SOLSTATUS_TO_MESSAGE["ABORTED"] + else: + results.solver.termination_condition = TerminationCondition.error + results.solver.message = ( + results.solver.termination_message + ) = SOLSTATUS_TO_MESSAGE["FAILED"] + return results + + @abstractmethod + def _apply_solver(self): + """The routine that performs the solve""" + raise NotImplemented("This is an abstract function and thus not implemented!") + + def _postsolve(self): + """Clean up at the end, especially the temp files.""" + # Let the base class deal with returning results. + results = super(SASAbc, self)._postsolve() + + # Finally, clean any temporary files registered with the temp file + # manager, created populated *directly* by this plugin. does not + # include, for example, the execution script. but does include + # the warm-start file. 
+ TempfileManager.pop(remove=not self._keepfiles) + + return results + + def warm_start_capable(self): + """True if the solver interface supports MILP warmstarting.""" + return True + + +@SolverFactory.register('_sas94', doc='SAS 9.4 interface') +class SAS94(SASAbc): + """ + Solver interface for SAS 9.4 using saspy. See the saspy documentation about + how to create a connection. + """ + + def __init__(self, **kwds): + """Initialize the solver interface and see if the saspy package is available.""" + super(SAS94, self).__init__(**kwds) + + try: + import saspy + + self._sas = saspy + except ImportError: + self._python_api_exists = False + except Exception as e: + self._python_api_exists = False + # For other exceptions, raise it so that it does not get lost + raise e + else: + self._python_api_exists = True + self._sas.logger.setLevel(logger.level) + + def _create_statement_str(self, statement): + """Helper function to create the strings for the statements of the proc OPTLP/OPTMILP code.""" + stmt = self.options.pop(statement, None) + if stmt: + return ( + statement.strip() + + " " + + " ".join(option + "=" + str(value) for option, value in stmt.items()) + + ";" + ) + else: + return "" + + def _apply_solver(self): + """ "Prepare the options and run the solver. Then store the data to be returned.""" + logger.debug("Running SAS") + + # Set return code to issue an error if we get interrupted + self._rc = -1 + + # Figure out if the problem has integer variables + with_opt = self.options.pop("with", None) + if with_opt == "lp": + proc = "OPTLP" + elif with_opt == "milp": + proc = "OPTMILP" + else: + # Check if there are integer variables, this might be slow + proc = "OPTMILP" if self._has_integer_variables() else "OPTLP" + + # Remove CAS options in case they were specified + for opt in CAS_OPTION_NAMES: + self.options.pop(opt, None) + + # Get the rootnode options + decomp_str = self._create_statement_str("decomp") + decompmaster_str = self._create_statement_str("decompmaster") + decompmasterip_str = self._create_statement_str("decompmasterip") + decompsubprob_str = self._create_statement_str("decompsubprob") + rootnode_str = self._create_statement_str("rootnode") + + # Handle warmstart + warmstart_str = "" + if self.warmstart_flag: + # Set the warmstart basis option + if proc != "OPTLP": + warmstart_str = """ + proc import datafile='{primalin}' + out=primalin + dbms=csv + replace; + getnames=yes; + run; + """.format( + primalin=self._warm_start_file_name + ) + self.options["primalin"] = "primalin" + + # Convert options to string + opt_str = " ".join( + option + "=" + str(value) for option, value in self.options.items() + ) + + # Start a SAS session, submit the code and return the results`` + with self._sas.SASsession() as sas: + # Find the version of 9.4 we are using + if sas.sasver.startswith("9.04.01M5"): + # In 9.4M5 we have to create an MPS data set from an MPS file first + # Earlier versions will not work because the MPS format in incompatible + res = sas.submit( + """ + {warmstart} + %MPS2SASD(MPSFILE="{mpsfile}", OUTDATA=mpsdata, MAXLEN=256, FORMAT=FREE); + proc {proc} data=mpsdata {options} primalout=primalout dualout=dualout; + {decomp} + {decompmaster} + {decompmasterip} + {decompsubprob} + {rootnode} + run; + """.format( + warmstart=warmstart_str, + proc=proc, + mpsfile=self._problem_files[0], + options=opt_str, + decomp=decomp_str, + decompmaster=decompmaster_str, + decompmasterip=decompmasterip_str, + decompsubprob=decompsubprob_str, + rootnode=rootnode_str, + ), + results="TEXT", + 
) + else: + # Since 9.4M6+ optlp/optmilp can read mps files directly + res = sas.submit( + """ + {warmstart} + proc {proc} mpsfile=\"{mpsfile}\" {options} primalout=primalout dualout=dualout; + {decomp} + {decompmaster} + {decompmasterip} + {decompsubprob} + {rootnode} + run; + """.format( + warmstart=warmstart_str, + proc=proc, + mpsfile=self._problem_files[0], + options=opt_str, + decomp=decomp_str, + decompmaster=decompmaster_str, + decompmasterip=decompmasterip_str, + decompsubprob=decompsubprob_str, + rootnode=rootnode_str, + ), + results="TEXT", + ) + + # Store log and ODS output + self._log = res["LOG"] + self._lst = res["LST"] + # Print log if requested by the user + if self._tee: + print(self._log) + if "ERROR 22-322: Syntax error" in self._log: + raise ValueError( + "An option passed to the SAS solver caused a syntax error: {log}".format( + log=self._log + ) + ) + self._macro = dict( + (key.strip(), value.strip()) + for key, value in ( + pair.split("=") for pair in sas.symget("_OR" + proc + "_").split() + ) + ) + primal_out = sas.sd2df("primalout") + dual_out = sas.sd2df("dualout") + + # Prepare the solver results + results = self.results = self._create_results_from_status( + self._macro.get("STATUS", "ERROR"), self._macro.get("SOLUTION_STATUS","ERROR") + ) + + if "Objective Sense Maximization" in self._lst: + results.problem.sense = ProblemSense.maximize + else: + results.problem.sense = ProblemSense.minimize + + # Prepare the solution information + if results.solver.termination_condition == TerminationCondition.optimal: + sol = results.solution.add() + + # Store status in solution + sol.status = SolutionStatus.feasible + sol.termination_condition = TerminationCondition.optimal + + # Store objective value in solution + sol.objective['__default_objective__'] = {'Value': self._macro["OBJECTIVE"]} + + if proc == "OPTLP": + # Convert primal out data set to variable dictionary + # Use panda functions for efficiency + primal_out = primal_out[['_VAR_', '_VALUE_', '_STATUS_', '_R_COST_']] + primal_out = primal_out.set_index('_VAR_', drop=True) + primal_out = primal_out.rename( + {'_VALUE_': 'Value', '_STATUS_': 'Status', '_R_COST_': 'rc'}, + axis='columns', + ) + sol.variable = primal_out.to_dict('index') + + # Convert dual out data set to constraint dictionary + # Use pandas functions for efficiency + dual_out = dual_out[['_ROW_', '_VALUE_', '_STATUS_', '_ACTIVITY_']] + dual_out = dual_out.set_index('_ROW_', drop=True) + dual_out = dual_out.rename( + {'_VALUE_': 'dual', '_STATUS_': 'Status', '_ACTIVITY_': 'slack'}, + axis='columns', + ) + sol.constraint = dual_out.to_dict('index') + else: + # Convert primal out data set to variable dictionary + # Use pandas functions for efficiency + primal_out = primal_out[['_VAR_', '_VALUE_']] + primal_out = primal_out.set_index('_VAR_', drop=True) + primal_out = primal_out.rename({'_VALUE_': 'Value'}, axis='columns') + sol.variable = primal_out.to_dict('index') + + self._rc = 0 + return Bunch(rc=self._rc, log=self._log) + + +class SASLogWriter: + """Helper class to take the log from stdout and put it also in a StringIO.""" + + def __init__(self, tee): + """Set up the two outputs.""" + self.tee = tee + self._log = StringIO() + self.stdout = sys.stdout + + def write(self, message): + """If the tee options is specified, write to both outputs.""" + if self.tee: + self.stdout.write(message) + self._log.write(message) + + def flush(self): + """Nothing to do, just here for compatibility reasons.""" + # Do nothing since we flush right away + pass + + def 
log(self): + """ "Get the log as a string.""" + return self._log.getvalue() + + +@SolverFactory.register('_sascas', doc='SAS Viya CAS Server interface') +class SASCAS(SASAbc): + """ + Solver interface connection to a SAS Viya CAS server using swat. + See the documentation for the swat package about how to create a connection. + The swat connection options can be passed as options to the solve function. + """ + + def __init__(self, **kwds): + """Initialize and try to load the swat package.""" + super(SASCAS, self).__init__(**kwds) + + try: + import swat + + self._sas = swat + except ImportError: + self._python_api_exists = False + except Exception as e: + self._python_api_exists = False + # For other exceptions, raise it so that it does not get lost + raise e + else: + self._python_api_exists = True + + def _apply_solver(self): + """ "Prepare the options and run the solver. Then store the data to be returned.""" + logger.debug("Running SAS Viya") + + # Set return code to issue an error if we get interrupted + self._rc = -1 + + # Extract CAS connection options + cas_opts = {} + for opt in CAS_OPTION_NAMES: + val = self.options.pop(opt, None) + if val != None: + cas_opts[opt] = val + + # Figure out if the problem has integer variables + with_opt = self.options.pop("with", None) + if with_opt == "lp": + action = "solveLp" + elif with_opt == "milp": + action = "solveMilp" + else: + # Check if there are integer variables, this might be slow + action = "solveMilp" if self._has_integer_variables() else "solveLp" + + # Connect to CAS server + with redirect_stdout(SASLogWriter(self._tee)) as self._log_writer: + s = self._sas.CAS(**cas_opts) + try: + # Load the optimization action set + s.loadactionset('optimization') + + # Upload mps file to CAS + if os.stat(self._problem_files[0]).st_size >= 2 * 1024**3: + # For large files, use convertMPS, first create file for upload + mpsWithIdFileName = TempfileManager.create_tempfile( + ".mps.csv", text=True + ) + with open(mpsWithIdFileName, 'w') as mpsWithId: + mpsWithId.write('_ID_\tText\n') + with open(self._problem_files[0], 'r') as f: + id = 0 + for line in f: + id += 1 + mpsWithId.write(str(id) + '\t' + line.rstrip() + '\n') + + # Upload .mps.csv file + s.upload_file( + mpsWithIdFileName, + casout={"name": "mpscsv", "replace": True}, + importoptions={"filetype": "CSV", "delimiter": "\t"}, + ) + + # Convert .mps.csv file to .mps + s.optimization.convertMps( + data="mpscsv", + casOut={"name": "mpsdata", "replace": True}, + format="FREE", + ) + else: + # For small files, use loadMPS + with open(self._problem_files[0], 'r') as mps_file: + s.optimization.loadMps( + mpsFileString=mps_file.read(), + casout={"name": "mpsdata", "replace": True}, + format="FREE", + ) + + if self.warmstart_flag: + # Upload warmstart file to CAS + s.upload_file( + self._warm_start_file_name, + casout={"name": "primalin", "replace": True}, + importoptions={"filetype": "CSV"}, + ) + self.options["primalin"] = "primalin" + + # Solve the problem in CAS + if action == "solveMilp": + r = s.optimization.solveMilp( + data={"name": "mpsdata"}, + primalOut={"name": "primalout", "replace": True}, + **self.options + ) + else: + r = s.optimization.solveLp( + data={"name": "mpsdata"}, + primalOut={"name": "primalout", "replace": True}, + dualOut={"name": "dualout", "replace": True}, + **self.options + ) + + # Prepare the solver results + if r: + # Get back the primal and dual solution data sets + results = self.results = self._create_results_from_status( + r.get("status", "ERROR"), 
r.get("solutionStatus", "ERROR") + ) + + if r.ProblemSummary["cValue1"][1] == "Maximization": + results.problem.sense = ProblemSense.maximize + else: + results.problem.sense = ProblemSense.minimize + + # Prepare the solution information + if ( + results.solver.termination_condition + == TerminationCondition.optimal + ): + sol = results.solution.add() + + # Store status in solution + sol.status = SolutionStatus.feasible + sol.termination_condition = TerminationCondition.optimal + + # Store objective value in solution + sol.objective['__default_objective__'] = { + 'Value': r["objective"] + } + + if action == "solveMilp": + primal_out = s.CASTable(name="primalout") + # Use pandas functions for efficiency + primal_out = primal_out[['_VAR_', '_VALUE_']] + sol.variable = {} + for row in primal_out.itertuples(index=False): + sol.variable[row[0]] = {'Value': row[1]} + else: + # Convert primal out data set to variable dictionary + # Use panda functions for efficiency + primal_out = s.CASTable(name="primalout") + primal_out = primal_out[ + ['_VAR_', '_VALUE_', '_STATUS_', '_R_COST_'] + ] + sol.variable = {} + for row in primal_out.itertuples(index=False): + sol.variable[row[0]] = { + 'Value': row[1], + 'Status': row[2], + 'rc': row[3], + } + + # Convert dual out data set to constraint dictionary + # Use pandas functions for efficiency + dual_out = s.CASTable(name="dualout") + dual_out = dual_out[ + ['_ROW_', '_VALUE_', '_STATUS_', '_ACTIVITY_'] + ] + sol.constraint = {} + for row in dual_out.itertuples(index=False): + sol.constraint[row[0]] = { + 'dual': row[1], + 'Status': row[2], + 'slack': row[3], + } + else: + results = self.results = SolverResults() + results.solver.name = "SAS" + results.solver.status = SolverStatus.error + raise ValueError( + "An option passed to the SAS solver caused a syntax error." 
+ ) + + finally: + s.close() + + self._log = self._log_writer.log() + if self._tee: + print(self._log) + self._rc = 0 + return Bunch(rc=self._rc, log=self._log) diff --git a/pyomo/solvers/plugins/solvers/__init__.py b/pyomo/solvers/plugins/solvers/__init__.py index c5fbfa97e42..23b7fe06526 100644 --- a/pyomo/solvers/plugins/solvers/__init__.py +++ b/pyomo/solvers/plugins/solvers/__init__.py @@ -30,3 +30,4 @@ import pyomo.solvers.plugins.solvers.mosek_persistent import pyomo.solvers.plugins.solvers.xpress_direct import pyomo.solvers.plugins.solvers.xpress_persistent +import pyomo.solvers.plugins.solvers.SAS diff --git a/pyomo/solvers/tests/checks/test_SAS.py b/pyomo/solvers/tests/checks/test_SAS.py new file mode 100644 index 00000000000..4592343b17f --- /dev/null +++ b/pyomo/solvers/tests/checks/test_SAS.py @@ -0,0 +1,462 @@ +import os +import pyomo.common.unittest as unittest +from pyomo.environ import ( + ConcreteModel, + Var, + Objective, + Constraint, + NonNegativeIntegers, + NonNegativeReals, + Reals, + Integers, + maximize, + minimize, + Suffix, +) +from pyomo.opt.results import ( + SolverStatus, + TerminationCondition, + ProblemSense, +) +from pyomo.opt import ( + SolverFactory, + check_available_solvers, +) + + +CAS_OPTIONS = { + "hostname": os.environ.get('CAS_SERVER', None), + "port": os.environ.get('CAS_PORT', None), + "authinfo": os.environ.get('CAS_AUTHINFO', None), +} + + +sas_available = check_available_solvers('sas') + + +class SASTestAbc: + solver_io = '_sas94' + base_options = {} + + def setObj(self): + X = self.instance.X + self.instance.Obj = Objective( + expr=2 * X[1] - 3 * X[2] - 4 * X[3], sense=minimize + ) + + def setX(self): + self.instance.X = Var([1, 2, 3], within=NonNegativeReals) + + def setUp(self): + instance = self.instance = ConcreteModel() + self.setX() + X = instance.X + instance.R1 = Constraint(expr=-2 * X[2] - 3 * X[3] >= -5) + instance.R2 = Constraint(expr=X[1] + X[2] + 2 * X[3] <= 4) + instance.R3 = Constraint(expr=X[1] + 2 * X[2] + 3 * X[3] <= 7) + self.setObj() + + # Declare suffixes for solution information + instance.status = Suffix(direction=Suffix.IMPORT) + instance.slack = Suffix(direction=Suffix.IMPORT) + instance.rc = Suffix(direction=Suffix.IMPORT) + instance.dual = Suffix(direction=Suffix.IMPORT) + + self.opt_sas = SolverFactory('sas', solver_io=self.solver_io) + + def tearDown(self): + del self.opt_sas + del self.instance + + def run_solver(self, **kwargs): + opt_sas = self.opt_sas + instance = self.instance + + # Add base options for connection data etc. 
+ options = kwargs.get("options", {}) + if self.base_options: + kwargs["options"] = {**options, **self.base_options} + + # Call the solver + self.results = opt_sas.solve(instance, **kwargs) + + +class SASTestLP(SASTestAbc, unittest.TestCase): + def checkSolution(self): + instance = self.instance + results = self.results + # Get the objective sense, we use the same code for minimization and maximization tests + sense = instance.Obj.sense + + # Check status + self.assertEqual(results.solver.status, SolverStatus.ok) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) + + # Check objective value + self.assertAlmostEqual(instance.Obj(), sense * -7.5) + + # Check primal solution values + self.assertAlmostEqual(instance.X[1].value, 0.0) + self.assertAlmostEqual(instance.X[2].value, 2.5) + self.assertAlmostEqual(instance.X[3].value, 0.0) + + # Check reduced cost + self.assertAlmostEqual(instance.rc[instance.X[1]], sense * 2.0) + self.assertAlmostEqual(instance.rc[instance.X[2]], sense * 0.0) + self.assertAlmostEqual(instance.rc[instance.X[3]], sense * 0.5) + + # Check slack + self.assertAlmostEqual(instance.slack[instance.R1], -5.0) + self.assertAlmostEqual(instance.slack[instance.R2], 2.5) + self.assertAlmostEqual(instance.slack[instance.R3], 5.0) + + # Check dual solution + self.assertAlmostEqual(instance.dual[instance.R1], sense * 1.5) + self.assertAlmostEqual(instance.dual[instance.R2], sense * 0.0) + self.assertAlmostEqual(instance.dual[instance.R3], sense * 0.0) + + # Check basis status + self.assertEqual(instance.status[instance.X[1]], 'L') + self.assertEqual(instance.status[instance.X[2]], 'B') + self.assertEqual(instance.status[instance.X[3]], 'L') + self.assertEqual(instance.status[instance.R1], 'U') + self.assertEqual(instance.status[instance.R2], 'B') + self.assertEqual(instance.status[instance.R3], 'B') + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_default(self): + self.run_solver() + self.checkSolution() + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_primal(self): + self.run_solver(options={"algorithm": "ps"}) + self.assertIn("NOTE: The Primal Simplex algorithm is used.", self.opt_sas._log) + self.checkSolution() + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_ipm(self): + self.run_solver(options={"algorithm": "ip"}) + self.assertIn("NOTE: The Interior Point algorithm is used.", self.opt_sas._log) + self.checkSolution() + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_intoption(self): + self.run_solver(options={"maxiter": 20}) + self.checkSolution() + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_invalidoption(self): + with self.assertRaisesRegex(ValueError, "syntax error"): + self.run_solver(options={"foo": "bar"}) + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_max(self): + X = self.instance.X + self.instance.Obj.set_value(expr=-2 * X[1] + 3 * X[2] + 4 * X[3]) + self.instance.Obj.sense = maximize + self.run_solver() + self.checkSolution() + self.assertEqual(self.results.problem.sense, ProblemSense.maximize) + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_infeasible(self): + instance = self.instance + X = instance.X + instance.R4 = Constraint(expr=-2 * X[2] - 3 * X[3] <= -6) + self.run_solver() + results = self.results + 
self.assertEqual(results.solver.status, SolverStatus.warning) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.infeasible + ) + self.assertEqual(results.solver.message, "The problem is infeasible.") + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_infeasible_or_unbounded(self): + self.instance.X.domain = Reals + self.run_solver() + results = self.results + self.assertEqual(results.solver.status, SolverStatus.warning) + self.assertEqual( + results.solver.termination_condition, + TerminationCondition.infeasibleOrUnbounded, + ) + self.assertEqual( + results.solver.message, "The problem is infeasible or unbounded." + ) + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_unbounded(self): + self.instance.X.domain = Reals + self.run_solver(options={"presolver": "none", "algorithm": "primal"}) + results = self.results + self.assertEqual(results.solver.status, SolverStatus.warning) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.unbounded + ) + self.assertEqual(results.solver.message, "The problem is unbounded.") + + def checkSolutionDecomp(self): + instance = self.instance + results = self.results + # Get the objective sense, we use the same code for minimization and maximization tests + sense = instance.Obj.sense + + # Check status + self.assertEqual(results.solver.status, SolverStatus.ok) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) + + # Check objective value + self.assertAlmostEqual(instance.Obj(), sense * -7.5) + + # Check primal solution values + self.assertAlmostEqual(instance.X[1].value, 0.0) + self.assertAlmostEqual(instance.X[2].value, 2.5) + self.assertAlmostEqual(instance.X[3].value, 0.0) + + # Check reduced cost + self.assertAlmostEqual(instance.rc[instance.X[1]], sense * 2.0) + self.assertAlmostEqual(instance.rc[instance.X[2]], sense * 0.0) + self.assertAlmostEqual(instance.rc[instance.X[3]], sense * 0.5) + + # Check slack + self.assertAlmostEqual(instance.slack[instance.R1], -5.0) + self.assertAlmostEqual(instance.slack[instance.R2], 2.5) + self.assertAlmostEqual(instance.slack[instance.R3], 5.0) + + # Check dual solution + self.assertAlmostEqual(instance.dual[instance.R1], sense * 1.5) + self.assertAlmostEqual(instance.dual[instance.R2], sense * 0.0) + self.assertAlmostEqual(instance.dual[instance.R3], sense * 0.0) + + # Don't check basis status for decomp + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_decomp(self): + self.run_solver( + options={ + "decomp": {"absobjgap": 0.0}, + "decompmaster": {"algorithm": "dual"}, + "decompsubprob": {"presolver": "none"}, + } + ) + self.assertIn( + "NOTE: The DECOMP method value DEFAULT is applied.", self.opt_sas._log + ) + self.checkSolutionDecomp() + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_iis(self): + self.run_solver(options={"iis": "true"}) + results = self.results + self.assertEqual(results.solver.status, SolverStatus.ok) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.feasible + ) + self.assertIn("NOTE: The IIS= option is enabled.", self.opt_sas._log) + self.assertEqual( + results.solver.message, + "The problem is feasible. 
This status is displayed when the IIS=TRUE option is specified and the problem is feasible.", + ) + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_maxiter(self): + self.run_solver(options={"maxiter": 1}) + results = self.results + self.assertEqual(results.solver.status, SolverStatus.ok) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.maxIterations + ) + self.assertEqual( + results.solver.message, + "The maximum allowable number of iterations was reached.", + ) + + +class SASTestLPCAS(SASTestLP): + solver_io = '_sascas' + base_options = CAS_OPTIONS + + +class SASTestMILP(SASTestAbc, unittest.TestCase): + def setX(self): + self.instance.X = Var([1, 2, 3], within=NonNegativeIntegers) + + def checkSolution(self): + instance = self.instance + results = self.results + + # Get the objective sense, we use the same code for minimization and maximization tests + sense = instance.Obj.sense + + # Check status + self.assertEqual(results.solver.status, SolverStatus.ok) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) + + # Check objective value + self.assertAlmostEqual(instance.Obj(), sense * -7) + + # Check primal solution values + self.assertAlmostEqual(instance.X[1].value, 0.0) + self.assertAlmostEqual(instance.X[2].value, 1.0) + self.assertAlmostEqual(instance.X[3].value, 1.0) + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_default(self): + self.run_solver(options={}) + self.checkSolution() + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_presolve(self): + self.run_solver(options={"presolver": "none"}) + self.assertIn( + "NOTE: The MILP presolver value NONE is applied.", self.opt_sas._log + ) + self.checkSolution() + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_intoption(self): + self.run_solver(options={"maxnodes": 20}) + self.checkSolution() + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_invalidoption(self): + with self.assertRaisesRegex(ValueError, "syntax error"): + self.run_solver(options={"foo": "bar"}) + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_max(self): + X = self.instance.X + self.instance.Obj.set_value(expr=-2 * X[1] + 3 * X[2] + 4 * X[3]) + self.instance.Obj.sense = maximize + self.run_solver() + self.checkSolution() + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_infeasible(self): + instance = self.instance + X = instance.X + instance.R4 = Constraint(expr=-2 * X[2] - 3 * X[3] <= -6) + self.run_solver() + results = self.results + self.assertEqual(results.solver.status, SolverStatus.warning) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.infeasible + ) + self.assertEqual(results.solver.message, "The problem is infeasible.") + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + @unittest.skip("Returns wrong status for some versions.") + def test_solver_infeasible_or_unbounded(self): + self.instance.X.domain = Integers + self.run_solver() + results = self.results + self.assertEqual(results.solver.status, SolverStatus.warning) + self.assertEqual( + results.solver.termination_condition, + TerminationCondition.infeasibleOrUnbounded, + ) + self.assertEqual( + results.solver.message, "The problem is infeasible or unbounded." 
+ ) + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_unbounded(self): + self.instance.X.domain = Integers + self.run_solver( + options={"presolver": "none", "rootnode": {"algorithm": "primal"}} + ) + results = self.results + self.assertEqual(results.solver.status, SolverStatus.warning) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.unbounded + ) + self.assertEqual(results.solver.message, "The problem is unbounded.") + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_decomp(self): + self.run_solver( + options={ + "decomp": {"hybrid": "off"}, + "decompmaster": {"algorithm": "dual"}, + "decompmasterip": {"presolver": "none"}, + "decompsubprob": {"presolver": "none"}, + } + ) + self.assertIn( + "NOTE: The DECOMP method value DEFAULT is applied.", self.opt_sas._log + ) + self.checkSolution() + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_rootnode(self): + self.run_solver(options={"rootnode": {"presolver": "automatic"}}) + self.checkSolution() + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_maxnodes(self): + self.run_solver(options={"maxnodes": 0}) + results = self.results + self.assertEqual(results.solver.status, SolverStatus.ok) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.maxEvaluations + ) + self.assertEqual( + results.solver.message, + "The solver reached the maximum number of nodes specified by the MAXNODES= option and found a solution.", + ) + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_maxsols(self): + self.run_solver(options={"maxsols": 1}) + results = self.results + self.assertEqual(results.solver.status, SolverStatus.ok) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.maxEvaluations + ) + self.assertEqual( + results.solver.message, + "The solver reached the maximum number of solutions specified by the MAXSOLS= option.", + ) + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_target(self): + self.run_solver(options={"target": -6.0}) + results = self.results + self.assertEqual(results.solver.status, SolverStatus.ok) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) + self.assertEqual( + results.solver.message, + "The solution is not worse than the target specified by the TARGET= option.", + ) + + @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_primalin(self): + X = self.instance.X + X[1] = None + X[2] = 3 + X[3] = 7 + self.run_solver(warmstart=True) + self.checkSolution() + self.assertIn( + "NOTE: The input solution is infeasible or incomplete. 
Repair heuristics are applied.", + self.opt_sas._log, + ) + + +class SASTestMILPCAS(SASTestMILP): + solver_io = '_sascas' + base_options = CAS_OPTIONS + + +if __name__ == '__main__': + unittest.main() From bc0e1feb794f553d0e7a859120aeeabd297708e9 Mon Sep 17 00:00:00 2001 From: "Philipp Christophel (phchri)" Date: Fri, 23 Jun 2023 09:56:54 -0400 Subject: [PATCH 004/113] Just some black adjustments --- pyomo/solvers/plugins/solvers/SAS.py | 3 ++- pyomo/solvers/tests/checks/test_SAS.py | 11 ++--------- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/pyomo/solvers/plugins/solvers/SAS.py b/pyomo/solvers/plugins/solvers/SAS.py index 7f50b7a2970..ed0e63d44d6 100644 --- a/pyomo/solvers/plugins/solvers/SAS.py +++ b/pyomo/solvers/plugins/solvers/SAS.py @@ -427,7 +427,8 @@ def _apply_solver(self): # Prepare the solver results results = self.results = self._create_results_from_status( - self._macro.get("STATUS", "ERROR"), self._macro.get("SOLUTION_STATUS","ERROR") + self._macro.get("STATUS", "ERROR"), + self._macro.get("SOLUTION_STATUS", "ERROR"), ) if "Objective Sense Maximization" in self._lst: diff --git a/pyomo/solvers/tests/checks/test_SAS.py b/pyomo/solvers/tests/checks/test_SAS.py index 4592343b17f..654820f5060 100644 --- a/pyomo/solvers/tests/checks/test_SAS.py +++ b/pyomo/solvers/tests/checks/test_SAS.py @@ -13,15 +13,8 @@ minimize, Suffix, ) -from pyomo.opt.results import ( - SolverStatus, - TerminationCondition, - ProblemSense, -) -from pyomo.opt import ( - SolverFactory, - check_available_solvers, -) +from pyomo.opt.results import SolverStatus, TerminationCondition, ProblemSense +from pyomo.opt import SolverFactory, check_available_solvers CAS_OPTIONS = { From cda764d85ff7b37d72f8206c599f1592a0b477de Mon Sep 17 00:00:00 2001 From: "Philipp Christophel (phchri)" Date: Mon, 7 Aug 2023 09:44:00 -0400 Subject: [PATCH 005/113] Updated version for remote testing --- .github/workflows/typos.toml | 2 + .gitignore | 5 +- pyomo/solvers/plugins/solvers/SAS.py | 257 +++++++++++++++++-------- pyomo/solvers/tests/checks/test_SAS.py | 183 ++++++++++++------ 4 files changed, 307 insertions(+), 140 deletions(-) diff --git a/.github/workflows/typos.toml b/.github/workflows/typos.toml index c9fe9e804a2..71d9ad0355f 100644 --- a/.github/workflows/typos.toml +++ b/.github/workflows/typos.toml @@ -38,4 +38,6 @@ caf = "caf" WRONLY = "WRONLY" # Ignore the name Hax Hax = "Hax" +# Ignore dout (short for dual output in SAS solvers) +dout = "dout" # AS NEEDED: Add More Words Below diff --git a/.gitignore b/.gitignore index 09069552990..7309ff1e8a8 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,9 @@ .spyder* .ropeproject .vscode +.env +venv +sascfg_personal.py # Python generates numerous files when byte compiling / installing packages *.pyx *.pyc @@ -24,4 +27,4 @@ gurobi.log # Jupyterhub/Jupyterlab checkpoints .ipynb_checkpoints -cplex.log \ No newline at end of file +cplex.log diff --git a/pyomo/solvers/plugins/solvers/SAS.py b/pyomo/solvers/plugins/solvers/SAS.py index ed0e63d44d6..87a6a18c1f4 100644 --- a/pyomo/solvers/plugins/solvers/SAS.py +++ b/pyomo/solvers/plugins/solvers/SAS.py @@ -1,8 +1,9 @@ -__all__ = ['SAS'] +__all__ = ["SAS"] import logging import sys -import os +from os import stat +import uuid from io import StringIO from abc import ABC, abstractmethod @@ -24,7 +25,7 @@ from pyomo.core.kernel.block import IBlock -logger = logging.getLogger('pyomo.solvers') +logger = logging.getLogger("pyomo.solvers") STATUS_TO_SOLVERSTATUS = { @@ -112,20 +113,20 @@ ] -@SolverFactory.register('sas', 
doc='The SAS LP/MIP solver') +@SolverFactory.register("sas", doc="The SAS LP/MIP solver") class SAS(OptSolver): """The SAS optimization solver""" def __new__(cls, *args, **kwds): - mode = kwds.pop('solver_io', None) + mode = kwds.pop("solver_io", None) if mode != None: return SolverFactory(mode) else: # Choose solver factory automatically - # bassed on what can be loaded. - s = SolverFactory('_sas94', **kwds) + # based on what can be loaded. + s = SolverFactory("_sas94", **kwds) if not s.available(): - s = SolverFactory('_sascas', **kwds) + s = SolverFactory("_sascas", **kwds) return s @@ -134,7 +135,7 @@ class SASAbc(ABC, OptSolver): def __init__(self, **kwds): """Initialize the SAS solver interfaces.""" - kwds['type'] = 'sas' + kwds["type"] = "sas" super(SASAbc, self).__init__(**kwds) # @@ -157,7 +158,7 @@ def _presolve(self, *args, **kwds): TempfileManager.push() # Get the warmstart flag - self.warmstart_flag = kwds.pop('warmstart', False) + self.warmstart_flag = kwds.pop("warmstart", False) # Call parent presolve function super(SASAbc, self)._presolve(*args, **kwds) @@ -173,7 +174,7 @@ def _presolve(self, *args, **kwds): Var, active=True, descend_into=False ): self._vars.append(vardata) - # Store the symbal map, we need this for example when writing the warmstart file + # Store the symbol map, we need this for example when writing the warmstart file if isinstance(self._instance, IBlock): self._smap = getattr(self._instance, "._symbol_maps")[self._smap_id] else: @@ -186,8 +187,8 @@ def _presolve(self, *args, **kwds): ) smap = self._smap numWritten = 0 - with open(filename, 'w') as file: - file.write('_VAR_,_VALUE_\n') + with open(filename, "w") as file: + file.write("_VAR_,_VALUE_\n") for var in self._vars: if (var.value is not None) and (id(var) in smap.byObject): name = smap.byObject[id(var)] @@ -239,8 +240,7 @@ def _create_results_from_status(self, status, solution_status): @abstractmethod def _apply_solver(self): - """The routine that performs the solve""" - raise NotImplemented("This is an abstract function and thus not implemented!") + pass def _postsolve(self): """Clean up at the end, especially the temp files.""" @@ -260,7 +260,7 @@ def warm_start_capable(self): return True -@SolverFactory.register('_sas94', doc='SAS 9.4 interface') +@SolverFactory.register("_sas94", doc="SAS 9.4 interface") class SAS94(SASAbc): """ Solver interface for SAS 9.4 using saspy. See the saspy documentation about @@ -298,6 +298,9 @@ def _create_statement_str(self, statement): else: return "" + def sas_version(self): + return self._sasver + def _apply_solver(self): """ "Prepare the options and run the solver. 
Then store the data to be returned.""" logger.debug("Running SAS") @@ -326,39 +329,81 @@ def _apply_solver(self): decompsubprob_str = self._create_statement_str("decompsubprob") rootnode_str = self._create_statement_str("rootnode") + # Get a unique identifier, always use the same with different prefixes + unique = uuid.uuid4().hex[:16] + + # Create unique filename for output datasets + primalout_dataset_name = "pout" + unique + dualout_dataset_name = "dout" + unique + primalin_dataset_name = None + # Handle warmstart warmstart_str = "" if self.warmstart_flag: # Set the warmstart basis option + primalin_dataset_name = "pin" + unique if proc != "OPTLP": warmstart_str = """ proc import datafile='{primalin}' - out=primalin + out={primalin_dataset_name} dbms=csv replace; getnames=yes; run; """.format( - primalin=self._warm_start_file_name + primalin=self._warm_start_file_name, + primalin_dataset_name=primalin_dataset_name, ) - self.options["primalin"] = "primalin" + self.options["primalin"] = primalin_dataset_name # Convert options to string opt_str = " ".join( option + "=" + str(value) for option, value in self.options.items() ) - # Start a SAS session, submit the code and return the results`` + # Set some SAS options to make the log more clean + sas_options = "option notes nonumber nodate nosource pagesize=max;" + + # Start a SAS session, submit the code and return the results with self._sas.SASsession() as sas: # Find the version of 9.4 we are using - if sas.sasver.startswith("9.04.01M5"): + self._sasver = sas.sasver + + # Upload files, only if not accessible locally + upload_mps = False + if not sas.file_info(self._problem_files[0], quiet=True): + sas.upload( + self._problem_files[0], self._problem_files[0], overwrite=True + ) + upload_mps = True + + upload_pin = False + if self.warmstart_flag and not sas.file_info( + self._warm_start_file_name, quiet=True + ): + sas.upload( + self._warm_start_file_name, + self._warm_start_file_name, + overwrite=True, + ) + upload_pin = True + + # Using a function call to make it easier to moch the version check + version = self.sas_version().split("M", 1)[1][0] + if int(version) < 5: + raise NotImplementedError( + "Support for SAS 9.4 M4 and earlier is no implemented." 
+ ) + elif int(version) == 5: # In 9.4M5 we have to create an MPS data set from an MPS file first # Earlier versions will not work because the MPS format in incompatible + mps_dataset_name = "mps" + unique res = sas.submit( """ + {sas_options} {warmstart} - %MPS2SASD(MPSFILE="{mpsfile}", OUTDATA=mpsdata, MAXLEN=256, FORMAT=FREE); - proc {proc} data=mpsdata {options} primalout=primalout dualout=dualout; + %MPS2SASD(MPSFILE="{mpsfile}", OUTDATA={mps_dataset_name}, MAXLEN=256, FORMAT=FREE); + proc {proc} data={mps_dataset_name} {options} primalout={primalout_dataset_name} dualout={dualout_dataset_name}; {decomp} {decompmaster} {decompmasterip} @@ -366,10 +411,14 @@ def _apply_solver(self): {rootnode} run; """.format( + sas_options=sas_options, warmstart=warmstart_str, proc=proc, mpsfile=self._problem_files[0], + mps_dataset_name=mps_dataset_name, options=opt_str, + primalout_dataset_name=primalout_dataset_name, + dualout_dataset_name=dualout_dataset_name, decomp=decomp_str, decompmaster=decompmaster_str, decompmasterip=decompmasterip_str, @@ -378,12 +427,14 @@ def _apply_solver(self): ), results="TEXT", ) + sas.sasdata(mps_dataset_name).delete(quiet=True) else: # Since 9.4M6+ optlp/optmilp can read mps files directly res = sas.submit( """ + {sas_options} {warmstart} - proc {proc} mpsfile=\"{mpsfile}\" {options} primalout=primalout dualout=dualout; + proc {proc} mpsfile=\"{mpsfile}\" {options} primalout={primalout_dataset_name} dualout={dualout_dataset_name}; {decomp} {decompmaster} {decompmasterip} @@ -391,10 +442,13 @@ def _apply_solver(self): {rootnode} run; """.format( + sas_options=sas_options, warmstart=warmstart_str, proc=proc, mpsfile=self._problem_files[0], options=opt_str, + primalout_dataset_name=primalout_dataset_name, + dualout_dataset_name=dualout_dataset_name, decomp=decomp_str, decompmaster=decompmaster_str, decompmasterip=decompmasterip_str, @@ -404,26 +458,40 @@ def _apply_solver(self): results="TEXT", ) + # Delete uploaded file + if upload_mps: + sas.file_delete(self._problem_files[0], quiet=True) + if self.warmstart_flag and upload_pin: + sas.file_delete(self._warm_start_file_name, quiet=True) + # Store log and ODS output self._log = res["LOG"] self._lst = res["LST"] - # Print log if requested by the user - if self._tee: - print(self._log) if "ERROR 22-322: Syntax error" in self._log: raise ValueError( "An option passed to the SAS solver caused a syntax error: {log}".format( log=self._log ) ) + else: + # Print log if requested by the user, only if we did not already print it + if self._tee: + print(self._log) self._macro = dict( (key.strip(), value.strip()) for key, value in ( pair.split("=") for pair in sas.symget("_OR" + proc + "_").split() ) ) - primal_out = sas.sd2df("primalout") - dual_out = sas.sd2df("dualout") + if self._macro.get("STATUS", "ERROR") == "OK": + primal_out = sas.sd2df(primalout_dataset_name) + dual_out = sas.sd2df(dualout_dataset_name) + + # Delete data sets, they will go away automatically, but does not hurt to delete them + if primalin_dataset_name: + sas.sasdata(primalin_dataset_name).delete(quiet=True) + sas.sasdata(primalout_dataset_name).delete(quiet=True) + sas.sasdata(dualout_dataset_name).delete(quiet=True) # Prepare the solver results results = self.results = self._create_results_from_status( @@ -445,35 +513,35 @@ def _apply_solver(self): sol.termination_condition = TerminationCondition.optimal # Store objective value in solution - sol.objective['__default_objective__'] = {'Value': self._macro["OBJECTIVE"]} + 
sol.objective["__default_objective__"] = {"Value": self._macro["OBJECTIVE"]} if proc == "OPTLP": # Convert primal out data set to variable dictionary # Use panda functions for efficiency - primal_out = primal_out[['_VAR_', '_VALUE_', '_STATUS_', '_R_COST_']] - primal_out = primal_out.set_index('_VAR_', drop=True) + primal_out = primal_out[["_VAR_", "_VALUE_", "_STATUS_", "_R_COST_"]] + primal_out = primal_out.set_index("_VAR_", drop=True) primal_out = primal_out.rename( - {'_VALUE_': 'Value', '_STATUS_': 'Status', '_R_COST_': 'rc'}, - axis='columns', + {"_VALUE_": "Value", "_STATUS_": "Status", "_R_COST_": "rc"}, + axis="columns", ) - sol.variable = primal_out.to_dict('index') + sol.variable = primal_out.to_dict("index") # Convert dual out data set to constraint dictionary # Use pandas functions for efficiency - dual_out = dual_out[['_ROW_', '_VALUE_', '_STATUS_', '_ACTIVITY_']] - dual_out = dual_out.set_index('_ROW_', drop=True) + dual_out = dual_out[["_ROW_", "_VALUE_", "_STATUS_", "_ACTIVITY_"]] + dual_out = dual_out.set_index("_ROW_", drop=True) dual_out = dual_out.rename( - {'_VALUE_': 'dual', '_STATUS_': 'Status', '_ACTIVITY_': 'slack'}, - axis='columns', + {"_VALUE_": "dual", "_STATUS_": "Status", "_ACTIVITY_": "slack"}, + axis="columns", ) - sol.constraint = dual_out.to_dict('index') + sol.constraint = dual_out.to_dict("index") else: # Convert primal out data set to variable dictionary # Use pandas functions for efficiency - primal_out = primal_out[['_VAR_', '_VALUE_']] - primal_out = primal_out.set_index('_VAR_', drop=True) - primal_out = primal_out.rename({'_VALUE_': 'Value'}, axis='columns') - sol.variable = primal_out.to_dict('index') + primal_out = primal_out[["_VAR_", "_VALUE_"]] + primal_out = primal_out.set_index("_VAR_", drop=True) + primal_out = primal_out.rename({"_VALUE_": "Value"}, axis="columns") + sol.variable = primal_out.to_dict("index") self._rc = 0 return Bunch(rc=self._rc, log=self._log) @@ -504,7 +572,7 @@ def log(self): return self._log.getvalue() -@SolverFactory.register('_sascas', doc='SAS Viya CAS Server interface') +@SolverFactory.register("_sascas", doc="SAS Viya CAS Server interface") class SASCAS(SASAbc): """ Solver interface connection to a SAS Viya CAS server using swat. @@ -553,70 +621,89 @@ def _apply_solver(self): # Check if there are integer variables, this might be slow action = "solveMilp" if self._has_integer_variables() else "solveLp" + # Get a unique identifier, always use the same with different prefixes + unique = uuid.uuid4().hex[:16] + # Connect to CAS server with redirect_stdout(SASLogWriter(self._tee)) as self._log_writer: s = self._sas.CAS(**cas_opts) try: # Load the optimization action set - s.loadactionset('optimization') + s.loadactionset("optimization") + + # Declare a unique table name for the mps table + mpsdata_table_name = "mps" + unique # Upload mps file to CAS - if os.stat(self._problem_files[0]).st_size >= 2 * 1024**3: - # For large files, use convertMPS, first create file for upload + if stat(self._problem_files[0]).st_size >= 2 * 1024**3: + # For files larger than 2 GB (this is a limitation of the loadMps action used in the else part). + # Use convertMPS, first create file for upload. 
mpsWithIdFileName = TempfileManager.create_tempfile( ".mps.csv", text=True ) - with open(mpsWithIdFileName, 'w') as mpsWithId: - mpsWithId.write('_ID_\tText\n') - with open(self._problem_files[0], 'r') as f: + with open(mpsWithIdFileName, "w") as mpsWithId: + mpsWithId.write("_ID_\tText\n") + with open(self._problem_files[0], "r") as f: id = 0 for line in f: id += 1 - mpsWithId.write(str(id) + '\t' + line.rstrip() + '\n') + mpsWithId.write(str(id) + "\t" + line.rstrip() + "\n") # Upload .mps.csv file + mpscsv_table_name = "csv" + unique s.upload_file( mpsWithIdFileName, - casout={"name": "mpscsv", "replace": True}, + casout={"name": mpscsv_table_name, "replace": True}, importoptions={"filetype": "CSV", "delimiter": "\t"}, ) # Convert .mps.csv file to .mps s.optimization.convertMps( - data="mpscsv", - casOut={"name": "mpsdata", "replace": True}, + data=mpscsv_table_name, + casOut={"name": mpsdata_table_name, "replace": True}, format="FREE", ) + + # Delete the table we don't need anymore + if mpscsv_table_name: + s.dropTable(name=mpscsv_table_name, quiet=True) else: - # For small files, use loadMPS - with open(self._problem_files[0], 'r') as mps_file: + # For small files (less than 2 GB), use loadMps + with open(self._problem_files[0], "r") as mps_file: s.optimization.loadMps( mpsFileString=mps_file.read(), - casout={"name": "mpsdata", "replace": True}, + casout={"name": mpsdata_table_name, "replace": True}, format="FREE", ) + primalin_table_name = None if self.warmstart_flag: + primalin_table_name = "pin" + unique # Upload warmstart file to CAS s.upload_file( self._warm_start_file_name, - casout={"name": "primalin", "replace": True}, + casout={"name": primalin_table_name, "replace": True}, importoptions={"filetype": "CSV"}, ) - self.options["primalin"] = "primalin" + self.options["primalin"] = primalin_table_name + + # Define output table names + primalout_table_name = "pout" + unique + dualout_table_name = None # Solve the problem in CAS if action == "solveMilp": r = s.optimization.solveMilp( - data={"name": "mpsdata"}, - primalOut={"name": "primalout", "replace": True}, + data={"name": mpsdata_table_name}, + primalOut={"name": primalout_table_name, "replace": True}, **self.options ) else: + dualout_table_name = "dout" + unique r = s.optimization.solveLp( - data={"name": "mpsdata"}, - primalOut={"name": "primalout", "replace": True}, - dualOut={"name": "dualout", "replace": True}, + data={"name": mpsdata_table_name}, + primalOut={"name": primalout_table_name, "replace": True}, + dualOut={"name": dualout_table_name, "replace": True}, **self.options ) @@ -644,44 +731,44 @@ def _apply_solver(self): sol.termination_condition = TerminationCondition.optimal # Store objective value in solution - sol.objective['__default_objective__'] = { - 'Value': r["objective"] + sol.objective["__default_objective__"] = { + "Value": r["objective"] } if action == "solveMilp": - primal_out = s.CASTable(name="primalout") + primal_out = s.CASTable(name=primalout_table_name) # Use pandas functions for efficiency - primal_out = primal_out[['_VAR_', '_VALUE_']] + primal_out = primal_out[["_VAR_", "_VALUE_"]] sol.variable = {} for row in primal_out.itertuples(index=False): - sol.variable[row[0]] = {'Value': row[1]} + sol.variable[row[0]] = {"Value": row[1]} else: # Convert primal out data set to variable dictionary # Use panda functions for efficiency - primal_out = s.CASTable(name="primalout") + primal_out = s.CASTable(name=primalout_table_name) primal_out = primal_out[ - ['_VAR_', '_VALUE_', '_STATUS_', '_R_COST_'] + 
["_VAR_", "_VALUE_", "_STATUS_", "_R_COST_"] ] sol.variable = {} for row in primal_out.itertuples(index=False): sol.variable[row[0]] = { - 'Value': row[1], - 'Status': row[2], - 'rc': row[3], + "Value": row[1], + "Status": row[2], + "rc": row[3], } # Convert dual out data set to constraint dictionary # Use pandas functions for efficiency - dual_out = s.CASTable(name="dualout") + dual_out = s.CASTable(name=dualout_table_name) dual_out = dual_out[ - ['_ROW_', '_VALUE_', '_STATUS_', '_ACTIVITY_'] + ["_ROW_", "_VALUE_", "_STATUS_", "_ACTIVITY_"] ] sol.constraint = {} for row in dual_out.itertuples(index=False): sol.constraint[row[0]] = { - 'dual': row[1], - 'Status': row[2], - 'slack': row[3], + "dual": row[1], + "Status": row[2], + "slack": row[3], } else: results = self.results = SolverResults() @@ -692,10 +779,16 @@ def _apply_solver(self): ) finally: + if mpsdata_table_name: + s.dropTable(name=mpsdata_table_name, quiet=True) + if primalin_table_name: + s.dropTable(name=primalin_table_name, quiet=True) + if primalout_table_name: + s.dropTable(name=primalout_table_name, quiet=True) + if dualout_table_name: + s.dropTable(name=dualout_table_name, quiet=True) s.close() self._log = self._log_writer.log() - if self._tee: - print(self._log) self._rc = 0 return Bunch(rc=self._rc, log=self._log) diff --git a/pyomo/solvers/tests/checks/test_SAS.py b/pyomo/solvers/tests/checks/test_SAS.py index 654820f5060..3a63e258600 100644 --- a/pyomo/solvers/tests/checks/test_SAS.py +++ b/pyomo/solvers/tests/checks/test_SAS.py @@ -1,5 +1,6 @@ import os import pyomo.common.unittest as unittest +from unittest import mock from pyomo.environ import ( ConcreteModel, Var, @@ -15,20 +16,20 @@ ) from pyomo.opt.results import SolverStatus, TerminationCondition, ProblemSense from pyomo.opt import SolverFactory, check_available_solvers - +import warnings CAS_OPTIONS = { - "hostname": os.environ.get('CAS_SERVER', None), - "port": os.environ.get('CAS_PORT', None), - "authinfo": os.environ.get('CAS_AUTHINFO', None), + "hostname": os.environ.get("CASHOST", None), + "port": os.environ.get("CASPORT", None), + "authinfo": os.environ.get("CASAUTHINFO", None), } -sas_available = check_available_solvers('sas') +sas_available = check_available_solvers("sas") class SASTestAbc: - solver_io = '_sas94' + solver_io = "_sas94" base_options = {} def setObj(self): @@ -41,6 +42,8 @@ def setX(self): self.instance.X = Var([1, 2, 3], within=NonNegativeReals) def setUp(self): + # Disable resource warnings + warnings.filterwarnings("ignore", category=ResourceWarning) instance = self.instance = ConcreteModel() self.setX() X = instance.X @@ -55,7 +58,7 @@ def setUp(self): instance.rc = Suffix(direction=Suffix.IMPORT) instance.dual = Suffix(direction=Suffix.IMPORT) - self.opt_sas = SolverFactory('sas', solver_io=self.solver_io) + self.opt_sas = SolverFactory("sas", solver_io=self.solver_io) def tearDown(self): del self.opt_sas @@ -74,7 +77,7 @@ def run_solver(self, **kwargs): self.results = opt_sas.solve(instance, **kwargs) -class SASTestLP(SASTestAbc, unittest.TestCase): +class SASTestLP(SASTestAbc): def checkSolution(self): instance = self.instance results = self.results @@ -111,41 +114,39 @@ def checkSolution(self): self.assertAlmostEqual(instance.dual[instance.R3], sense * 0.0) # Check basis status - self.assertEqual(instance.status[instance.X[1]], 'L') - self.assertEqual(instance.status[instance.X[2]], 'B') - self.assertEqual(instance.status[instance.X[3]], 'L') - self.assertEqual(instance.status[instance.R1], 'U') - 
self.assertEqual(instance.status[instance.R2], 'B') - self.assertEqual(instance.status[instance.R3], 'B') - - @unittest.skipIf(not sas_available, "The SAS solver is not available") + self.assertEqual(instance.status[instance.X[1]], "L") + self.assertEqual(instance.status[instance.X[2]], "B") + self.assertEqual(instance.status[instance.X[3]], "L") + self.assertEqual(instance.status[instance.R1], "U") + self.assertEqual(instance.status[instance.R2], "B") + self.assertEqual(instance.status[instance.R3], "B") + def test_solver_default(self): self.run_solver() self.checkSolution() - @unittest.skipIf(not sas_available, "The SAS solver is not available") + def test_solver_tee(self): + self.run_solver(tee=True) + self.checkSolution() + def test_solver_primal(self): self.run_solver(options={"algorithm": "ps"}) self.assertIn("NOTE: The Primal Simplex algorithm is used.", self.opt_sas._log) self.checkSolution() - @unittest.skipIf(not sas_available, "The SAS solver is not available") def test_solver_ipm(self): self.run_solver(options={"algorithm": "ip"}) self.assertIn("NOTE: The Interior Point algorithm is used.", self.opt_sas._log) self.checkSolution() - @unittest.skipIf(not sas_available, "The SAS solver is not available") def test_solver_intoption(self): self.run_solver(options={"maxiter": 20}) self.checkSolution() - @unittest.skipIf(not sas_available, "The SAS solver is not available") def test_solver_invalidoption(self): with self.assertRaisesRegex(ValueError, "syntax error"): self.run_solver(options={"foo": "bar"}) - @unittest.skipIf(not sas_available, "The SAS solver is not available") def test_solver_max(self): X = self.instance.X self.instance.Obj.set_value(expr=-2 * X[1] + 3 * X[2] + 4 * X[3]) @@ -154,7 +155,6 @@ def test_solver_max(self): self.checkSolution() self.assertEqual(self.results.problem.sense, ProblemSense.maximize) - @unittest.skipIf(not sas_available, "The SAS solver is not available") def test_solver_infeasible(self): instance = self.instance X = instance.X @@ -167,21 +167,23 @@ def test_solver_infeasible(self): ) self.assertEqual(results.solver.message, "The problem is infeasible.") - @unittest.skipIf(not sas_available, "The SAS solver is not available") def test_solver_infeasible_or_unbounded(self): self.instance.X.domain = Reals self.run_solver() results = self.results self.assertEqual(results.solver.status, SolverStatus.warning) - self.assertEqual( + self.assertIn( results.solver.termination_condition, - TerminationCondition.infeasibleOrUnbounded, + [ + TerminationCondition.infeasibleOrUnbounded, + TerminationCondition.unbounded, + ], ) - self.assertEqual( - results.solver.message, "The problem is infeasible or unbounded." + self.assertIn( + results.solver.message, + ["The problem is infeasible or unbounded.", "The problem is unbounded."], ) - @unittest.skipIf(not sas_available, "The SAS solver is not available") def test_solver_unbounded(self): self.instance.X.domain = Reals self.run_solver(options={"presolver": "none", "algorithm": "primal"}) @@ -229,7 +231,6 @@ def checkSolutionDecomp(self): # Don't check basis status for decomp - @unittest.skipIf(not sas_available, "The SAS solver is not available") def test_solver_decomp(self): self.run_solver( options={ @@ -243,7 +244,6 @@ def test_solver_decomp(self): ) self.checkSolutionDecomp() - @unittest.skipIf(not sas_available, "The SAS solver is not available") def test_solver_iis(self): self.run_solver(options={"iis": "true"}) results = self.results @@ -257,7 +257,6 @@ def test_solver_iis(self): "The problem is feasible. 
This status is displayed when the IIS=TRUE option is specified and the problem is feasible.", ) - @unittest.skipIf(not sas_available, "The SAS solver is not available") def test_solver_maxiter(self): self.run_solver(options={"maxiter": 1}) results = self.results @@ -270,13 +269,59 @@ def test_solver_maxiter(self): "The maximum allowable number of iterations was reached.", ) + def test_solver_with_milp(self): + self.run_solver(options={"with": "milp"}) + self.assertIn( + "WARNING: The problem has no integer variables.", self.opt_sas._log + ) + -class SASTestLPCAS(SASTestLP): - solver_io = '_sascas' +@unittest.skipIf(not sas_available, "The SAS solver is not available") +class SASTestLP94(SASTestLP, unittest.TestCase): + @mock.patch( + "pyomo.solvers.plugins.solvers.SAS.SAS94.sas_version", + return_value="2sd45s39M4234232", + ) + def test_solver_versionM4(self, sas): + with self.assertRaises(NotImplementedError): + self.run_solver() + + @mock.patch( + "pyomo.solvers.plugins.solvers.SAS.SAS94.sas_version", + return_value="234897293M5324u98", + ) + def test_solver_versionM5(self, sas): + self.run_solver() + self.checkSolution() + + @mock.patch("saspy.SASsession.submit", return_value={"LOG": "", "LST": ""}) + @mock.patch("saspy.SASsession.symget", return_value="STATUS=OUT_OF_MEMORY") + def test_solver_out_of_memory(self, submit_mock, symget_mocks): + self.run_solver(load_solutions=False) + results = self.results + self.assertEqual(results.solver.status, SolverStatus.aborted) + + @mock.patch("saspy.SASsession.submit", return_value={"LOG": "", "LST": ""}) + @mock.patch("saspy.SASsession.symget", return_value="STATUS=ERROR") + def test_solver_error(self, submit_mock, symget_mock): + self.run_solver(load_solutions=False) + results = self.results + self.assertEqual(results.solver.status, SolverStatus.error) + + +@unittest.skipIf(not sas_available, "The SAS solver is not available") +class SASTestLPCAS(SASTestLP, unittest.TestCase): + solver_io = "_sascas" base_options = CAS_OPTIONS + @mock.patch("pyomo.solvers.plugins.solvers.SAS.stat") + def test_solver_large_file(self, os_stat): + os_stat.return_value.st_size = 3 * 1024**3 + self.run_solver() + self.checkSolution() + -class SASTestMILP(SASTestAbc, unittest.TestCase): +class SASTestMILP(SASTestAbc): def setX(self): self.instance.X = Var([1, 2, 3], within=NonNegativeIntegers) @@ -301,12 +346,14 @@ def checkSolution(self): self.assertAlmostEqual(instance.X[2].value, 1.0) self.assertAlmostEqual(instance.X[3].value, 1.0) - @unittest.skipIf(not sas_available, "The SAS solver is not available") def test_solver_default(self): - self.run_solver(options={}) + self.run_solver() + self.checkSolution() + + def test_solver_tee(self): + self.run_solver(tee=True) self.checkSolution() - @unittest.skipIf(not sas_available, "The SAS solver is not available") def test_solver_presolve(self): self.run_solver(options={"presolver": "none"}) self.assertIn( @@ -314,17 +361,14 @@ def test_solver_presolve(self): ) self.checkSolution() - @unittest.skipIf(not sas_available, "The SAS solver is not available") def test_solver_intoption(self): self.run_solver(options={"maxnodes": 20}) self.checkSolution() - @unittest.skipIf(not sas_available, "The SAS solver is not available") def test_solver_invalidoption(self): with self.assertRaisesRegex(ValueError, "syntax error"): self.run_solver(options={"foo": "bar"}) - @unittest.skipIf(not sas_available, "The SAS solver is not available") def test_solver_max(self): X = self.instance.X self.instance.Obj.set_value(expr=-2 * X[1] + 3 * X[2] 
+ 4 * X[3]) @@ -332,7 +376,6 @@ def test_solver_max(self): self.run_solver() self.checkSolution() - @unittest.skipIf(not sas_available, "The SAS solver is not available") def test_solver_infeasible(self): instance = self.instance X = instance.X @@ -345,22 +388,23 @@ def test_solver_infeasible(self): ) self.assertEqual(results.solver.message, "The problem is infeasible.") - @unittest.skipIf(not sas_available, "The SAS solver is not available") - @unittest.skip("Returns wrong status for some versions.") def test_solver_infeasible_or_unbounded(self): self.instance.X.domain = Integers self.run_solver() results = self.results self.assertEqual(results.solver.status, SolverStatus.warning) - self.assertEqual( + self.assertIn( results.solver.termination_condition, - TerminationCondition.infeasibleOrUnbounded, + [ + TerminationCondition.infeasibleOrUnbounded, + TerminationCondition.unbounded, + ], ) - self.assertEqual( - results.solver.message, "The problem is infeasible or unbounded." + self.assertIn( + results.solver.message, + ["The problem is infeasible or unbounded.", "The problem is unbounded."], ) - @unittest.skipIf(not sas_available, "The SAS solver is not available") def test_solver_unbounded(self): self.instance.X.domain = Integers self.run_solver( @@ -373,7 +417,6 @@ def test_solver_unbounded(self): ) self.assertEqual(results.solver.message, "The problem is unbounded.") - @unittest.skipIf(not sas_available, "The SAS solver is not available") def test_solver_decomp(self): self.run_solver( options={ @@ -388,12 +431,10 @@ def test_solver_decomp(self): ) self.checkSolution() - @unittest.skipIf(not sas_available, "The SAS solver is not available") def test_solver_rootnode(self): self.run_solver(options={"rootnode": {"presolver": "automatic"}}) self.checkSolution() - @unittest.skipIf(not sas_available, "The SAS solver is not available") def test_solver_maxnodes(self): self.run_solver(options={"maxnodes": 0}) results = self.results @@ -406,7 +447,6 @@ def test_solver_maxnodes(self): "The solver reached the maximum number of nodes specified by the MAXNODES= option and found a solution.", ) - @unittest.skipIf(not sas_available, "The SAS solver is not available") def test_solver_maxsols(self): self.run_solver(options={"maxsols": 1}) results = self.results @@ -419,7 +459,6 @@ def test_solver_maxsols(self): "The solver reached the maximum number of solutions specified by the MAXSOLS= option.", ) - @unittest.skipIf(not sas_available, "The SAS solver is not available") def test_solver_target(self): self.run_solver(options={"target": -6.0}) results = self.results @@ -432,7 +471,6 @@ def test_solver_target(self): "The solution is not worse than the target specified by the TARGET= option.", ) - @unittest.skipIf(not sas_available, "The SAS solver is not available") def test_solver_primalin(self): X = self.instance.X X[1] = None @@ -445,11 +483,42 @@ def test_solver_primalin(self): self.opt_sas._log, ) + def test_solver_primalin_nosol(self): + X = self.instance.X + X[1] = None + X[2] = None + X[3] = None + self.run_solver(warmstart=True) + self.checkSolution() + + @mock.patch("pyomo.solvers.plugins.solvers.SAS.stat") + def test_solver_large_file(self, os_stat): + os_stat.return_value.st_size = 3 * 1024**3 + self.run_solver() + self.checkSolution() + + def test_solver_with_lp(self): + self.run_solver(options={"with": "lp"}) + self.assertIn( + "contains integer variables; the linear relaxation will be solved.", + self.opt_sas._log, + ) + + def test_solver_warmstart_capable(self): + self.run_solver() + 
self.assertTrue(self.opt_sas.warm_start_capable()) + + +@unittest.skipIf(not sas_available, "The SAS solver is not available") +class SASTestMILP94(SASTestMILP, unittest.TestCase): + pass + -class SASTestMILPCAS(SASTestMILP): - solver_io = '_sascas' +@unittest.skipIf(not sas_available, "The SAS solver is not available") +class SASTestMILPCAS(SASTestMILP, unittest.TestCase): + solver_io = "_sascas" base_options = CAS_OPTIONS -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() From 4b6298da177aa6c16a1bf581dc6e0508e9ee6084 Mon Sep 17 00:00:00 2001 From: "Philipp Christophel (phchri)" Date: Mon, 7 Aug 2023 11:26:10 -0400 Subject: [PATCH 006/113] Change SAS solver interfaces to keep the SAS connection --- pyomo/solvers/plugins/solvers/SAS.py | 266 +++++++++++++------------ pyomo/solvers/tests/checks/test_SAS.py | 11 +- 2 files changed, 149 insertions(+), 128 deletions(-) diff --git a/pyomo/solvers/plugins/solvers/SAS.py b/pyomo/solvers/plugins/solvers/SAS.py index 87a6a18c1f4..a5f74849813 100644 --- a/pyomo/solvers/plugins/solvers/SAS.py +++ b/pyomo/solvers/plugins/solvers/SAS.py @@ -285,6 +285,14 @@ def __init__(self, **kwds): self._python_api_exists = True self._sas.logger.setLevel(logger.level) + # Create the session only as its needed + self._sas_session = None + + def __del__(self): + # Close the session, if we created one + if self._sas_session: + self._sas_session.endsas() + def _create_statement_str(self, statement): """Helper function to create the strings for the statements of the proc OPTLP/OPTMILP code.""" stmt = self.options.pop(statement, None) @@ -364,134 +372,133 @@ def _apply_solver(self): # Set some SAS options to make the log more clean sas_options = "option notes nonumber nodate nosource pagesize=max;" - # Start a SAS session, submit the code and return the results - with self._sas.SASsession() as sas: - # Find the version of 9.4 we are using - self._sasver = sas.sasver - - # Upload files, only if not accessible locally - upload_mps = False - if not sas.file_info(self._problem_files[0], quiet=True): - sas.upload( - self._problem_files[0], self._problem_files[0], overwrite=True - ) - upload_mps = True - - upload_pin = False - if self.warmstart_flag and not sas.file_info( - self._warm_start_file_name, quiet=True - ): - sas.upload( - self._warm_start_file_name, - self._warm_start_file_name, - overwrite=True, - ) - upload_pin = True + # Get the current SAS session, submit the code and return the results + sas = self._sas_session + if sas == None: + sas = self._sas_session = self._sas.SASsession() + + # Find the version of 9.4 we are using + self._sasver = sas.sasver + + # Upload files, only if not accessible locally + upload_mps = False + if not sas.file_info(self._problem_files[0], quiet=True): + sas.upload(self._problem_files[0], self._problem_files[0], overwrite=True) + upload_mps = True + + upload_pin = False + if self.warmstart_flag and not sas.file_info( + self._warm_start_file_name, quiet=True + ): + sas.upload( + self._warm_start_file_name, self._warm_start_file_name, overwrite=True + ) + upload_pin = True - # Using a function call to make it easier to moch the version check - version = self.sas_version().split("M", 1)[1][0] - if int(version) < 5: - raise NotImplementedError( - "Support for SAS 9.4 M4 and earlier is no implemented." 
- ) - elif int(version) == 5: - # In 9.4M5 we have to create an MPS data set from an MPS file first - # Earlier versions will not work because the MPS format in incompatible - mps_dataset_name = "mps" + unique - res = sas.submit( - """ - {sas_options} - {warmstart} - %MPS2SASD(MPSFILE="{mpsfile}", OUTDATA={mps_dataset_name}, MAXLEN=256, FORMAT=FREE); - proc {proc} data={mps_dataset_name} {options} primalout={primalout_dataset_name} dualout={dualout_dataset_name}; - {decomp} - {decompmaster} - {decompmasterip} - {decompsubprob} - {rootnode} - run; - """.format( - sas_options=sas_options, - warmstart=warmstart_str, - proc=proc, - mpsfile=self._problem_files[0], - mps_dataset_name=mps_dataset_name, - options=opt_str, - primalout_dataset_name=primalout_dataset_name, - dualout_dataset_name=dualout_dataset_name, - decomp=decomp_str, - decompmaster=decompmaster_str, - decompmasterip=decompmasterip_str, - decompsubprob=decompsubprob_str, - rootnode=rootnode_str, - ), - results="TEXT", - ) - sas.sasdata(mps_dataset_name).delete(quiet=True) - else: - # Since 9.4M6+ optlp/optmilp can read mps files directly - res = sas.submit( - """ - {sas_options} - {warmstart} - proc {proc} mpsfile=\"{mpsfile}\" {options} primalout={primalout_dataset_name} dualout={dualout_dataset_name}; - {decomp} - {decompmaster} - {decompmasterip} - {decompsubprob} - {rootnode} - run; - """.format( - sas_options=sas_options, - warmstart=warmstart_str, - proc=proc, - mpsfile=self._problem_files[0], - options=opt_str, - primalout_dataset_name=primalout_dataset_name, - dualout_dataset_name=dualout_dataset_name, - decomp=decomp_str, - decompmaster=decompmaster_str, - decompmasterip=decompmasterip_str, - decompsubprob=decompsubprob_str, - rootnode=rootnode_str, - ), - results="TEXT", - ) + # Using a function call to make it easier to moch the version check + version = self.sas_version().split("M", 1)[1][0] + if int(version) < 5: + raise NotImplementedError( + "Support for SAS 9.4 M4 and earlier is no implemented." 
+ ) + elif int(version) == 5: + # In 9.4M5 we have to create an MPS data set from an MPS file first + # Earlier versions will not work because the MPS format in incompatible + mps_dataset_name = "mps" + unique + res = sas.submit( + """ + {sas_options} + {warmstart} + %MPS2SASD(MPSFILE="{mpsfile}", OUTDATA={mps_dataset_name}, MAXLEN=256, FORMAT=FREE); + proc {proc} data={mps_dataset_name} {options} primalout={primalout_dataset_name} dualout={dualout_dataset_name}; + {decomp} + {decompmaster} + {decompmasterip} + {decompsubprob} + {rootnode} + run; + """.format( + sas_options=sas_options, + warmstart=warmstart_str, + proc=proc, + mpsfile=self._problem_files[0], + mps_dataset_name=mps_dataset_name, + options=opt_str, + primalout_dataset_name=primalout_dataset_name, + dualout_dataset_name=dualout_dataset_name, + decomp=decomp_str, + decompmaster=decompmaster_str, + decompmasterip=decompmasterip_str, + decompsubprob=decompsubprob_str, + rootnode=rootnode_str, + ), + results="TEXT", + ) + sas.sasdata(mps_dataset_name).delete(quiet=True) + else: + # Since 9.4M6+ optlp/optmilp can read mps files directly + res = sas.submit( + """ + {sas_options} + {warmstart} + proc {proc} mpsfile=\"{mpsfile}\" {options} primalout={primalout_dataset_name} dualout={dualout_dataset_name}; + {decomp} + {decompmaster} + {decompmasterip} + {decompsubprob} + {rootnode} + run; + """.format( + sas_options=sas_options, + warmstart=warmstart_str, + proc=proc, + mpsfile=self._problem_files[0], + options=opt_str, + primalout_dataset_name=primalout_dataset_name, + dualout_dataset_name=dualout_dataset_name, + decomp=decomp_str, + decompmaster=decompmaster_str, + decompmasterip=decompmasterip_str, + decompsubprob=decompsubprob_str, + rootnode=rootnode_str, + ), + results="TEXT", + ) - # Delete uploaded file - if upload_mps: - sas.file_delete(self._problem_files[0], quiet=True) - if self.warmstart_flag and upload_pin: - sas.file_delete(self._warm_start_file_name, quiet=True) - - # Store log and ODS output - self._log = res["LOG"] - self._lst = res["LST"] - if "ERROR 22-322: Syntax error" in self._log: - raise ValueError( - "An option passed to the SAS solver caused a syntax error: {log}".format( - log=self._log - ) - ) - else: - # Print log if requested by the user, only if we did not already print it - if self._tee: - print(self._log) - self._macro = dict( - (key.strip(), value.strip()) - for key, value in ( - pair.split("=") for pair in sas.symget("_OR" + proc + "_").split() + # Delete uploaded file + if upload_mps: + sas.file_delete(self._problem_files[0], quiet=True) + if self.warmstart_flag and upload_pin: + sas.file_delete(self._warm_start_file_name, quiet=True) + + # Store log and ODS output + self._log = res["LOG"] + self._lst = res["LST"] + if "ERROR 22-322: Syntax error" in self._log: + raise ValueError( + "An option passed to the SAS solver caused a syntax error: {log}".format( + log=self._log ) ) - if self._macro.get("STATUS", "ERROR") == "OK": - primal_out = sas.sd2df(primalout_dataset_name) - dual_out = sas.sd2df(dualout_dataset_name) + else: + # Print log if requested by the user, only if we did not already print it + if self._tee: + print(self._log) + self._macro = dict( + (key.strip(), value.strip()) + for key, value in ( + pair.split("=") for pair in sas.symget("_OR" + proc + "_").split() + ) + ) + if self._macro.get("STATUS", "ERROR") == "OK": + primal_out = sas.sd2df(primalout_dataset_name) + dual_out = sas.sd2df(dualout_dataset_name) - # Delete data sets, they will go away automatically, but does not hurt to 
delete them - if primalin_dataset_name: - sas.sasdata(primalin_dataset_name).delete(quiet=True) - sas.sasdata(primalout_dataset_name).delete(quiet=True) - sas.sasdata(dualout_dataset_name).delete(quiet=True) + # Delete data sets, they will go away automatically, but does not hurt to delete them + if primalin_dataset_name: + sas.sasdata(primalin_dataset_name).delete(quiet=True) + sas.sasdata(primalout_dataset_name).delete(quiet=True) + sas.sasdata(dualout_dataset_name).delete(quiet=True) # Prepare the solver results results = self.results = self._create_results_from_status( @@ -597,6 +604,14 @@ def __init__(self, **kwds): else: self._python_api_exists = True + # Create the session only as its needed + self._sas_session = None + + def __del__(self): + # Close the session, if we created one + if self._sas_session: + self._sas_session.close() + def _apply_solver(self): """ "Prepare the options and run the solver. Then store the data to be returned.""" logger.debug("Running SAS Viya") @@ -626,7 +641,9 @@ def _apply_solver(self): # Connect to CAS server with redirect_stdout(SASLogWriter(self._tee)) as self._log_writer: - s = self._sas.CAS(**cas_opts) + s = self._sas_session + if s == None: + s = self._sas_session = self._sas.CAS(**cas_opts) try: # Load the optimization action set s.loadactionset("optimization") @@ -787,7 +804,6 @@ def _apply_solver(self): s.dropTable(name=primalout_table_name, quiet=True) if dualout_table_name: s.dropTable(name=dualout_table_name, quiet=True) - s.close() self._log = self._log_writer.log() self._rc = 0 diff --git a/pyomo/solvers/tests/checks/test_SAS.py b/pyomo/solvers/tests/checks/test_SAS.py index 3a63e258600..1a6bbd80f1d 100644 --- a/pyomo/solvers/tests/checks/test_SAS.py +++ b/pyomo/solvers/tests/checks/test_SAS.py @@ -32,6 +32,14 @@ class SASTestAbc: solver_io = "_sas94" base_options = {} + @classmethod + def setUpClass(cls): + cls.opt_sas = SolverFactory("sas", solver_io=cls.solver_io) + + @classmethod + def tearDownClass(cls): + del cls.opt_sas + def setObj(self): X = self.instance.X self.instance.Obj = Objective( @@ -58,10 +66,7 @@ def setUp(self): instance.rc = Suffix(direction=Suffix.IMPORT) instance.dual = Suffix(direction=Suffix.IMPORT) - self.opt_sas = SolverFactory("sas", solver_io=self.solver_io) - def tearDown(self): - del self.opt_sas del self.instance def run_solver(self, **kwargs): From 619edf9805639ef2c95088c90f09d3eb5e8972c4 Mon Sep 17 00:00:00 2001 From: "Philipp Christophel (phchri)" Date: Mon, 7 Aug 2023 14:31:58 -0400 Subject: [PATCH 007/113] Fix formatting issue in comment --- pyomo/solvers/plugins/solvers/SAS.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyomo/solvers/plugins/solvers/SAS.py b/pyomo/solvers/plugins/solvers/SAS.py index a5f74849813..f5840b5d6f3 100644 --- a/pyomo/solvers/plugins/solvers/SAS.py +++ b/pyomo/solvers/plugins/solvers/SAS.py @@ -152,7 +152,7 @@ def __init__(self, **kwds): super(SASAbc, self).set_problem_format(ProblemFormat.mps) def _presolve(self, *args, **kwds): - """ "Set things up for the actual solve.""" + """Set things up for the actual solve.""" # create a context in the temporary file manager for # this plugin - is "pop"ed in the _postsolve method. TempfileManager.push() From faddef1a12afdc8953a14ce78146a3d8ab7cef89 Mon Sep 17 00:00:00 2001 From: "Philipp Christophel (phchri)" Date: Fri, 11 Aug 2023 03:23:50 -0400 Subject: [PATCH 008/113] Reset .gitignore to original state. 
--- .gitignore | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/.gitignore b/.gitignore index 7309ff1e8a8..09069552990 100644 --- a/.gitignore +++ b/.gitignore @@ -3,9 +3,6 @@ .spyder* .ropeproject .vscode -.env -venv -sascfg_personal.py # Python generates numerous files when byte compiling / installing packages *.pyx *.pyc @@ -27,4 +24,4 @@ gurobi.log # Jupyterhub/Jupyterlab checkpoints .ipynb_checkpoints -cplex.log +cplex.log \ No newline at end of file From db18d03e5f4403d014f6fdaaa5a4ca494fee28c5 Mon Sep 17 00:00:00 2001 From: "Philipp Christophel (phchri)" Date: Tue, 12 Mar 2024 05:43:11 -0400 Subject: [PATCH 009/113] Add session options, fix non-optimal return codes and version checking --- pyomo/solvers/plugins/solvers/SAS.py | 180 +++++++++++-------------- pyomo/solvers/tests/checks/test_SAS.py | 17 +-- 2 files changed, 87 insertions(+), 110 deletions(-) diff --git a/pyomo/solvers/plugins/solvers/SAS.py b/pyomo/solvers/plugins/solvers/SAS.py index f5840b5d6f3..87ee31a08af 100644 --- a/pyomo/solvers/plugins/solvers/SAS.py +++ b/pyomo/solvers/plugins/solvers/SAS.py @@ -96,23 +96,6 @@ } -CAS_OPTION_NAMES = [ - "hostname", - "port", - "username", - "password", - "session", - "locale", - "name", - "nworkers", - "authinfo", - "protocol", - "path", - "ssl_ca_list", - "authcode", -] - - @SolverFactory.register("sas", doc="The SAS LP/MIP solver") class SAS(OptSolver): """The SAS optimization solver""" @@ -120,7 +103,7 @@ class SAS(OptSolver): def __new__(cls, *args, **kwds): mode = kwds.pop("solver_io", None) if mode != None: - return SolverFactory(mode) + return SolverFactory(mode, **kwds) else: # Choose solver factory automatically # based on what can be loaded. @@ -216,6 +199,7 @@ def _create_results_from_status(self, status, solution_status): results = SolverResults() results.solver.name = "SAS" results.solver.status = STATUS_TO_SOLVERSTATUS[status] + results.solver.hasSolution = False if results.solver.status == SolverStatus.ok: results.solver.termination_condition = SOLSTATUS_TO_TERMINATIONCOND[ solution_status @@ -226,11 +210,14 @@ def _create_results_from_status(self, status, solution_status): results.solver.status = TerminationCondition.to_solver_status( results.solver.termination_condition ) + if "OPTIMAL" in solution_status or "_SOL" in solution_status: + results.solver.hasSolution = True elif results.solver.status == SolverStatus.aborted: results.solver.termination_condition = TerminationCondition.userInterrupt - results.solver.message = ( - results.solver.termination_message - ) = SOLSTATUS_TO_MESSAGE["ABORTED"] + if solution_status != "ERROR": + results.solver.message = ( + results.solver.termination_message + ) = SOLSTATUS_TO_MESSAGE[solution_status] else: results.solver.termination_condition = TerminationCondition.error results.solver.message = ( @@ -288,6 +275,9 @@ def __init__(self, **kwds): # Create the session only as its needed self._sas_session = None + # Store other options for the SAS session + self._session_options = kwds + def __del__(self): # Close the session, if we created one if self._sas_session: @@ -326,10 +316,6 @@ def _apply_solver(self): # Check if there are integer variables, this might be slow proc = "OPTMILP" if self._has_integer_variables() else "OPTLP" - # Remove CAS options in case they were specified - for opt in CAS_OPTION_NAMES: - self.options.pop(opt, None) - # Get the rootnode options decomp_str = self._create_statement_str("decomp") decompmaster_str = self._create_statement_str("decompmaster") @@ -373,9 +359,7 @@ def 
_apply_solver(self): sas_options = "option notes nonumber nodate nosource pagesize=max;" # Get the current SAS session, submit the code and return the results - sas = self._sas_session - if sas == None: - sas = self._sas_session = self._sas.SASsession() + sas = self._sas_session = self._sas.SASsession(**self._session_options) # Find the version of 9.4 we are using self._sasver = sas.sasver @@ -396,12 +380,13 @@ def _apply_solver(self): upload_pin = True # Using a function call to make it easier to moch the version check - version = self.sas_version().split("M", 1)[1][0] - if int(version) < 5: + major_version = self.sas_version()[0] + minor_version = self.sas_version().split("M", 1)[1][0] + if major_version == "9" and int(minor_version) < 5: raise NotImplementedError( "Support for SAS 9.4 M4 and earlier is no implemented." ) - elif int(version) == 5: + elif major_version == "9" and int(minor_version) == 5: # In 9.4M5 we have to create an MPS data set from an MPS file first # Earlier versions will not work because the MPS format in incompatible mps_dataset_name = "mps" + unique @@ -436,7 +421,7 @@ def _apply_solver(self): ) sas.sasdata(mps_dataset_name).delete(quiet=True) else: - # Since 9.4M6+ optlp/optmilp can read mps files directly + # Since 9.4M6+ optlp/optmilp can read mps files directly (this includes Viya-based local installs) res = sas.submit( """ {sas_options} @@ -512,12 +497,12 @@ def _apply_solver(self): results.problem.sense = ProblemSense.minimize # Prepare the solution information - if results.solver.termination_condition == TerminationCondition.optimal: + if results.solver.hasSolution: sol = results.solution.add() # Store status in solution sol.status = SolutionStatus.feasible - sol.termination_condition = TerminationCondition.optimal + sol.termination_condition = SOLSTATUS_TO_TERMINATIONCOND[self._macro.get("SOLUTION_STATUS", "ERROR")] # Store objective value in solution sol.objective["__default_objective__"] = {"Value": self._macro["OBJECTIVE"]} @@ -606,6 +591,7 @@ def __init__(self, **kwds): # Create the session only as its needed self._sas_session = None + self._session_options = kwds def __del__(self): # Close the session, if we created one @@ -619,13 +605,6 @@ def _apply_solver(self): # Set return code to issue an error if we get interrupted self._rc = -1 - # Extract CAS connection options - cas_opts = {} - for opt in CAS_OPTION_NAMES: - val = self.options.pop(opt, None) - if val != None: - cas_opts[opt] = val - # Figure out if the problem has integer variables with_opt = self.options.pop("with", None) if with_opt == "lp": @@ -643,7 +622,7 @@ def _apply_solver(self): with redirect_stdout(SASLogWriter(self._tee)) as self._log_writer: s = self._sas_session if s == None: - s = self._sas_session = self._sas.CAS(**cas_opts) + s = self._sas_session = self._sas.CAS(**self._session_options) try: # Load the optimization action set s.loadactionset("optimization") @@ -651,8 +630,9 @@ def _apply_solver(self): # Declare a unique table name for the mps table mpsdata_table_name = "mps" + unique - # Upload mps file to CAS - if stat(self._problem_files[0]).st_size >= 2 * 1024**3: + # Upload mps file to CAS, if the file is larger than 2 GB, we need to use convertMps instead of loadMps + # Note that technically it is 2 Gibibytes file size that trigger the issue, but 2 GB is the safer threshold + if stat(self._problem_files[0]).st_size > 2E9: # For files larger than 2 GB (this is a limitation of the loadMps action used in the else part). # Use convertMPS, first create file for upload. 
mpsWithIdFileName = TempfileManager.create_tempfile( @@ -731,62 +711,64 @@ def _apply_solver(self): r.get("status", "ERROR"), r.get("solutionStatus", "ERROR") ) - if r.ProblemSummary["cValue1"][1] == "Maximization": - results.problem.sense = ProblemSense.maximize - else: - results.problem.sense = ProblemSense.minimize - - # Prepare the solution information - if ( - results.solver.termination_condition - == TerminationCondition.optimal - ): - sol = results.solution.add() - - # Store status in solution - sol.status = SolutionStatus.feasible - sol.termination_condition = TerminationCondition.optimal - - # Store objective value in solution - sol.objective["__default_objective__"] = { - "Value": r["objective"] - } - - if action == "solveMilp": - primal_out = s.CASTable(name=primalout_table_name) - # Use pandas functions for efficiency - primal_out = primal_out[["_VAR_", "_VALUE_"]] - sol.variable = {} - for row in primal_out.itertuples(index=False): - sol.variable[row[0]] = {"Value": row[1]} + if results.solver.status != SolverStatus.error: + if r.ProblemSummary["cValue1"][1] == "Maximization": + results.problem.sense = ProblemSense.maximize else: - # Convert primal out data set to variable dictionary - # Use panda functions for efficiency - primal_out = s.CASTable(name=primalout_table_name) - primal_out = primal_out[ - ["_VAR_", "_VALUE_", "_STATUS_", "_R_COST_"] - ] - sol.variable = {} - for row in primal_out.itertuples(index=False): - sol.variable[row[0]] = { - "Value": row[1], - "Status": row[2], - "rc": row[3], - } - - # Convert dual out data set to constraint dictionary - # Use pandas functions for efficiency - dual_out = s.CASTable(name=dualout_table_name) - dual_out = dual_out[ - ["_ROW_", "_VALUE_", "_STATUS_", "_ACTIVITY_"] - ] - sol.constraint = {} - for row in dual_out.itertuples(index=False): - sol.constraint[row[0]] = { - "dual": row[1], - "Status": row[2], - "slack": row[3], - } + results.problem.sense = ProblemSense.minimize + + # Prepare the solution information + if results.solver.hasSolution: + sol = results.solution.add() + + # Store status in solution + sol.status = SolutionStatus.feasible + sol.termination_condition = SOLSTATUS_TO_TERMINATIONCOND[r.get("solutionStatus", "ERROR")] + + # Store objective value in solution + sol.objective["__default_objective__"] = { + "Value": r["objective"] + } + + if action == "solveMilp": + primal_out = s.CASTable(name=primalout_table_name) + # Use pandas functions for efficiency + primal_out = primal_out[["_VAR_", "_VALUE_"]] + sol.variable = {} + for row in primal_out.itertuples(index=False): + sol.variable[row[0]] = {"Value": row[1]} + else: + # Convert primal out data set to variable dictionary + # Use panda functions for efficiency + primal_out = s.CASTable(name=primalout_table_name) + primal_out = primal_out[ + ["_VAR_", "_VALUE_", "_STATUS_", "_R_COST_"] + ] + sol.variable = {} + for row in primal_out.itertuples(index=False): + sol.variable[row[0]] = { + "Value": row[1], + "Status": row[2], + "rc": row[3], + } + + # Convert dual out data set to constraint dictionary + # Use pandas functions for efficiency + dual_out = s.CASTable(name=dualout_table_name) + dual_out = dual_out[ + ["_ROW_", "_VALUE_", "_STATUS_", "_ACTIVITY_"] + ] + sol.constraint = {} + for row in dual_out.itertuples(index=False): + sol.constraint[row[0]] = { + "dual": row[1], + "Status": row[2], + "slack": row[3], + } + else: + raise ValueError( + "The SAS solver returned an error status." 
+ ) else: results = self.results = SolverResults() results.solver.name = "SAS" diff --git a/pyomo/solvers/tests/checks/test_SAS.py b/pyomo/solvers/tests/checks/test_SAS.py index 1a6bbd80f1d..7b0e2cccd9a 100644 --- a/pyomo/solvers/tests/checks/test_SAS.py +++ b/pyomo/solvers/tests/checks/test_SAS.py @@ -30,11 +30,11 @@ class SASTestAbc: solver_io = "_sas94" - base_options = {} + session_options = {} @classmethod def setUpClass(cls): - cls.opt_sas = SolverFactory("sas", solver_io=cls.solver_io) + cls.opt_sas = SolverFactory("sas", solver_io=cls.solver_io, **cls.session_options) @classmethod def tearDownClass(cls): @@ -73,11 +73,6 @@ def run_solver(self, **kwargs): opt_sas = self.opt_sas instance = self.instance - # Add base options for connection data etc. - options = kwargs.get("options", {}) - if self.base_options: - kwargs["options"] = {**options, **self.base_options} - # Call the solver self.results = opt_sas.solve(instance, **kwargs) @@ -285,7 +280,7 @@ def test_solver_with_milp(self): class SASTestLP94(SASTestLP, unittest.TestCase): @mock.patch( "pyomo.solvers.plugins.solvers.SAS.SAS94.sas_version", - return_value="2sd45s39M4234232", + return_value="9.sd45s39M4234232", ) def test_solver_versionM4(self, sas): with self.assertRaises(NotImplementedError): @@ -293,7 +288,7 @@ def test_solver_versionM4(self, sas): @mock.patch( "pyomo.solvers.plugins.solvers.SAS.SAS94.sas_version", - return_value="234897293M5324u98", + return_value="9.34897293M5324u98", ) def test_solver_versionM5(self, sas): self.run_solver() @@ -317,7 +312,7 @@ def test_solver_error(self, submit_mock, symget_mock): @unittest.skipIf(not sas_available, "The SAS solver is not available") class SASTestLPCAS(SASTestLP, unittest.TestCase): solver_io = "_sascas" - base_options = CAS_OPTIONS + session_options = CAS_OPTIONS @mock.patch("pyomo.solvers.plugins.solvers.SAS.stat") def test_solver_large_file(self, os_stat): @@ -522,7 +517,7 @@ class SASTestMILP94(SASTestMILP, unittest.TestCase): @unittest.skipIf(not sas_available, "The SAS solver is not available") class SASTestMILPCAS(SASTestMILP, unittest.TestCase): solver_io = "_sascas" - base_options = CAS_OPTIONS + session_options = CAS_OPTIONS if __name__ == "__main__": From e17a2e5b77d33c6d63e2836667de97dd854d3c41 Mon Sep 17 00:00:00 2001 From: "Philipp Christophel (phchri)" Date: Tue, 12 Mar 2024 07:26:58 -0400 Subject: [PATCH 010/113] Black formatting --- pyomo/solvers/plugins/solvers/SAS.py | 32 ++++++++++++++------------ pyomo/solvers/tests/checks/test_SAS.py | 4 +++- 2 files changed, 20 insertions(+), 16 deletions(-) diff --git a/pyomo/solvers/plugins/solvers/SAS.py b/pyomo/solvers/plugins/solvers/SAS.py index 87ee31a08af..bd06f6a1ef7 100644 --- a/pyomo/solvers/plugins/solvers/SAS.py +++ b/pyomo/solvers/plugins/solvers/SAS.py @@ -204,9 +204,9 @@ def _create_results_from_status(self, status, solution_status): results.solver.termination_condition = SOLSTATUS_TO_TERMINATIONCOND[ solution_status ] - results.solver.message = ( - results.solver.termination_message - ) = SOLSTATUS_TO_MESSAGE[solution_status] + results.solver.message = results.solver.termination_message = ( + SOLSTATUS_TO_MESSAGE[solution_status] + ) results.solver.status = TerminationCondition.to_solver_status( results.solver.termination_condition ) @@ -215,14 +215,14 @@ def _create_results_from_status(self, status, solution_status): elif results.solver.status == SolverStatus.aborted: results.solver.termination_condition = TerminationCondition.userInterrupt if solution_status != "ERROR": - results.solver.message = 
( - results.solver.termination_message - ) = SOLSTATUS_TO_MESSAGE[solution_status] + results.solver.message = results.solver.termination_message = ( + SOLSTATUS_TO_MESSAGE[solution_status] + ) else: results.solver.termination_condition = TerminationCondition.error - results.solver.message = ( - results.solver.termination_message - ) = SOLSTATUS_TO_MESSAGE["FAILED"] + results.solver.message = results.solver.termination_message = ( + SOLSTATUS_TO_MESSAGE["FAILED"] + ) return results @abstractmethod @@ -502,7 +502,9 @@ def _apply_solver(self): # Store status in solution sol.status = SolutionStatus.feasible - sol.termination_condition = SOLSTATUS_TO_TERMINATIONCOND[self._macro.get("SOLUTION_STATUS", "ERROR")] + sol.termination_condition = SOLSTATUS_TO_TERMINATIONCOND[ + self._macro.get("SOLUTION_STATUS", "ERROR") + ] # Store objective value in solution sol.objective["__default_objective__"] = {"Value": self._macro["OBJECTIVE"]} @@ -632,7 +634,7 @@ def _apply_solver(self): # Upload mps file to CAS, if the file is larger than 2 GB, we need to use convertMps instead of loadMps # Note that technically it is 2 Gibibytes file size that trigger the issue, but 2 GB is the safer threshold - if stat(self._problem_files[0]).st_size > 2E9: + if stat(self._problem_files[0]).st_size > 2e9: # For files larger than 2 GB (this is a limitation of the loadMps action used in the else part). # Use convertMPS, first create file for upload. mpsWithIdFileName = TempfileManager.create_tempfile( @@ -723,7 +725,9 @@ def _apply_solver(self): # Store status in solution sol.status = SolutionStatus.feasible - sol.termination_condition = SOLSTATUS_TO_TERMINATIONCOND[r.get("solutionStatus", "ERROR")] + sol.termination_condition = SOLSTATUS_TO_TERMINATIONCOND[ + r.get("solutionStatus", "ERROR") + ] # Store objective value in solution sol.objective["__default_objective__"] = { @@ -766,9 +770,7 @@ def _apply_solver(self): "slack": row[3], } else: - raise ValueError( - "The SAS solver returned an error status." 
- ) + raise ValueError("The SAS solver returned an error status.") else: results = self.results = SolverResults() results.solver.name = "SAS" diff --git a/pyomo/solvers/tests/checks/test_SAS.py b/pyomo/solvers/tests/checks/test_SAS.py index 7b0e2cccd9a..922209ef88b 100644 --- a/pyomo/solvers/tests/checks/test_SAS.py +++ b/pyomo/solvers/tests/checks/test_SAS.py @@ -34,7 +34,9 @@ class SASTestAbc: @classmethod def setUpClass(cls): - cls.opt_sas = SolverFactory("sas", solver_io=cls.solver_io, **cls.session_options) + cls.opt_sas = SolverFactory( + "sas", solver_io=cls.solver_io, **cls.session_options + ) @classmethod def tearDownClass(cls): From 07df0c69a9d62e0174d94077f3fff323bea53c87 Mon Sep 17 00:00:00 2001 From: "Philipp Christophel (phchri)" Date: Tue, 26 Mar 2024 09:17:14 -0400 Subject: [PATCH 011/113] Use TeeStream, simplify _apply_solver and other small fixes --- pyomo/solvers/plugins/solvers/SAS.py | 392 +++++++++++++------------ pyomo/solvers/tests/checks/test_SAS.py | 11 + 2 files changed, 208 insertions(+), 195 deletions(-) diff --git a/pyomo/solvers/plugins/solvers/SAS.py b/pyomo/solvers/plugins/solvers/SAS.py index bd06f6a1ef7..bccb7d34077 100644 --- a/pyomo/solvers/plugins/solvers/SAS.py +++ b/pyomo/solvers/plugins/solvers/SAS.py @@ -1,13 +1,20 @@ -__all__ = ["SAS"] +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2024 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ import logging import sys from os import stat import uuid - -from io import StringIO from abc import ABC, abstractmethod -from contextlib import redirect_stdout +from io import StringIO from pyomo.opt.base import ProblemFormat, ResultsFormat, OptSolver from pyomo.opt.base.solvers import SolverFactory @@ -23,6 +30,8 @@ from pyomo.core.base import Var from pyomo.core.base.block import _BlockData from pyomo.core.kernel.block import IBlock +from pyomo.common.log import LogStream +from pyomo.common.tee import capture_output, TeeStream logger = logging.getLogger("pyomo.solvers") @@ -252,6 +261,7 @@ class SAS94(SASAbc): """ Solver interface for SAS 9.4 using saspy. See the saspy documentation about how to create a connection. + The swat connection options can be specified on the SolverFactory call. """ def __init__(self, **kwds): @@ -541,37 +551,12 @@ def _apply_solver(self): return Bunch(rc=self._rc, log=self._log) -class SASLogWriter: - """Helper class to take the log from stdout and put it also in a StringIO.""" - - def __init__(self, tee): - """Set up the two outputs.""" - self.tee = tee - self._log = StringIO() - self.stdout = sys.stdout - - def write(self, message): - """If the tee options is specified, write to both outputs.""" - if self.tee: - self.stdout.write(message) - self._log.write(message) - - def flush(self): - """Nothing to do, just here for compatibility reasons.""" - # Do nothing since we flush right away - pass - - def log(self): - """ "Get the log as a string.""" - return self._log.getvalue() - - @SolverFactory.register("_sascas", doc="SAS Viya CAS Server interface") class SASCAS(SASAbc): """ Solver interface connection to a SAS Viya CAS server using swat. 
See the documentation for the swat package about how to create a connection. - The swat connection options can be passed as options to the solve function. + The swat connection options can be specified on the SolverFactory call. """ def __init__(self, **kwds): @@ -600,6 +585,106 @@ def __del__(self): if self._sas_session: self._sas_session.close() + def _uploadMpsFile(self, s, unique): + # Declare a unique table name for the mps table + mpsdata_table_name = "mps" + unique + + # Upload mps file to CAS, if the file is larger than 2 GB, we need to use convertMps instead of loadMps + # Note that technically it is 2 Gibibytes file size that trigger the issue, but 2 GB is the safer threshold + if stat(self._problem_files[0]).st_size > 2e9: + # For files larger than 2 GB (this is a limitation of the loadMps action used in the else part). + # Use convertMPS, first create file for upload. + mpsWithIdFileName = TempfileManager.create_tempfile(".mps.csv", text=True) + with open(mpsWithIdFileName, "w") as mpsWithId: + mpsWithId.write("_ID_\tText\n") + with open(self._problem_files[0], "r") as f: + id = 0 + for line in f: + id += 1 + mpsWithId.write(str(id) + "\t" + line.rstrip() + "\n") + + # Upload .mps.csv file + mpscsv_table_name = "csv" + unique + s.upload_file( + mpsWithIdFileName, + casout={"name": mpscsv_table_name, "replace": True}, + importoptions={"filetype": "CSV", "delimiter": "\t"}, + ) + + # Convert .mps.csv file to .mps + s.optimization.convertMps( + data=mpscsv_table_name, + casOut={"name": mpsdata_table_name, "replace": True}, + format="FREE", + ) + + # Delete the table we don't need anymore + if mpscsv_table_name: + s.dropTable(name=mpscsv_table_name, quiet=True) + else: + # For small files (less than 2 GB), use loadMps + with open(self._problem_files[0], "r") as mps_file: + s.optimization.loadMps( + mpsFileString=mps_file.read(), + casout={"name": mpsdata_table_name, "replace": True}, + format="FREE", + ) + return mpsdata_table_name + + def _uploadPrimalin(self, s, unique): + # Upload warmstart file to CAS with a unique name + primalin_table_name = "pin" + unique + s.upload_file( + self._warm_start_file_name, + casout={"name": primalin_table_name, "replace": True}, + importoptions={"filetype": "CSV"}, + ) + self.options["primalin"] = primalin_table_name + return primalin_table_name + + def _retrieveSolution( + self, s, r, results, action, primalout_table_name, dualout_table_name + ): + # Create solution + sol = results.solution.add() + + # Store status in solution + sol.status = SolutionStatus.feasible + sol.termination_condition = SOLSTATUS_TO_TERMINATIONCOND[ + r.get("solutionStatus", "ERROR") + ] + + # Store objective value in solution + sol.objective["__default_objective__"] = {"Value": r["objective"]} + + if action == "solveMilp": + primal_out = s.CASTable(name=primalout_table_name) + # Use pandas functions for efficiency + primal_out = primal_out[["_VAR_", "_VALUE_"]] + sol.variable = {} + for row in primal_out.itertuples(index=False): + sol.variable[row[0]] = {"Value": row[1]} + else: + # Convert primal out data set to variable dictionary + # Use panda functions for efficiency + primal_out = s.CASTable(name=primalout_table_name) + primal_out = primal_out[["_VAR_", "_VALUE_", "_STATUS_", "_R_COST_"]] + sol.variable = {} + for row in primal_out.itertuples(index=False): + sol.variable[row[0]] = {"Value": row[1], "Status": row[2], "rc": row[3]} + + # Convert dual out data set to constraint dictionary + # Use pandas functions for efficiency + dual_out = 
s.CASTable(name=dualout_table_name) + dual_out = dual_out[["_ROW_", "_VALUE_", "_STATUS_", "_ACTIVITY_"]] + sol.constraint = {} + for row in dual_out.itertuples(index=False): + sol.constraint[row[0]] = { + "dual": row[1], + "Status": row[2], + "slack": row[3], + } + def _apply_solver(self): """ "Prepare the options and run the solver. Then store the data to be returned.""" logger.debug("Running SAS Viya") @@ -620,175 +705,92 @@ def _apply_solver(self): # Get a unique identifier, always use the same with different prefixes unique = uuid.uuid4().hex[:16] + # Creat the output stream, we want to print to a log string as well as to the console + self._log = StringIO() + ostreams = [LogStream(level=logging.INFO, logger=logger)] + ostreams.append(self._log) + if self._tee: + ostreams.append(sys.stdout) + # Connect to CAS server - with redirect_stdout(SASLogWriter(self._tee)) as self._log_writer: - s = self._sas_session - if s == None: - s = self._sas_session = self._sas.CAS(**self._session_options) - try: - # Load the optimization action set - s.loadactionset("optimization") - - # Declare a unique table name for the mps table - mpsdata_table_name = "mps" + unique - - # Upload mps file to CAS, if the file is larger than 2 GB, we need to use convertMps instead of loadMps - # Note that technically it is 2 Gibibytes file size that trigger the issue, but 2 GB is the safer threshold - if stat(self._problem_files[0]).st_size > 2e9: - # For files larger than 2 GB (this is a limitation of the loadMps action used in the else part). - # Use convertMPS, first create file for upload. - mpsWithIdFileName = TempfileManager.create_tempfile( - ".mps.csv", text=True - ) - with open(mpsWithIdFileName, "w") as mpsWithId: - mpsWithId.write("_ID_\tText\n") - with open(self._problem_files[0], "r") as f: - id = 0 - for line in f: - id += 1 - mpsWithId.write(str(id) + "\t" + line.rstrip() + "\n") - - # Upload .mps.csv file - mpscsv_table_name = "csv" + unique - s.upload_file( - mpsWithIdFileName, - casout={"name": mpscsv_table_name, "replace": True}, - importoptions={"filetype": "CSV", "delimiter": "\t"}, - ) - - # Convert .mps.csv file to .mps - s.optimization.convertMps( - data=mpscsv_table_name, - casOut={"name": mpsdata_table_name, "replace": True}, - format="FREE", - ) - - # Delete the table we don't need anymore - if mpscsv_table_name: - s.dropTable(name=mpscsv_table_name, quiet=True) - else: - # For small files (less than 2 GB), use loadMps - with open(self._problem_files[0], "r") as mps_file: - s.optimization.loadMps( - mpsFileString=mps_file.read(), - casout={"name": mpsdata_table_name, "replace": True}, - format="FREE", + with TeeStream(*ostreams) as t: + with capture_output(output=t.STDOUT, capture_fd=False): + s = self._sas_session + if s == None: + s = self._sas_session = self._sas.CAS(**self._session_options) + try: + # Load the optimization action set + s.loadactionset("optimization") + + mpsdata_table_name = self._uploadMpsFile(s, unique) + + primalin_table_name = None + if self.warmstart_flag: + primalin_table_name = self._uploadPrimalin(s, unique) + + # Define output table names + primalout_table_name = "pout" + unique + dualout_table_name = None + + # Solve the problem in CAS + if action == "solveMilp": + r = s.optimization.solveMilp( + data={"name": mpsdata_table_name}, + primalOut={"name": primalout_table_name, "replace": True}, + **self.options + ) + else: + dualout_table_name = "dout" + unique + r = s.optimization.solveLp( + data={"name": mpsdata_table_name}, + primalOut={"name": 
primalout_table_name, "replace": True}, + dualOut={"name": dualout_table_name, "replace": True}, + **self.options ) - primalin_table_name = None - if self.warmstart_flag: - primalin_table_name = "pin" + unique - # Upload warmstart file to CAS - s.upload_file( - self._warm_start_file_name, - casout={"name": primalin_table_name, "replace": True}, - importoptions={"filetype": "CSV"}, - ) - self.options["primalin"] = primalin_table_name - - # Define output table names - primalout_table_name = "pout" + unique - dualout_table_name = None - - # Solve the problem in CAS - if action == "solveMilp": - r = s.optimization.solveMilp( - data={"name": mpsdata_table_name}, - primalOut={"name": primalout_table_name, "replace": True}, - **self.options - ) - else: - dualout_table_name = "dout" + unique - r = s.optimization.solveLp( - data={"name": mpsdata_table_name}, - primalOut={"name": primalout_table_name, "replace": True}, - dualOut={"name": dualout_table_name, "replace": True}, - **self.options - ) - - # Prepare the solver results - if r: - # Get back the primal and dual solution data sets - results = self.results = self._create_results_from_status( - r.get("status", "ERROR"), r.get("solutionStatus", "ERROR") - ) - - if results.solver.status != SolverStatus.error: - if r.ProblemSummary["cValue1"][1] == "Maximization": - results.problem.sense = ProblemSense.maximize - else: - results.problem.sense = ProblemSense.minimize - - # Prepare the solution information - if results.solver.hasSolution: - sol = results.solution.add() - - # Store status in solution - sol.status = SolutionStatus.feasible - sol.termination_condition = SOLSTATUS_TO_TERMINATIONCOND[ - r.get("solutionStatus", "ERROR") - ] - - # Store objective value in solution - sol.objective["__default_objective__"] = { - "Value": r["objective"] - } - - if action == "solveMilp": - primal_out = s.CASTable(name=primalout_table_name) - # Use pandas functions for efficiency - primal_out = primal_out[["_VAR_", "_VALUE_"]] - sol.variable = {} - for row in primal_out.itertuples(index=False): - sol.variable[row[0]] = {"Value": row[1]} + # Prepare the solver results + if r: + # Get back the primal and dual solution data sets + results = self.results = self._create_results_from_status( + r.get("status", "ERROR"), r.get("solutionStatus", "ERROR") + ) + + if results.solver.status != SolverStatus.error: + if r.ProblemSummary["cValue1"][1] == "Maximization": + results.problem.sense = ProblemSense.maximize else: - # Convert primal out data set to variable dictionary - # Use panda functions for efficiency - primal_out = s.CASTable(name=primalout_table_name) - primal_out = primal_out[ - ["_VAR_", "_VALUE_", "_STATUS_", "_R_COST_"] - ] - sol.variable = {} - for row in primal_out.itertuples(index=False): - sol.variable[row[0]] = { - "Value": row[1], - "Status": row[2], - "rc": row[3], - } - - # Convert dual out data set to constraint dictionary - # Use pandas functions for efficiency - dual_out = s.CASTable(name=dualout_table_name) - dual_out = dual_out[ - ["_ROW_", "_VALUE_", "_STATUS_", "_ACTIVITY_"] - ] - sol.constraint = {} - for row in dual_out.itertuples(index=False): - sol.constraint[row[0]] = { - "dual": row[1], - "Status": row[2], - "slack": row[3], - } + results.problem.sense = ProblemSense.minimize + + # Prepare the solution information + if results.solver.hasSolution: + self._retrieveSolution( + s, + r, + results, + action, + primalout_table_name, + dualout_table_name, + ) + else: + raise ValueError("The SAS solver returned an error status.") else: - raise 
ValueError("The SAS solver returned an error status.") - else: - results = self.results = SolverResults() - results.solver.name = "SAS" - results.solver.status = SolverStatus.error - raise ValueError( - "An option passed to the SAS solver caused a syntax error." - ) - - finally: - if mpsdata_table_name: - s.dropTable(name=mpsdata_table_name, quiet=True) - if primalin_table_name: - s.dropTable(name=primalin_table_name, quiet=True) - if primalout_table_name: - s.dropTable(name=primalout_table_name, quiet=True) - if dualout_table_name: - s.dropTable(name=dualout_table_name, quiet=True) - - self._log = self._log_writer.log() + results = self.results = SolverResults() + results.solver.name = "SAS" + results.solver.status = SolverStatus.error + raise ValueError( + "An option passed to the SAS solver caused a syntax error." + ) + + finally: + if mpsdata_table_name: + s.dropTable(name=mpsdata_table_name, quiet=True) + if primalin_table_name: + s.dropTable(name=primalin_table_name, quiet=True) + if primalout_table_name: + s.dropTable(name=primalout_table_name, quiet=True) + if dualout_table_name: + s.dropTable(name=dualout_table_name, quiet=True) + + self._log = self._log.getvalue() self._rc = 0 return Bunch(rc=self._rc, log=self._log) diff --git a/pyomo/solvers/tests/checks/test_SAS.py b/pyomo/solvers/tests/checks/test_SAS.py index 922209ef88b..75534e0e001 100644 --- a/pyomo/solvers/tests/checks/test_SAS.py +++ b/pyomo/solvers/tests/checks/test_SAS.py @@ -1,3 +1,14 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2024 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + import os import pyomo.common.unittest as unittest from unittest import mock From 4f0c8d84ccb1f6cabbd90a0b0790db2e9d2311f5 Mon Sep 17 00:00:00 2001 From: John Siirola Date: Thu, 18 Apr 2024 14:07:07 -0600 Subject: [PATCH 012/113] Simplify constant NL expressions in defined vars due to the presolver --- pyomo/repn/plugins/nl_writer.py | 125 +++++++++++++++++++-- pyomo/repn/tests/ampl/test_nlv2.py | 167 +++++++++++++++++++++++++++++ 2 files changed, 282 insertions(+), 10 deletions(-) diff --git a/pyomo/repn/plugins/nl_writer.py b/pyomo/repn/plugins/nl_writer.py index ee5b65149ae..2de44b4f68b 100644 --- a/pyomo/repn/plugins/nl_writer.py +++ b/pyomo/repn/plugins/nl_writer.py @@ -11,6 +11,8 @@ import ctypes import logging +import math +import operator import os from collections import deque, defaultdict, namedtuple from contextlib import nullcontext @@ -1835,7 +1837,7 @@ def _linear_presolve(self, comp_by_linear_var, lcon_by_linear_nnz, var_bounds): fixed_vars.append(x) eliminated_cons.add(con_id) else: - return eliminated_cons, eliminated_vars + break for con_id, expr_info in comp_by_linear_var[_id]: # Note that if we were aggregating (i.e., _id was # from two_var), then one of these info's will be @@ -1888,6 +1890,32 @@ def _linear_presolve(self, comp_by_linear_var, lcon_by_linear_nnz, var_bounds): elif a: expr_info.linear[x] = c * a + # Note: the ASL will (silently) produce incorrect answers if the + # nonlinear portion of a defined variable is a constant + # expression. 
This may now be the case if all the variables in + # the original nonlinear expression have been fixed. + for expr, info, _ in self.subexpression_cache.values(): + if not info.nonlinear: + continue + print(info.nonlinear) + nl, args = info.nonlinear + if not args or any(vid not in eliminated_vars for vid in args): + continue + # Ideally, we would just evaluate the named expression. + # However, there might be a linear portion of the named + # expression that still has free variables, and there is no + # guarantee that the user actually initialized the + # variables. So, we will fall back on parsing the (now + # constant) nonlinear fragment and evaluating it. + if info.linear is None: + info.linear = {} + info.nonlinear = None + info.const += _evaluate_constant_nl( + nl % tuple(template.const % eliminated_vars[i].const for i in args) + ) + + return eliminated_cons, eliminated_vars + def _record_named_expression_usage(self, named_exprs, src, comp_type): self.used_named_expressions.update(named_exprs) src = id(src) @@ -2263,6 +2291,40 @@ class text_nl_debug_template(object): _create_strict_inequality_map(vars()) +nl_operators = { + 0: (2, operator.add), + 2: (2, operator.mul), + 3: (2, operator.truediv), + 5: (2, operator.pow), + 15: (1, operator.abs), + 16: (1, operator.neg), + 54: (None, lambda *x: sum(x)), + 35: (3, lambda a, b, c: b if a else c), + 21: (2, operator.and_), + 22: (2, operator.lt), + 23: (2, operator.le), + 24: (2, operator.eq), + 43: (2, math.log), + 42: (2, math.log10), + 41: (2, math.sin), + 46: (2, math.cos), + 38: (2, math.tan), + 40: (2, math.sinh), + 45: (2, math.cosh), + 37: (2, math.tanh), + 51: (2, math.asin), + 53: (2, math.acos), + 49: (2, math.atan), + 44: (2, math.exp), + 39: (2, math.sqrt), + 50: (2, math.asinh), + 52: (2, math.acosh), + 47: (2, math.atanh), + 14: (2, math.ceil), + 13: (2, math.floor), +} + + def _strip_template_comments(vars_, base_): vars_['unary'] = {k: v[: v.find('\t#')] + '\n' for k, v in base_.unary.items()} for k, v in base_.__dict__.items(): @@ -2515,6 +2577,15 @@ def handle_named_expression_node(visitor, node, arg1): expression_source, ) + # As we will eventually need the compiled form of any nonlinear + # expression, we will go ahead and compile it here. We do not + # do the same for the linear component as we will only need the + # linear component compiled to a dict if we are emitting the + # original (linear + nonlinear) V line (which will not happen if + # the V line is part of a larger linear operator). + if repn.nonlinear.__class__ is list: + repn.compile_nonlinear_fragment(visitor) + if not visitor.use_named_exprs: return _GENERAL, repn.duplicate() @@ -2527,15 +2598,6 @@ def handle_named_expression_node(visitor, node, arg1): repn.nl = (visitor.template.var, (_id,)) if repn.nonlinear: - # As we will eventually need the compiled form of any nonlinear - # expression, we will go ahead and compile it here. We do not - # do the same for the linear component as we will only need the - # linear component compiled to a dict if we are emitting the - # original (linear + nonlinear) V line (which will not happen if - # the V line is part of a larger linear operator). 
- if repn.nonlinear.__class__ is list: - repn.compile_nonlinear_fragment(visitor) - if repn.linear: # If this expression has both linear and nonlinear # components, we will follow the ASL convention and break @@ -3016,3 +3078,46 @@ def finalizeResult(self, result): # self.active_expression_source = None return ans + + +def _evaluate_constant_nl(nl): + expr = nl.splitlines() + stack = [] + while expr: + line = expr.pop() + tokens = line.split() + # remove tokens after the first comment + for i, t in enumerate(tokens): + if t.startswith('#'): + tokens = tokens[:i] + break + if len(tokens) != 1: + # skip blank lines + if not tokens: + continue + raise DeveloperError( + f"Unsupported line format _evaluate_nl() (we expect each line " + f"to contain a single token): '{line}'" + ) + term = tokens[0] + # the "command" can be determined by the first character on the line + cmd = term[0] + # Note that we will unpack the line into the expected number of + # explicit arguments as a form of error checking + if cmd == 'n': + stack.append(float(term[1:])) + elif cmd == 'o': + # operator + nargs, fcn = nl_operators[int(term[1:])] + if nargs is None: + nargs = int(stack.pop()) + stack.append(fcn(*(stack.pop() for i in range(nargs)))) + elif cmd in '1234567890': + # this is either a single int (e.g., the nargs in a nary + # sum) or a string argument. Preserve it as-is until later + # when we know which we are expecting. + stack.append(term) + else: + raise DeveloperError(f"Unsupported NL operator in _evaluate_nl(): '{line}'") + assert len(stack) == 1 + return stack[0] diff --git a/pyomo/repn/tests/ampl/test_nlv2.py b/pyomo/repn/tests/ampl/test_nlv2.py index be72025edcd..54b0d93b52b 100644 --- a/pyomo/repn/tests/ampl/test_nlv2.py +++ b/pyomo/repn/tests/ampl/test_nlv2.py @@ -2165,6 +2165,173 @@ def test_named_expressions(self): 0 0 1 0 2 0 +""", + OUT.getvalue(), + ) + ) + + def test_presolve_fixes_nl_defined_variables(self): + # This tests a workaround for a bug in the ASL where defined + # variables with nonstant expressions in the NL portion are not + # evaluated correctly. + m = ConcreteModel() + m.x = Var() + m.y = Var(bounds=(3, None)) + m.z = Var(bounds=(None, 3)) + m.e = Expression(expr=m.x + m.y * m.z + m.y**2 + 3 / m.z) + m.c1 = Constraint(expr=m.y * m.e + m.x >= 0) + m.c2 = Constraint(expr=m.y == m.z) + + OUT = io.StringIO() + nl_writer.NLWriter().write( + m, + OUT, + symbolic_solver_labels=True, + linear_presolve=True, + export_defined_variables=True, + ) + self.assertEqual( + *nl_diff( + """g3 1 1 0 # problem unknown + 1 1 0 0 0 #vars, constraints, objectives, ranges, eqns + 1 0 0 0 0 0 #nonlinear constrs, objs; ccons: lin, nonlin, nd, nzlb + 0 0 #network constraints: nonlinear, linear + 1 0 0 #nonlinear vars in constraints, objectives, both + 0 0 0 1 #linear network variables; functions; arith, flags + 0 0 0 0 0 #discrete variables: binary, integer, nonlinear (b,c,o) + 1 0 #nonzeros in Jacobian, obj. 
gradient + 2 1 #max name lengths: constraints, variables + 0 0 0 2 0 #common exprs: b,c,o,c1,o1 +V1 0 1 #nl(e) +n19 +V2 1 1 #e +0 1 +v1 #nl(e) +C0 #c1 +o2 #* +n3 +v2 #e +x0 #initial guess +r #1 ranges (rhs's) +2 0 #c1 +b #1 bounds (on variables) +3 #x +k0 #intermediate Jacobian column lengths +J0 1 #c1 +0 1 +""", + OUT.getvalue(), + ) + ) + + OUT = io.StringIO() + nl_writer.NLWriter().write( + m, + OUT, + symbolic_solver_labels=True, + linear_presolve=True, + export_defined_variables=False, + ) + self.assertEqual( + *nl_diff( + """g3 1 1 0 # problem unknown + 1 1 0 0 0 #vars, constraints, objectives, ranges, eqns + 1 0 0 0 0 0 #nonlinear constrs, objs; ccons: lin, nonlin, nd, nzlb + 0 0 #network constraints: nonlinear, linear + 1 0 0 #nonlinear vars in constraints, objectives, both + 0 0 0 1 #linear network variables; functions; arith, flags + 0 0 0 0 0 #discrete variables: binary, integer, nonlinear (b,c,o) + 1 0 #nonzeros in Jacobian, obj. gradient + 2 1 #max name lengths: constraints, variables + 0 0 0 0 0 #common exprs: b,c,o,c1,o1 +C0 #c1 +o2 #* +n3 +o0 #+ +v0 #x +o54 #sumlist +3 #(n) +o2 #* +n3 +n3 +o5 #^ +n3 +n2 +o3 #/ +n3 +n3 +x0 #initial guess +r #1 ranges (rhs's) +2 0 #c1 +b #1 bounds (on variables) +3 #x +k0 #intermediate Jacobian column lengths +J0 1 #c1 +0 1 +""", + OUT.getvalue(), + ) + ) + + OUT = io.StringIO() + nl_writer.NLWriter().write( + m, + OUT, + symbolic_solver_labels=True, + linear_presolve=False, + export_defined_variables=True, + ) + self.assertEqual( + *nl_diff( + """g3 1 1 0 # problem unknown + 3 2 0 0 1 #vars, constraints, objectives, ranges, eqns + 1 0 0 0 0 0 #nonlinear constrs, objs; ccons: lin, nonlin, nd, nzlb + 0 0 #network constraints: nonlinear, linear + 3 0 0 #nonlinear vars in constraints, objectives, both + 0 0 0 1 #linear network variables; functions; arith, flags + 0 0 0 0 0 #discrete variables: binary, integer, nonlinear (b,c,o) + 5 0 #nonzeros in Jacobian, obj. 
gradient + 2 1 #max name lengths: constraints, variables + 0 0 0 2 0 #common exprs: b,c,o,c1,o1 +V3 0 1 #nl(e) +o54 #sumlist +3 #(n) + o2 #* + v0 #y +v2 #z +o5 #^ +v0 #y +n2 +o3 #/ +n3 +v2 #z +V4 1 1 #e +1 1 +v3 #nl(e) +C0 #c1 +o2 #* +v0 #y +v4 #e +C1 #c2 +n0 +x0 #initial guess +r #2 ranges (rhs's) +2 0 #c1 +4 0 #c2 +b #3 bounds (on variables) +2 3 #y +3 #x +1 3 #z +k2 #intermediate Jacobian column lengths +2 +3 +J0 3 #c1 +0 0 +1 1 +2 0 +J1 2 #c2 +0 1 +2 -1 """, OUT.getvalue(), ) From afa542f8e565b43a05b73e62398d29e7f2753f6b Mon Sep 17 00:00:00 2001 From: John Siirola Date: Fri, 19 Apr 2024 09:06:00 -0600 Subject: [PATCH 013/113] Improve comments/error messages --- pyomo/repn/plugins/nl_writer.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/pyomo/repn/plugins/nl_writer.py b/pyomo/repn/plugins/nl_writer.py index f91f88ff5cf..fc8b8b10309 100644 --- a/pyomo/repn/plugins/nl_writer.py +++ b/pyomo/repn/plugins/nl_writer.py @@ -3102,8 +3102,8 @@ def _evaluate_constant_nl(nl): if not tokens: continue raise DeveloperError( - f"Unsupported line format _evaluate_nl() (we expect each line " - f"to contain a single token): '{line}'" + f"Unsupported line format _evaluate_constant_nl() " + f"(we expect each line to contain a single token): '{line}'" ) term = tokens[0] # the "command" can be determined by the first character on the line @@ -3111,6 +3111,7 @@ def _evaluate_constant_nl(nl): # Note that we will unpack the line into the expected number of # explicit arguments as a form of error checking if cmd == 'n': + # numeric constant stack.append(float(term[1:])) elif cmd == 'o': # operator @@ -3124,6 +3125,8 @@ def _evaluate_constant_nl(nl): # when we know which we are expecting. stack.append(term) else: - raise DeveloperError(f"Unsupported NL operator in _evaluate_nl(): '{line}'") + raise DeveloperError( + f"Unsupported NL operator in _evaluate_constant_nl(): '{line}'" + ) assert len(stack) == 1 return stack[0] From 1a8ab9621c0252fedca294e3e599f363253ce158 Mon Sep 17 00:00:00 2001 From: John Siirola Date: Fri, 19 Apr 2024 09:06:53 -0600 Subject: [PATCH 014/113] Fix typo in baseline --- pyomo/repn/tests/ampl/test_nlv2.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyomo/repn/tests/ampl/test_nlv2.py b/pyomo/repn/tests/ampl/test_nlv2.py index 64830d36921..f2378e56b4c 100644 --- a/pyomo/repn/tests/ampl/test_nlv2.py +++ b/pyomo/repn/tests/ampl/test_nlv2.py @@ -2435,8 +2435,8 @@ def test_presolve_fixes_nl_defined_variables(self): V3 0 1 #nl(e) o54 #sumlist 3 #(n) - o2 #* - v0 #y +o2 #* +v0 #y v2 #z o5 #^ v0 #y From a5c79317bfa68a91e5081682e36873559acea5ab Mon Sep 17 00:00:00 2001 From: John Siirola Date: Mon, 22 Apr 2024 15:27:21 -0600 Subject: [PATCH 015/113] Remove debugging --- pyomo/repn/plugins/nl_writer.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pyomo/repn/plugins/nl_writer.py b/pyomo/repn/plugins/nl_writer.py index fc8b8b10309..e4a2cea0bad 100644 --- a/pyomo/repn/plugins/nl_writer.py +++ b/pyomo/repn/plugins/nl_writer.py @@ -1903,7 +1903,6 @@ def _linear_presolve(self, comp_by_linear_var, lcon_by_linear_nnz, var_bounds): for expr, info, _ in self.subexpression_cache.values(): if not info.nonlinear: continue - print(info.nonlinear) nl, args = info.nonlinear if not args or any(vid not in eliminated_vars for vid in args): continue From 8390ce4321934eae0288dd8fe756fe0cbfaea5e6 Mon Sep 17 00:00:00 2001 From: John Siirola Date: Mon, 22 Apr 2024 15:30:21 -0600 Subject: [PATCH 016/113] Fix the arg count for unary operators --- 
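Note on this fix: in the AMPL NL format, opcodes such as o43 (log), o41 (sin), or o44 (exp) are unary, so the `nl_operators` table used by `_evaluate_constant_nl` must record one operand for them; with the previous `(2, ...)` entries the stack evaluator would pop an extra value and mis-evaluate the constant fragment. The following is a minimal standalone sketch of that stack evaluation, illustrative only and not part of the diff below (it hard-codes just two table entries for demonstration):

```python
import math

# Corrected unary entry (one operand), alongside a binary entry for contrast,
# mirroring the shape of the nl_operators table in the diff below.
nl_operators = {43: (1, math.log), 0: (2, lambda a, b: a + b)}

def eval_constant_nl(lines):
    """Evaluate a constant NL fragment given as a list of 'o<op>'/'n<num>' tokens."""
    stack = []
    for token in reversed(lines):          # operands follow operators, so push them first
        if token.startswith('n'):          # numeric constant
            stack.append(float(token[1:]))
        else:                              # operator token 'o<opcode>'
            nargs, fcn = nl_operators[int(token[1:])]
            stack.append(fcn(*(stack.pop() for _ in range(nargs))))
    return stack[0]

print(eval_constant_nl(['o43', 'n8.0']))         # log(8.0) ~= 2.079 (one operand popped)
print(eval_constant_nl(['o0', 'n1.5', 'n2.5']))  # 1.5 + 2.5 = 4.0
```

With the old `(2, math.log)` entry, the first call would have tried to pop a second operand that is not there, which is exactly the arg-count problem this patch corrects.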
pyomo/repn/plugins/nl_writer.py | 36 ++++++++++++++++----------------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/pyomo/repn/plugins/nl_writer.py b/pyomo/repn/plugins/nl_writer.py index e4a2cea0bad..d4e7485cce4 100644 --- a/pyomo/repn/plugins/nl_writer.py +++ b/pyomo/repn/plugins/nl_writer.py @@ -2309,24 +2309,24 @@ class text_nl_debug_template(object): 22: (2, operator.lt), 23: (2, operator.le), 24: (2, operator.eq), - 43: (2, math.log), - 42: (2, math.log10), - 41: (2, math.sin), - 46: (2, math.cos), - 38: (2, math.tan), - 40: (2, math.sinh), - 45: (2, math.cosh), - 37: (2, math.tanh), - 51: (2, math.asin), - 53: (2, math.acos), - 49: (2, math.atan), - 44: (2, math.exp), - 39: (2, math.sqrt), - 50: (2, math.asinh), - 52: (2, math.acosh), - 47: (2, math.atanh), - 14: (2, math.ceil), - 13: (2, math.floor), + 43: (1, math.log), + 42: (1, math.log10), + 41: (1, math.sin), + 46: (1, math.cos), + 38: (1, math.tan), + 40: (1, math.sinh), + 45: (1, math.cosh), + 37: (1, math.tanh), + 51: (1, math.asin), + 53: (1, math.acos), + 49: (1, math.atan), + 44: (1, math.exp), + 39: (1, math.sqrt), + 50: (1, math.asinh), + 52: (1, math.acosh), + 47: (1, math.atanh), + 14: (1, math.ceil), + 13: (1, math.floor), } From 909fd1e73cf04aa890616816cba789b8c0bb7d01 Mon Sep 17 00:00:00 2001 From: John Siirola Date: Thu, 16 May 2024 00:49:38 -0600 Subject: [PATCH 017/113] Support deferred final resolution of external function args --- pyomo/repn/plugins/nl_writer.py | 63 +++++++++++++++++++++++++++------ 1 file changed, 53 insertions(+), 10 deletions(-) diff --git a/pyomo/repn/plugins/nl_writer.py b/pyomo/repn/plugins/nl_writer.py index d4e7485cce4..763ed3cce0e 100644 --- a/pyomo/repn/plugins/nl_writer.py +++ b/pyomo/repn/plugins/nl_writer.py @@ -16,7 +16,7 @@ import os from collections import deque, defaultdict, namedtuple from contextlib import nullcontext -from itertools import filterfalse, product +from itertools import filterfalse, product, chain from math import log10 as _log10 from operator import itemgetter, attrgetter, setitem @@ -549,6 +549,7 @@ def __init__(self, ostream, rowstream, colstream, config): self.external_functions = {} self.used_named_expressions = set() self.var_map = {} + self.var_id_to_nl = None self.sorter = FileDeterminism_to_SortComponents(config.file_determinism) self.visitor = AMPLRepnVisitor( self.template, @@ -1646,6 +1647,9 @@ def _categorize_vars(self, comp_list, linear_by_comp): Count of the number of components that each var appears in. """ + subexpression_cache = self.subexpression_cache + used_named_expressions = self.used_named_expressions + var_map = self.var_map all_linear_vars = set() all_nonlinear_vars = set() nnz_by_var = {} @@ -1673,9 +1677,14 @@ def _categorize_vars(self, comp_list, linear_by_comp): # Process the nonlinear portion of this component if expr_info.nonlinear: nonlinear_vars = set() - for _id in expr_info.nonlinear[1]: + _id_src = [expr_info.nonlinear[1]] + for _id in chain.from_iterable(_id_src): if _id in nonlinear_vars: continue + if _id not in var_map and _id not in used_named_expressions: + _sub_info = subexpression_cache[_id][1] + _id_src.append(_sub_info.nonlinear[1]) + continue if _id in linear_by_comp: nonlinear_vars.update(linear_by_comp[_id]) else: @@ -1945,7 +1954,22 @@ def _write_nl_expression(self, repn, include_const): # Add the constant to the NL expression. AMPL adds the # constant as the second argument, so we will too. 
nl = self.template.binary_sum + nl + self.template.const % repn.const - self.ostream.write(nl % tuple(map(self.var_id_to_nl.__getitem__, args))) + try: + self.ostream.write(nl % tuple(map(self.var_id_to_nl.__getitem__, args))) + except KeyError: + final_args = [] + for arg in args: + if arg in self.var_id_to_nl: + final_args.append(self.var_id_to_nl[arg]) + else: + _nl, _ids, _ = self.subexpression_cache[arg][1].compile_repn( + self.visitor + ) + final_args.append( + _nl % tuple(map(self.var_id_to_nl.__getitem__, _ids)) + ) + self.ostream.write(nl % tuple(final_args)) + elif include_const: self.ostream.write(self.template.const % repn.const) else: @@ -2708,14 +2732,33 @@ def handle_external_function_node(visitor, node, *args): else: visitor.external_functions[func] = (len(visitor.external_functions), node._fcn) comment = f'\t#{node.local_name}' if visitor.symbolic_solver_labels else '' - nonlin = node_result_to_amplrepn(args[0]).compile_repn( - visitor, - visitor.template.external_fcn - % (visitor.external_functions[func][0], len(args), comment), + nl = visitor.template.external_fcn % ( + visitor.external_functions[func][0], + len(args), + comment, + ) + arg_ids = [] + for arg in args: + _id = id(arg) + arg_ids.append(_id) + named_exprs = set() + visitor.subexpression_cache[_id] = ( + arg, + AMPLRepn( + 0, + None, + node_result_to_amplrepn(arg).compile_repn( + visitor, named_exprs=named_exprs + ), + ), + (None, None, True), + ) + if not named_exprs: + named_exprs = None + return ( + _GENERAL, + AMPLRepn(0, None, (nl + '%s' * len(arg_ids), arg_ids, named_exprs)), ) - for arg in args[1:]: - nonlin = node_result_to_amplrepn(arg).compile_repn(visitor, *nonlin) - return (_GENERAL, AMPLRepn(0, None, nonlin)) _operator_handles = ExitNodeDispatcher( From 2a2fe4e3cc9c2625bcab35bfdd8a89483007a7bd Mon Sep 17 00:00:00 2001 From: John Siirola Date: Thu, 16 May 2024 11:19:55 -0600 Subject: [PATCH 018/113] Attempt to propagate initial values for presolved variables --- pyomo/repn/plugins/nl_writer.py | 30 ++++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) diff --git a/pyomo/repn/plugins/nl_writer.py b/pyomo/repn/plugins/nl_writer.py index bed073ea1d7..fc6db2c33fb 100644 --- a/pyomo/repn/plugins/nl_writer.py +++ b/pyomo/repn/plugins/nl_writer.py @@ -774,9 +774,10 @@ def write(self, model): # expressions, or when users provide superfluous variables in # the column ordering. 
var_bounds = {_id: v.bounds for _id, v in var_map.items()} + var_values = {_id: v.value for _id, v in var_map.items()} eliminated_cons, eliminated_vars = self._linear_presolve( - comp_by_linear_var, lcon_by_linear_nnz, var_bounds + comp_by_linear_var, lcon_by_linear_nnz, var_bounds, var_values ) del comp_by_linear_var del lcon_by_linear_nnz @@ -1472,7 +1473,7 @@ def write(self, model): # _init_lines = [ (var_idx, val if val.__class__ in int_float else float(val)) - for var_idx, val in enumerate(var_map[_id].value for _id in variables) + for var_idx, val in enumerate(map(var_values.__getitem__, variables)) if val is not None ] if scale_model: @@ -1746,7 +1747,9 @@ def _count_subexpression_occurrences(self): n_subexpressions[0] += 1 return n_subexpressions - def _linear_presolve(self, comp_by_linear_var, lcon_by_linear_nnz, var_bounds): + def _linear_presolve( + self, comp_by_linear_var, lcon_by_linear_nnz, var_bounds, var_values + ): eliminated_vars = {} eliminated_cons = set() if not self.config.linear_presolve: @@ -1819,7 +1822,7 @@ def _linear_presolve(self, comp_by_linear_var, lcon_by_linear_nnz, var_bounds): ): _id, id2 = id2, _id coef, coef2 = coef2, coef - # substituting _id with a*x + b + # eliminating _id and replacing it with a*x + b a = -coef2 / coef x = id2 b = expr_info.const = (lb - expr_info.const) / coef @@ -1849,6 +1852,25 @@ def _linear_presolve(self, comp_by_linear_var, lcon_by_linear_nnz, var_bounds): var_bounds[x] = x_lb, x_ub if x_lb == x_ub and x_lb is not None: fixed_vars.append(x) + # Given that we are eliminating a variable, we want to + # attempt to sanely resolve the initial variable values. + y_init = var_values[_id] + if y_init is not None: + # Y has a value + x_init = var_values[x] + if x_init is None: + # X does not; just use the one calculated from Y + x_init = (y_init - b) / a + else: + # X does too, use the average of the two values + x_init = (x_init + (y_init - b) / a) / 2.0 + # Ensure that the initial value respects the + # tightened bounds + if x_ub is not None and x_init > x_ub: + x_init = x_ub + if x_lb is not None and x_init < x_lb: + x_init = x_lb + var_values[x] = x_init eliminated_cons.add(con_id) else: break From 0eb5545e72bd5f0a8981a5ead7ac2788ee1985ef Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Sun, 19 May 2024 15:48:10 -0400 Subject: [PATCH 019/113] Fixed some bugs associated with specifying experiment design variables with no indices or a single (float) value/bound --- pyomo/contrib/doe/measurements.py | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/pyomo/contrib/doe/measurements.py b/pyomo/contrib/doe/measurements.py index fd3962f7888..e7e16db6283 100644 --- a/pyomo/contrib/doe/measurements.py +++ b/pyomo/contrib/doe/measurements.py @@ -96,16 +96,21 @@ def add_variables( ) if values is not None: + # if a scalar (int or float) is given, set it as the value for all variables + if type(values) in native_numeric_types: + values = [values] * len(added_names) # this dictionary keys are special set, values are its value self.variable_names_value.update(zip(added_names, values)) - # if a scalar (int or float) is given, set it as the lower bound for all variables + if lower_bounds is not None: + # if a scalar (int or float) is given, set it as the lower bound for all variables if type(lower_bounds) in native_numeric_types: lower_bounds = [lower_bounds] * len(added_names) self.lower_bounds.update(zip(added_names, lower_bounds)) if upper_bounds is not None: + # if a scalar (int or float) is given, 
set it as the upper bound for all variables if type(upper_bounds) in native_numeric_types: upper_bounds = [upper_bounds] * len(added_names) self.upper_bounds.update(zip(added_names, upper_bounds)) @@ -129,7 +134,7 @@ def _generate_variable_names_with_indices( """ # first combine all indices into a list all_index_list = [] # contains all index lists - if indices: + if indices is not None: for index_pointer in indices: all_index_list.append(indices[index_pointer]) @@ -143,8 +148,11 @@ def _generate_variable_names_with_indices( added_names = [] # iterate over index combinations ["CA", 1], ["CA", 2], ..., ["CC", 2], ["CC", 3] for index_instance in all_variable_indices: - var_name_index_string = var_name + "[" + var_name_index_string = var_name for i, idx in enumerate(index_instance): + # if i is the first index, open the [] + if i == 0: + var_name_index_string += "[" # use repr() is different from using str() # with repr(), "CA" is "CA", with str(), "CA" is CA. The first is not valid in our interface. var_name_index_string += str(idx) @@ -175,22 +183,31 @@ def _check_valid_input( """ assert isinstance(var_name, str), "var_name should be a string." - if time_index_position not in indices: + # check if time_index_position is in indices + if (indices is not None # ensure not None + and time_index_position is None # ensure not None + and time_index_position not in indices # ensure time_index_position is in indices + ): raise ValueError("time index cannot be found in indices.") # if given a list, check if bounds have the same length with flattened variable - if values is not None and len(values) != len_indices: + if (values is not None # ensure not None + and not type(values) in native_numeric_types # skip this test if scalar (int or float) + and len(values) != len_indices + ): raise ValueError("Values is of different length with indices.") if ( lower_bounds is not None # ensure not None + and not type(lower_bounds) in native_numeric_types # skip this test if scalar (int or float) and isinstance(lower_bounds, collections.abc.Sequence) # ensure list-like and len(lower_bounds) != len_indices # ensure same length ): raise ValueError("Lowerbounds is of different length with indices.") if ( - upper_bounds is not None # ensure None + upper_bounds is not None # ensure not None + and not type(upper_bounds) in native_numeric_types # skip this test if scalar (int or float) and isinstance(upper_bounds, collections.abc.Sequence) # ensure list-like and len(upper_bounds) != len_indices # ensure same length ): From 7b798ac989b8691232992f82d584c6c19e0f7d16 Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Sun, 19 May 2024 21:31:36 -0400 Subject: [PATCH 020/113] Fixed bugs in sensitivity analysis. --- pyomo/contrib/doe/doe.py | 32 +++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/pyomo/contrib/doe/doe.py b/pyomo/contrib/doe/doe.py index 0fc3e8770fe..b00e24e18dd 100644 --- a/pyomo/contrib/doe/doe.py +++ b/pyomo/contrib/doe/doe.py @@ -71,6 +71,7 @@ def __init__( prior_FIM=None, discretize_model=None, args=None, + logger_level=logging.INFO ): """ This package enables model-based design of experiments analysis with Pyomo. @@ -101,6 +102,8 @@ def __init__( A user-specified ``function`` that discretizes the model. Only use with Pyomo.DAE, default=None args: Additional arguments for the create_model function. + logger_level: + Specify the level of the logger. Changer to logging.DEBUG for all messages. 
""" # parameters @@ -136,7 +139,7 @@ def __init__( # if print statements self.logger = logging.getLogger(__name__) - self.logger.setLevel(level=logging.INFO) + self.logger.setLevel(level=logger_level) def _check_inputs(self): """ @@ -727,6 +730,7 @@ def run_grid_search( store_optimality_as_csv=None, formula="central", step=0.001, + post_processing_function=None ): """ Enumerate through full grid search for any number of design variables; @@ -768,6 +772,10 @@ def run_grid_search( This option is only used for CalculationMode.sequential_finite. step: Sensitivity perturbation step size, a fraction between [0,1]. default is 0.001 + post_processing_function: + An optional function that executes after each solve of the grid search. + The function should take one input: the Pyomo model. This could be a plotting function. + Default is None. Returns ------- @@ -808,12 +816,18 @@ def run_grid_search( design_iter = self.design_vars.variable_names_value.copy() # update the controlled value of certain time points for certain design variables for i, names in enumerate(design_dimension_names): - # if the element is a list, all design variables in this list share the same values - if isinstance(names, collections.abc.Sequence): + print("names =",names,"for i=",i) + if isinstance(names, str): + # if 'names' is simply a string, copy the new value + design_iter[names] = list(design_set_iter)[i] + elif isinstance(names, collections.abc.Sequence): + # if the element is a list, all design variables in this list share the same values for n in names: design_iter[n] = list(design_set_iter)[i] else: - design_iter[names] = list(design_set_iter)[i] + # otherwise just copy the value + # design_iter[names] = list(design_set_iter)[i] + raise NotImplementedError('You should not see this error message. Please report it to the Pyomo.DoE developers.') self.design_vars.variable_names_value = design_iter iter_timer = TicTocTimer() @@ -828,7 +842,7 @@ def run_grid_search( else: store_output_name = store_name + str(count) - if read_name: + if read_name is not None: read_input_name = read_name + str(count) else: read_input_name = None @@ -856,12 +870,16 @@ def run_grid_search( # give run information at each iteration self.logger.info('This is run %s out of %s.', count, total_count) - self.logger.info('The code has run %s seconds.', sum(time_set)) + self.logger.info('The code has run %s seconds.', round(sum(time_set),2)) self.logger.info( 'Estimated remaining time: %s seconds', - (sum(time_set) / (count + 1) * (total_count - count - 1)), + round(sum(time_set) / (count) * (total_count - count), 2), # need to check this math... 
it gives a negative number for the final count ) + if post_processing_function is not None: + # Call the post processing function + post_processing_function(self.model) + # the combined result object are organized as a dictionary, keys are a tuple of the design variable values, values are a result object result_combine[tuple(design_set_iter)] = result_iter From c2a0c8e24c66941c00eccba8efd6c48155bd7d0e Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Sun, 19 May 2024 21:52:40 -0400 Subject: [PATCH 021/113] Removed an extra print statement from debugging --- pyomo/contrib/doe/doe.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pyomo/contrib/doe/doe.py b/pyomo/contrib/doe/doe.py index b00e24e18dd..ddd091a24e2 100644 --- a/pyomo/contrib/doe/doe.py +++ b/pyomo/contrib/doe/doe.py @@ -816,7 +816,6 @@ def run_grid_search( design_iter = self.design_vars.variable_names_value.copy() # update the controlled value of certain time points for certain design variables for i, names in enumerate(design_dimension_names): - print("names =",names,"for i=",i) if isinstance(names, str): # if 'names' is simply a string, copy the new value design_iter[names] = list(design_set_iter)[i] From 0f3a1ff9277ff97f9c66747f9bbbc4a4af207358 Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Mon, 20 May 2024 10:37:19 -0400 Subject: [PATCH 022/113] Updated useable for optional post-processing function in sensitivity analysis --- pyomo/contrib/doe/doe.py | 13 ++++++++++--- pyomo/contrib/doe/result.py | 16 ++++++++-------- 2 files changed, 18 insertions(+), 11 deletions(-) diff --git a/pyomo/contrib/doe/doe.py b/pyomo/contrib/doe/doe.py index ddd091a24e2..ed44a18d262 100644 --- a/pyomo/contrib/doe/doe.py +++ b/pyomo/contrib/doe/doe.py @@ -423,6 +423,9 @@ def _sequential_finite(self, read_output, extract_single_model, store_output): # solve model square_result = self._solve_doe(mod, fix=True) + # save model from optional post processing function + self._square_model_from_compute_FIM = mod + if extract_single_model: mod_name = store_output + '.csv' dataframe = extract_single_model(mod, square_result) @@ -487,10 +490,10 @@ def _direct_kaug(self): mod = self.create_model(model_option=ModelOptionLib.parmest) # discretize if needed - if self.discretize_model: + if self.discretize_model is not None: mod = self.discretize_model(mod, block=False) - # add objective function + # add zero (dummy/placeholder) objective function mod.Obj = pyo.Objective(expr=0, sense=pyo.minimize) # set ub and lb to parameters @@ -505,6 +508,10 @@ def _direct_kaug(self): # call k_aug get_dsdp function square_result = self._solve_doe(mod, fix=True) + + # save model from optional post processing function + self._square_model_from_compute_FIM = mod + dsdp_re, col = get_dsdp( mod, list(self.param.keys()), self.param, tee=self.tee_opt ) @@ -877,7 +884,7 @@ def run_grid_search( if post_processing_function is not None: # Call the post processing function - post_processing_function(self.model) + post_processing_function(self._square_model_from_compute_FIM) # the combined result object are organized as a dictionary, keys are a tuple of the design variable values, values are a result object result_combine[tuple(design_set_iter)] = result_iter diff --git a/pyomo/contrib/doe/result.py b/pyomo/contrib/doe/result.py index 1593214c30a..d8ed343352a 100644 --- a/pyomo/contrib/doe/result.py +++ b/pyomo/contrib/doe/result.py @@ -549,7 +549,7 @@ def _curve1D( ax.scatter(x_range, y_range_A) ax.set_ylabel('$log_{10}$ Trace') ax.set_xlabel(xlabel_text) - 
plt.pyplot.title(title_text + ' - A optimality') + plt.pyplot.title(title_text + ': A-optimality') plt.pyplot.show() # Draw D-optimality @@ -565,7 +565,7 @@ def _curve1D( ax.scatter(x_range, y_range_D) ax.set_ylabel('$log_{10}$ Determinant') ax.set_xlabel(xlabel_text) - plt.pyplot.title(title_text + ' - D optimality') + plt.pyplot.title(title_text + ': D-optimality') plt.pyplot.show() # Draw E-optimality @@ -581,7 +581,7 @@ def _curve1D( ax.scatter(x_range, y_range_E) ax.set_ylabel('$log_{10}$ Minimal eigenvalue') ax.set_xlabel(xlabel_text) - plt.pyplot.title(title_text + ' - E optimality') + plt.pyplot.title(title_text + ': E-optimality') plt.pyplot.show() # Draw Modified E-optimality @@ -597,7 +597,7 @@ def _curve1D( ax.scatter(x_range, y_range_ME) ax.set_ylabel('$log_{10}$ Condition number') ax.set_xlabel(xlabel_text) - plt.pyplot.title(title_text + ' - Modified E optimality') + plt.pyplot.title(title_text + ': Modified E-optimality') plt.pyplot.show() def _heatmap( @@ -691,7 +691,7 @@ def _heatmap( im = ax.imshow(hes_a.T, cmap=plt.pyplot.cm.hot_r) ba = plt.pyplot.colorbar(im) ba.set_label('log10(trace(FIM))') - plt.pyplot.title(title_text + ' - A optimality') + plt.pyplot.title(title_text + ': A-optimality') plt.pyplot.show() # D-optimality @@ -712,7 +712,7 @@ def _heatmap( im = ax.imshow(hes_d.T, cmap=plt.pyplot.cm.hot_r) ba = plt.pyplot.colorbar(im) ba.set_label('log10(det(FIM))') - plt.pyplot.title(title_text + ' - D optimality') + plt.pyplot.title(title_text + ': D-optimality') plt.pyplot.show() # E-optimality @@ -733,7 +733,7 @@ def _heatmap( im = ax.imshow(hes_e.T, cmap=plt.pyplot.cm.hot_r) ba = plt.pyplot.colorbar(im) ba.set_label('log10(minimal eig(FIM))') - plt.pyplot.title(title_text + ' - E optimality') + plt.pyplot.title(title_text + ': E-optimality') plt.pyplot.show() # modified E-optimality @@ -754,5 +754,5 @@ def _heatmap( im = ax.imshow(hes_e2.T, cmap=plt.pyplot.cm.hot_r) ba = plt.pyplot.colorbar(im) ba.set_label('log10(cond(FIM))') - plt.pyplot.title(title_text + ' - Modified E-optimality') + plt.pyplot.title(title_text + ': Modified E-optimality') plt.pyplot.show() From 2f744561e7e26ff79b34c59f97dc283c7d438b37 Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Mon, 20 May 2024 12:50:52 -0400 Subject: [PATCH 023/113] Fixed maximizing log(trace) --- pyomo/contrib/doe/doe.py | 71 ++++++++++++++++++++++++++-------------- 1 file changed, 46 insertions(+), 25 deletions(-) diff --git a/pyomo/contrib/doe/doe.py b/pyomo/contrib/doe/doe.py index ed44a18d262..8b110bbf0a4 100644 --- a/pyomo/contrib/doe/doe.py +++ b/pyomo/contrib/doe/doe.py @@ -988,19 +988,22 @@ def initialize_fim(m, j, d): initialize=identity_matrix, ) - # move the L matrix initial point to a dictionary - if type(self.L_initial) != type(None): - dict_cho = {} - for i, bu in enumerate(model.regression_parameters): - for j, un in enumerate(model.regression_parameters): - dict_cho[(bu, un)] = self.L_initial[i][j] - - # use the L dictionary to initialize L matrix - def init_cho(m, i, j): - return dict_cho[(i, j)] - # if cholesky, define L elements as variables - if self.Cholesky_option: + if self.Cholesky_option and self.objective_option == ObjectiveLib.det: + + # move the L matrix initial point to a dictionary + if type(self.L_initial) != type(None): + dict_cho = {} + # Loop over rows + for i, bu in enumerate(model.regression_parameters): + # Loop over columns + for j, un in enumerate(model.regression_parameters): + dict_cho[(bu, un)] = self.L_initial[i][j] + + # use the L dictionary to initialize L matrix + def 
init_cho(m, i, j): + return dict_cho[(i, j)] + # Define elements of Cholesky decomposition matrix as Pyomo variables and either # Initialize with L in L_initial if type(self.L_initial) != type(None): @@ -1095,14 +1098,22 @@ def fim_rule(m, p, q): def _add_objective(self, m): + small_number = 1E-10 + + # Assemble the FIM matrix. This is helpful for initialization! + fim = np.zeros((len(self.param), len(self.param))) + for i, bu in enumerate(m.regression_parameters): + for j, un in enumerate(m.regression_parameters): + # Copy value from Pyomo model into numpy array + fim[i][j] = m.fim[bu, un].value + + # Set lower bound to ensure diagonal elements are (almost) non-negative + # m.fim[bu, un].setlb(-small_number) + ### Initialize the Cholesky decomposition matrix - if self.Cholesky_option: + if self.Cholesky_option and self.objective_option == ObjectiveLib.det: + - # Assemble the FIM matrix - fim = np.zeros((len(self.param), len(self.param))) - for i, bu in enumerate(m.regression_parameters): - for j, un in enumerate(m.regression_parameters): - fim[i][j] = m.fim[bu, un].value # Calculate the eigenvalues of the FIM matrix eig = np.linalg.eigvals(fim) @@ -1115,10 +1126,10 @@ def _add_objective(self, m): # Compute the Cholesky decomposition of the FIM matrix L = np.linalg.cholesky(fim) - # Initialize the Cholesky matrix - for i, c in enumerate(m.regression_parameters): - for j, d in enumerate(m.regression_parameters): - m.L_ele[c, d].value = L[i, j] + # Initialize the Cholesky matrix + for i, c in enumerate(m.regression_parameters): + for j, d in enumerate(m.regression_parameters): + m.L_ele[c, d].value = L[i, j] def cholesky_imp(m, c, d): """ @@ -1173,7 +1184,7 @@ def det_general(m): ) return m.det == det_perm - if self.Cholesky_option: + if self.Cholesky_option and self.objective_option == ObjectiveLib.det: m.cholesky_cons = pyo.Constraint( m.regression_parameters, m.regression_parameters, rule=cholesky_imp ) @@ -1181,16 +1192,26 @@ def det_general(m): expr=2 * sum(pyo.log(m.L_ele[j, j]) for j in m.regression_parameters), sense=pyo.maximize, ) - # if not cholesky but determinant, calculating det and evaluate the OBJ with det + elif self.objective_option == ObjectiveLib.det: + # if not cholesky but determinant, calculating det and evaluate the OBJ with det + m.det = pyo.Var(initialize=np.linalg.det(fim), bounds=(small_number, None)) m.det_rule = pyo.Constraint(rule=det_general) m.Obj = pyo.Objective(expr=pyo.log(m.det), sense=pyo.maximize) - # if not determinant or cholesky, calculating the OBJ with trace + elif self.objective_option == ObjectiveLib.trace: + # if not determinant or cholesky, calculating the OBJ with trace + m.trace = pyo.Var(initialize=np.trace(fim), bounds=(small_number, None)) m.trace_rule = pyo.Constraint(rule=trace_calc) m.Obj = pyo.Objective(expr=pyo.log(m.trace), sense=pyo.maximize) + #m.Obj = pyo.Objective(expr=m.trace, sense=pyo.maximize) + elif self.objective_option == ObjectiveLib.zero: + # add dummy objective function m.Obj = pyo.Objective(expr=0) + else: + # something went wrong! + raise ValueError("Objective option not recognized. Please contact the developers as you should not see this error.") return m From 2ae726dbba0c80b73dd73a67ed50c811515c51ea Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Mon, 20 May 2024 13:31:18 -0400 Subject: [PATCH 024/113] Ran black. 
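The D-optimality objective reformatted in this patch maximizes log det(FIM) through a Cholesky factor: for a symmetric positive definite matrix M = L L^T, log det(M) = 2 * sum_j log(L[j, j]), which is what the objective 2 * sum(pyo.log(m.L_ele[j, j]) for j in m.regression_parameters) encodes. A quick standalone NumPy check of that identity (illustration only, not Pyomo.DoE code):

import numpy as np

# Any symmetric positive definite matrix will do; this one is just an example.
M = np.array([[4.0, 1.0, 0.5],
              [1.0, 3.0, 0.2],
              [0.5, 0.2, 2.0]])

L = np.linalg.cholesky(M)                 # M == L @ L.T, L lower triangular
log_det_direct = np.log(np.linalg.det(M))
log_det_cholesky = 2.0 * np.sum(np.log(np.diag(L)))
print(np.isclose(log_det_direct, log_det_cholesky))   # True

Working through the factor avoids assembling the full symbolic determinant (the det_general fallback), at the cost of assuming the FIM is positive definite, which is why the initialization code checks the eigenvalues before factorizing.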
--- pyomo/contrib/doe/doe.py | 40 ++++++++++++++++++------------- pyomo/contrib/doe/measurements.py | 21 +++++++++------- 2 files changed, 36 insertions(+), 25 deletions(-) diff --git a/pyomo/contrib/doe/doe.py b/pyomo/contrib/doe/doe.py index 8b110bbf0a4..3243bec6e1c 100644 --- a/pyomo/contrib/doe/doe.py +++ b/pyomo/contrib/doe/doe.py @@ -71,7 +71,7 @@ def __init__( prior_FIM=None, discretize_model=None, args=None, - logger_level=logging.INFO + logger_level=logging.INFO, ): """ This package enables model-based design of experiments analysis with Pyomo. @@ -508,10 +508,10 @@ def _direct_kaug(self): # call k_aug get_dsdp function square_result = self._solve_doe(mod, fix=True) - + # save model from optional post processing function self._square_model_from_compute_FIM = mod - + dsdp_re, col = get_dsdp( mod, list(self.param.keys()), self.param, tee=self.tee_opt ) @@ -737,7 +737,7 @@ def run_grid_search( store_optimality_as_csv=None, formula="central", step=0.001, - post_processing_function=None + post_processing_function=None, ): """ Enumerate through full grid search for any number of design variables; @@ -833,7 +833,9 @@ def run_grid_search( else: # otherwise just copy the value # design_iter[names] = list(design_set_iter)[i] - raise NotImplementedError('You should not see this error message. Please report it to the Pyomo.DoE developers.') + raise NotImplementedError( + 'You should not see this error message. Please report it to the Pyomo.DoE developers.' + ) self.design_vars.variable_names_value = design_iter iter_timer = TicTocTimer() @@ -876,10 +878,14 @@ def run_grid_search( # give run information at each iteration self.logger.info('This is run %s out of %s.', count, total_count) - self.logger.info('The code has run %s seconds.', round(sum(time_set),2)) + self.logger.info( + 'The code has run %s seconds.', round(sum(time_set), 2) + ) self.logger.info( 'Estimated remaining time: %s seconds', - round(sum(time_set) / (count) * (total_count - count), 2), # need to check this math... it gives a negative number for the final count + round( + sum(time_set) / (count) * (total_count - count), 2 + ), # need to check this math... it gives a negative number for the final count ) if post_processing_function is not None: @@ -990,7 +996,7 @@ def initialize_fim(m, j, d): # if cholesky, define L elements as variables if self.Cholesky_option and self.objective_option == ObjectiveLib.det: - + # move the L matrix initial point to a dictionary if type(self.L_initial) != type(None): dict_cho = {} @@ -1003,7 +1009,7 @@ def initialize_fim(m, j, d): # use the L dictionary to initialize L matrix def init_cho(m, i, j): return dict_cho[(i, j)] - + # Define elements of Cholesky decomposition matrix as Pyomo variables and either # Initialize with L in L_initial if type(self.L_initial) != type(None): @@ -1098,7 +1104,7 @@ def fim_rule(m, p, q): def _add_objective(self, m): - small_number = 1E-10 + small_number = 1e-10 # Assemble the FIM matrix. This is helpful for initialization! 
fim = np.zeros((len(self.param), len(self.param))) @@ -1113,8 +1119,6 @@ def _add_objective(self, m): ### Initialize the Cholesky decomposition matrix if self.Cholesky_option and self.objective_option == ObjectiveLib.det: - - # Calculate the eigenvalues of the FIM matrix eig = np.linalg.eigvals(fim) @@ -1192,26 +1196,28 @@ def det_general(m): expr=2 * sum(pyo.log(m.L_ele[j, j]) for j in m.regression_parameters), sense=pyo.maximize, ) - + elif self.objective_option == ObjectiveLib.det: # if not cholesky but determinant, calculating det and evaluate the OBJ with det m.det = pyo.Var(initialize=np.linalg.det(fim), bounds=(small_number, None)) m.det_rule = pyo.Constraint(rule=det_general) m.Obj = pyo.Objective(expr=pyo.log(m.det), sense=pyo.maximize) - + elif self.objective_option == ObjectiveLib.trace: # if not determinant or cholesky, calculating the OBJ with trace m.trace = pyo.Var(initialize=np.trace(fim), bounds=(small_number, None)) m.trace_rule = pyo.Constraint(rule=trace_calc) m.Obj = pyo.Objective(expr=pyo.log(m.trace), sense=pyo.maximize) - #m.Obj = pyo.Objective(expr=m.trace, sense=pyo.maximize) - + # m.Obj = pyo.Objective(expr=m.trace, sense=pyo.maximize) + elif self.objective_option == ObjectiveLib.zero: # add dummy objective function m.Obj = pyo.Objective(expr=0) else: # something went wrong! - raise ValueError("Objective option not recognized. Please contact the developers as you should not see this error.") + raise ValueError( + "Objective option not recognized. Please contact the developers as you should not see this error." + ) return m diff --git a/pyomo/contrib/doe/measurements.py b/pyomo/contrib/doe/measurements.py index e7e16db6283..47df09d27c1 100644 --- a/pyomo/contrib/doe/measurements.py +++ b/pyomo/contrib/doe/measurements.py @@ -102,7 +102,6 @@ def add_variables( # this dictionary keys are special set, values are its value self.variable_names_value.update(zip(added_names, values)) - if lower_bounds is not None: # if a scalar (int or float) is given, set it as the lower bound for all variables if type(lower_bounds) in native_numeric_types: @@ -184,22 +183,27 @@ def _check_valid_input( assert isinstance(var_name, str), "var_name should be a string." 
# check if time_index_position is in indices - if (indices is not None # ensure not None - and time_index_position is None # ensure not None - and time_index_position not in indices # ensure time_index_position is in indices + if ( + indices is not None # ensure not None + and time_index_position is None # ensure not None + and time_index_position + not in indices # ensure time_index_position is in indices ): raise ValueError("time index cannot be found in indices.") # if given a list, check if bounds have the same length with flattened variable - if (values is not None # ensure not None - and not type(values) in native_numeric_types # skip this test if scalar (int or float) + if ( + values is not None # ensure not None + and not type(values) + in native_numeric_types # skip this test if scalar (int or float) and len(values) != len_indices ): raise ValueError("Values is of different length with indices.") if ( lower_bounds is not None # ensure not None - and not type(lower_bounds) in native_numeric_types # skip this test if scalar (int or float) + and not type(lower_bounds) + in native_numeric_types # skip this test if scalar (int or float) and isinstance(lower_bounds, collections.abc.Sequence) # ensure list-like and len(lower_bounds) != len_indices # ensure same length ): @@ -207,7 +211,8 @@ def _check_valid_input( if ( upper_bounds is not None # ensure not None - and not type(upper_bounds) in native_numeric_types # skip this test if scalar (int or float) + and not type(upper_bounds) + in native_numeric_types # skip this test if scalar (int or float) and isinstance(upper_bounds, collections.abc.Sequence) # ensure list-like and len(upper_bounds) != len_indices # ensure same length ): From 405ac567a4c2d793adf79400b77609466bbc226d Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Mon, 20 May 2024 13:46:46 -0400 Subject: [PATCH 025/113] Added note to help with additional debugging. --- pyomo/contrib/doe/measurements.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pyomo/contrib/doe/measurements.py b/pyomo/contrib/doe/measurements.py index 47df09d27c1..229fb7f7830 100644 --- a/pyomo/contrib/doe/measurements.py +++ b/pyomo/contrib/doe/measurements.py @@ -182,6 +182,7 @@ def _check_valid_input( """ assert isinstance(var_name, str), "var_name should be a string." + # debugging note: what is an integer versus a list versus a dictionary here? 
# check if time_index_position is in indices if ( indices is not None # ensure not None From 50c8ffc71be56ce8569bb3cca54d82122d58507a Mon Sep 17 00:00:00 2001 From: Daniel Laky <29078718+djlaky@users.noreply.github.com> Date: Mon, 20 May 2024 14:17:07 -0400 Subject: [PATCH 026/113] Enact design variable bounds --- pyomo/contrib/doe/doe.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/pyomo/contrib/doe/doe.py b/pyomo/contrib/doe/doe.py index 3243bec6e1c..f84147841c6 100644 --- a/pyomo/contrib/doe/doe.py +++ b/pyomo/contrib/doe/doe.py @@ -662,6 +662,13 @@ def fix1(mod, s): con_name = "con" + name mod.add_component(con_name, pyo.Constraint(mod.scenario, expr=fix1)) + # Add user-defined design variable bounds + cuid = pyo.ComponentUID(name) + design_var_global = cuid.find_component_on(mod) + # Set the lower and upper bounds of the design variables + design_var_global.setlb(self.design_vars.lower_bounds[name]) + design_var_global.setub(self.design_vars.upper_bounds[name]) + return mod def _finite_calculation(self, output_record): From dbd3d8f0c8f753881fa66faf9783e2395487121f Mon Sep 17 00:00:00 2001 From: Daniel Laky <29078718+djlaky@users.noreply.github.com> Date: Mon, 20 May 2024 14:19:46 -0400 Subject: [PATCH 027/113] Enacted user-passed model arguments --- pyomo/contrib/doe/doe.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pyomo/contrib/doe/doe.py b/pyomo/contrib/doe/doe.py index f84147841c6..02ac1908b5d 100644 --- a/pyomo/contrib/doe/doe.py +++ b/pyomo/contrib/doe/doe.py @@ -70,7 +70,7 @@ def __init__( solver=None, prior_FIM=None, discretize_model=None, - args=None, + args={}, logger_level=logging.INFO, ): """ @@ -487,7 +487,7 @@ def _sequential_finite(self, read_output, extract_single_model, store_output): def _direct_kaug(self): # create model - mod = self.create_model(model_option=ModelOptionLib.parmest) + mod = self.create_model(model_option=ModelOptionLib.parmest, **self.args) # discretize if needed if self.discretize_model is not None: @@ -612,7 +612,7 @@ def _create_block(self): ) # Allow user to self-define complex design variables - self.create_model(mod=mod, model_option=ModelOptionLib.stage1) + self.create_model(mod=mod, model_option=ModelOptionLib.stage1, **self.args) # Fix parameter values in the copy of the stage1 model (if they exist) for par in self.param: @@ -632,11 +632,11 @@ def block_build(b, s): theta_initialize = self.scenario_data.scenario[s] # Add model on block with theta values self.create_model( - mod=b, model_option=ModelOptionLib.stage2, theta=theta_initialize + mod=b, model_option=ModelOptionLib.stage2, theta=theta_initialize, **self.args, ) else: # Otherwise add model on block without theta values - self.create_model(mod=b, model_option=ModelOptionLib.stage2) + self.create_model(mod=b, model_option=ModelOptionLib.stage2, **self.args) # fix parameter values to perturbed values for par in self.param: From 9ded80f156e3f1e0139d98bf4e036dc30c100e05 Mon Sep 17 00:00:00 2001 From: John Siirola Date: Mon, 20 May 2024 16:12:25 -0600 Subject: [PATCH 028/113] Resolve KeyError with export_nonlinear_variables --- pyomo/repn/plugins/nl_writer.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pyomo/repn/plugins/nl_writer.py b/pyomo/repn/plugins/nl_writer.py index fc6db2c33fb..7a991a989f0 100644 --- a/pyomo/repn/plugins/nl_writer.py +++ b/pyomo/repn/plugins/nl_writer.py @@ -851,6 +851,7 @@ def write(self, model): if _id not in var_map: var_map[_id] = _v var_bounds[_id] = _v.bounds + var_values[_id] = _v.value 
con_vars_nonlinear.add(_id) con_nnz = sum(con_nnz_by_var.values()) From 5badeba53f1b47685c885fff48df3a6e887ef718 Mon Sep 17 00:00:00 2001 From: John Siirola Date: Mon, 20 May 2024 16:13:18 -0600 Subject: [PATCH 029/113] Fix error when ressolving constant arguments to external functions --- pyomo/repn/plugins/nl_writer.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pyomo/repn/plugins/nl_writer.py b/pyomo/repn/plugins/nl_writer.py index 7a991a989f0..331c45b78a5 100644 --- a/pyomo/repn/plugins/nl_writer.py +++ b/pyomo/repn/plugins/nl_writer.py @@ -1683,8 +1683,9 @@ def _categorize_vars(self, comp_list, linear_by_comp): if _id in nonlinear_vars: continue if _id not in var_map and _id not in used_named_expressions: - _sub_info = subexpression_cache[_id][1] - _id_src.append(_sub_info.nonlinear[1]) + _sub_info = subexpression_cache[_id][1].nonlinear + if _sub_info: + _id_src.append(_sub_info[1]) continue if _id in linear_by_comp: nonlinear_vars.update(linear_by_comp[_id]) From 6c1ced9ab4c922dca1b8823911ceea910f853acd Mon Sep 17 00:00:00 2001 From: John Siirola Date: Mon, 20 May 2024 16:14:24 -0600 Subject: [PATCH 030/113] Add test for external functions whose arguments presolve to constants --- pyomo/repn/tests/ampl/test_nlv2.py | 120 ++++++++++++++++++++++++++++- 1 file changed, 119 insertions(+), 1 deletion(-) diff --git a/pyomo/repn/tests/ampl/test_nlv2.py b/pyomo/repn/tests/ampl/test_nlv2.py index f2378e56b4c..784a277c118 100644 --- a/pyomo/repn/tests/ampl/test_nlv2.py +++ b/pyomo/repn/tests/ampl/test_nlv2.py @@ -25,6 +25,7 @@ from pyomo.common.dependencies import numpy, numpy_available from pyomo.common.errors import MouseTrap +from pyomo.common.gsl import find_GSL from pyomo.common.log import LoggingIntercept from pyomo.common.tee import capture_output from pyomo.common.tempfiles import TempfileManager @@ -2311,7 +2312,7 @@ def test_discrete_var_tabulation(self): def test_presolve_fixes_nl_defined_variables(self): # This tests a workaround for a bug in the ASL where defined - # variables with nonstant expressions in the NL portion are not + # variables with constant expressions in the NL portion are not # evaluated correctly. m = ConcreteModel() m.x = Var() @@ -2471,6 +2472,123 @@ def test_presolve_fixes_nl_defined_variables(self): J1 2 #c2 0 1 2 -1 +""", + OUT.getvalue(), + ) + ) + + def test_presolve_fixes_nl_exernal_function(self): + # This tests a workaround for a bug in the ASL where external + # functions with constant argument expressions are not + # evaluated correctly. + DLL = find_GSL() + if not DLL: + self.skipTest("Could not find the amplgsl.dll library") + + m = ConcreteModel() + m.hypot = ExternalFunction(library=DLL, function="gsl_hypot") + m.p = Param(initialize=1, mutable=True) + m.x = Var(bounds=(None, 3)) + m.y = Var(bounds=(3, None)) + m.z = Var(initialize=1) + m.o = Objective(expr=m.z**2 * m.hypot(m.p * m.x, m.p + m.y) ** 2) + m.c = Constraint(expr=m.x == m.y) + + OUT = io.StringIO() + nl_writer.NLWriter().write( + m, + OUT, + symbolic_solver_labels=True, + linear_presolve=False, + ) + self.assertEqual( + *nl_diff( + """g3 1 1 0 #problem unknown + 3 1 1 0 1 #vars, constraints, objectives, ranges, eqns + 0 1 0 0 0 0 #nonlinear constrs, objs; ccons: lin, nonlin, nd, nzlb + 0 0 #network constraints: nonlinear, linear + 0 3 0 #nonlinear vars in constraints, objectives, both + 0 1 0 1 #linear network variables; functions; arith, flags + 0 0 0 0 0 #discrete variables: binary, integer, nonlinear (b,c,o) + 2 3 #nonzeros in Jacobian, obj. 
gradient + 1 1 #max name lengths: constraints, variables + 0 0 0 0 0 #common exprs: b,c,o,c1,o1 +F0 1 -1 gsl_hypot +C0 #c +n0 +O0 0 #o +o2 #* +o5 #^ +v0 #z +n2 +o5 #^ +f0 2 #hypot +v1 #x +o0 #+ +v2 #y +n1 +n2 +x1 #initial guess +0 1 #z +r #1 ranges (rhs's) +4 0 #c +b #3 bounds (on variables) +3 #z +1 3 #x +2 3 #y +k2 #intermediate Jacobian column lengths +0 +1 +J0 2 #c +1 1 +2 -1 +G0 3 #o +0 0 +1 0 +2 0 +""", + OUT.getvalue(), + ) + ) + + OUT = io.StringIO() + nl_writer.NLWriter().write( + m, + OUT, + symbolic_solver_labels=True, + linear_presolve=True, + ) + self.assertEqual( + *nl_diff( + """g3 1 1 0 #problem unknown + 1 0 1 0 0 #vars, constraints, objectives, ranges, eqns + 0 1 0 0 0 0 #nonlinear constrs, objs; ccons: lin, nonlin, nd, nzlb + 0 0 #network constraints: nonlinear, linear + 0 1 0 #nonlinear vars in constraints, objectives, both + 0 1 0 1 #linear network variables; functions; arith, flags + 0 0 0 0 0 #discrete variables: binary, integer, nonlinear (b,c,o) + 0 1 #nonzeros in Jacobian, obj. gradient + 1 1 #max name lengths: constraints, variables + 0 0 0 0 0 #common exprs: b,c,o,c1,o1 +F0 1 -1 gsl_hypot +O0 0 #o +o2 #* +o5 #^ +v0 #z +n2 +o5 #^ +f0 2 #hypot +n3 +n4 +n2 +x1 #initial guess +0 1 #z +r #0 ranges (rhs's) +b #1 bounds (on variables) +3 #z +k0 #intermediate Jacobian column lengths +G0 1 #o +0 0 """, OUT.getvalue(), ) From 26c0e4bf3d13a9fbd707a5c7809ef7be7e680567 Mon Sep 17 00:00:00 2001 From: John Siirola Date: Mon, 20 May 2024 16:27:49 -0600 Subject: [PATCH 031/113] NFC: apply black --- pyomo/repn/tests/ampl/test_nlv2.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/pyomo/repn/tests/ampl/test_nlv2.py b/pyomo/repn/tests/ampl/test_nlv2.py index 784a277c118..97c93ad1649 100644 --- a/pyomo/repn/tests/ampl/test_nlv2.py +++ b/pyomo/repn/tests/ampl/test_nlv2.py @@ -2496,10 +2496,7 @@ def test_presolve_fixes_nl_exernal_function(self): OUT = io.StringIO() nl_writer.NLWriter().write( - m, - OUT, - symbolic_solver_labels=True, - linear_presolve=False, + m, OUT, symbolic_solver_labels=True, linear_presolve=False ) self.assertEqual( *nl_diff( @@ -2553,10 +2550,7 @@ def test_presolve_fixes_nl_exernal_function(self): OUT = io.StringIO() nl_writer.NLWriter().write( - m, - OUT, - symbolic_solver_labels=True, - linear_presolve=True, + m, OUT, symbolic_solver_labels=True, linear_presolve=True ) self.assertEqual( *nl_diff( From 5d13a38bd6d2f699412f447b85101e4351e0823b Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Tue, 21 May 2024 07:17:14 -0400 Subject: [PATCH 032/113] Removed bounds on FIM diagonal --- pyomo/contrib/doe/doe.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pyomo/contrib/doe/doe.py b/pyomo/contrib/doe/doe.py index 02ac1908b5d..412f387f8b5 100644 --- a/pyomo/contrib/doe/doe.py +++ b/pyomo/contrib/doe/doe.py @@ -1121,7 +1121,8 @@ def _add_objective(self, m): fim[i][j] = m.fim[bu, un].value # Set lower bound to ensure diagonal elements are (almost) non-negative - # m.fim[bu, un].setlb(-small_number) + # if i == j: + # m.fim[bu, un].setlb(-small_number) ### Initialize the Cholesky decomposition matrix if self.Cholesky_option and self.objective_option == ObjectiveLib.det: From d2b75f36f0dca7e6fc243719e3e7db542ca23dfd Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Tue, 21 May 2024 07:21:14 -0400 Subject: [PATCH 033/113] Added suggestion for future improvement --- pyomo/contrib/doe/doe.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pyomo/contrib/doe/doe.py b/pyomo/contrib/doe/doe.py index 
412f387f8b5..f5c053876bd 100644 --- a/pyomo/contrib/doe/doe.py +++ b/pyomo/contrib/doe/doe.py @@ -1082,6 +1082,11 @@ def read_prior(m, i, j): model.regression_parameters, model.regression_parameters, rule=read_prior ) + # TODO: explore exploiting the symmetry of the FIM matrix + # The off-diagonal elements are symmetric, thus only half of the elements need to be calculated + # Syntax challenge: determine the order of p and q, i.e., if p > q, then replace with + # equality constraint fim[p, q] == fim[q, p] + def fim_rule(m, p, q): """ m: Pyomo model From f616aded6a98327ad057e517f6bad5cfef7bcffc Mon Sep 17 00:00:00 2001 From: John Siirola Date: Tue, 21 May 2024 14:08:19 -0600 Subject: [PATCH 034/113] Remove subexpression_order from AMPLRepnVisitor --- pyomo/contrib/incidence_analysis/config.py | 2 -- pyomo/repn/plugins/nl_writer.py | 23 +++++++--------------- pyomo/repn/tests/ampl/test_nlv2.py | 2 -- 3 files changed, 7 insertions(+), 20 deletions(-) diff --git a/pyomo/contrib/incidence_analysis/config.py b/pyomo/contrib/incidence_analysis/config.py index 2a7734ba433..9fac48c8a26 100644 --- a/pyomo/contrib/incidence_analysis/config.py +++ b/pyomo/contrib/incidence_analysis/config.py @@ -130,7 +130,6 @@ def get_config_from_kwds(**kwds): and kwds.get("_ampl_repn_visitor", None) is None ): subexpression_cache = {} - subexpression_order = [] external_functions = {} var_map = {} used_named_expressions = set() @@ -143,7 +142,6 @@ def get_config_from_kwds(**kwds): amplvisitor = AMPLRepnVisitor( text_nl_template, subexpression_cache, - subexpression_order, external_functions, var_map, used_named_expressions, diff --git a/pyomo/repn/plugins/nl_writer.py b/pyomo/repn/plugins/nl_writer.py index 331c45b78a5..04d8c42540f 100644 --- a/pyomo/repn/plugins/nl_writer.py +++ b/pyomo/repn/plugins/nl_writer.py @@ -544,7 +544,7 @@ def __init__(self, ostream, rowstream, colstream, config): else: self.template = text_nl_template self.subexpression_cache = {} - self.subexpression_order = [] + self.subexpression_order = None # set to [] later self.external_functions = {} self.used_named_expressions = set() self.var_map = {} @@ -553,7 +553,6 @@ def __init__(self, ostream, rowstream, colstream, config): self.visitor = AMPLRepnVisitor( self.template, self.subexpression_cache, - self.subexpression_order, self.external_functions, self.var_map, self.used_named_expressions, @@ -802,7 +801,7 @@ def write(self, model): # Filter out any unused named expressions self.subexpression_order = list( - filter(self.used_named_expressions.__contains__, self.subexpression_order) + filter(self.used_named_expressions.__contains__, self.subexpression_cache) ) # linear contribution by (constraint, objective, variable) component. 
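This change leans on the fact that Python dicts preserve insertion order: the definition order of named subexpressions can be recovered directly from subexpression_cache, so the separately maintained subexpression_order list is redundant. Filtering the cache's keys through the used_named_expressions set keeps that order, and a later hunk in this diff moves an entry to the end of the order by popping and re-inserting it. A tiny standalone illustration of both idioms (the names below are made up):

cache = {'e1': 'nl-1', 'e2': 'nl-2', 'e3': 'nl-3'}   # insertion ordered
used = {'e3', 'e1'}

print(list(filter(used.__contains__, cache)))  # ['e1', 'e3'], definition order kept
cache['e1'] = cache.pop('e1')                  # re-insert: 'e1' now iterates last
print(list(cache))                             # ['e2', 'e3', 'e1']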
@@ -824,10 +823,7 @@ def write(self, model): # We need to categorize the named subexpressions first so that # we know their linear / nonlinear vars when we encounter them # in constraints / objectives - self._categorize_vars( - map(self.subexpression_cache.__getitem__, self.subexpression_order), - linear_by_comp, - ) + self._categorize_vars(self.subexpression_cache.values(), linear_by_comp) n_subexpressions = self._count_subexpression_occurrences() obj_vars_linear, obj_vars_nonlinear, obj_nnz_by_var = self._categorize_vars( objectives, linear_by_comp @@ -2672,8 +2668,10 @@ def handle_named_expression_node(visitor, node, arg1): nl_info = list(expression_source) visitor.subexpression_cache[sub_id] = (sub_node, sub_repn, nl_info) # It is important that the NL subexpression comes before the - # main named expression: - visitor.subexpression_order.append(sub_id) + # main named expression: re-insert the original named + # expression (so that the nonlinear sub_node comes first + # when iterating over subexpression_cache) + visitor.subexpression_cache[_id] = visitor.subexpression_cache.pop(_id) else: nl_info = expression_source else: @@ -2716,11 +2714,6 @@ def handle_named_expression_node(visitor, node, arg1): else: return (_CONSTANT, repn.const) - # Defer recording this _id until after we know that this repn will - # not be directly substituted (and to ensure that the NL fragment is - # added to the order first). - visitor.subexpression_order.append(_id) - return (_GENERAL, repn.duplicate()) @@ -2989,7 +2982,6 @@ def __init__( self, template, subexpression_cache, - subexpression_order, external_functions, var_map, used_named_expressions, @@ -3000,7 +2992,6 @@ def __init__( super().__init__() self.template = template self.subexpression_cache = subexpression_cache - self.subexpression_order = subexpression_order self.external_functions = external_functions self.active_expression_source = None self.var_map = var_map diff --git a/pyomo/repn/tests/ampl/test_nlv2.py b/pyomo/repn/tests/ampl/test_nlv2.py index 97c93ad1649..0aa9fab96f9 100644 --- a/pyomo/repn/tests/ampl/test_nlv2.py +++ b/pyomo/repn/tests/ampl/test_nlv2.py @@ -58,7 +58,6 @@ def __init__(self, symbolic=False): else: self.template = nl_writer.text_nl_template self.subexpression_cache = {} - self.subexpression_order = [] self.external_functions = {} self.var_map = {} self.used_named_expressions = set() @@ -67,7 +66,6 @@ def __init__(self, symbolic=False): self.visitor = nl_writer.AMPLRepnVisitor( self.template, self.subexpression_cache, - self.subexpression_order, self.external_functions, self.var_map, self.used_named_expressions, From b87325a7df44d222763e5ded71be0d8fede8aa9b Mon Sep 17 00:00:00 2001 From: John Siirola Date: Tue, 21 May 2024 14:09:54 -0600 Subject: [PATCH 035/113] Revert some previous changes that were not necessary: because we work leaf-to-root, unnamed subexpressions will be handled correctly --- pyomo/repn/plugins/nl_writer.py | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/pyomo/repn/plugins/nl_writer.py b/pyomo/repn/plugins/nl_writer.py index 04d8c42540f..74a7251a1fe 100644 --- a/pyomo/repn/plugins/nl_writer.py +++ b/pyomo/repn/plugins/nl_writer.py @@ -1644,9 +1644,6 @@ def _categorize_vars(self, comp_list, linear_by_comp): Count of the number of components that each var appears in. 
""" - subexpression_cache = self.subexpression_cache - used_named_expressions = self.used_named_expressions - var_map = self.var_map all_linear_vars = set() all_nonlinear_vars = set() nnz_by_var = {} @@ -1674,15 +1671,9 @@ def _categorize_vars(self, comp_list, linear_by_comp): # Process the nonlinear portion of this component if expr_info.nonlinear: nonlinear_vars = set() - _id_src = [expr_info.nonlinear[1]] - for _id in chain.from_iterable(_id_src): + for _id in expr_info.nonlinear[1]: if _id in nonlinear_vars: continue - if _id not in var_map and _id not in used_named_expressions: - _sub_info = subexpression_cache[_id][1].nonlinear - if _sub_info: - _id_src.append(_sub_info[1]) - continue if _id in linear_by_comp: nonlinear_vars.update(linear_by_comp[_id]) else: From 254ca0486965f9f02e0e6f37e342a0d56989e7a1 Mon Sep 17 00:00:00 2001 From: John Siirola Date: Tue, 21 May 2024 14:10:27 -0600 Subject: [PATCH 036/113] Avoid error cor constant AMPLRepn objects --- pyomo/repn/plugins/nl_writer.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/pyomo/repn/plugins/nl_writer.py b/pyomo/repn/plugins/nl_writer.py index 74a7251a1fe..93f0832acf0 100644 --- a/pyomo/repn/plugins/nl_writer.py +++ b/pyomo/repn/plugins/nl_writer.py @@ -1691,12 +1691,13 @@ def _categorize_vars(self, comp_list, linear_by_comp): expr_info.linear = dict.fromkeys(nonlinear_vars, 0) all_nonlinear_vars.update(nonlinear_vars) - # Update the count of components that each variable appears in - for v in expr_info.linear: - if v in nnz_by_var: - nnz_by_var[v] += 1 - else: - nnz_by_var[v] = 1 + if expr_info.linear: + # Update the count of components that each variable appears in + for v in expr_info.linear: + if v in nnz_by_var: + nnz_by_var[v] += 1 + else: + nnz_by_var[v] = 1 # Record all nonzero variable ids for this component linear_by_comp[id(comp_info[0])] = expr_info.linear # Linear models (or objectives) are common. 
Avoid the set From 0fecf80f6c0a6ee170413d5a41211025be39ef99 Mon Sep 17 00:00:00 2001 From: John Siirola Date: Tue, 21 May 2024 14:10:55 -0600 Subject: [PATCH 037/113] Resolve potential information leak when duplicating AMPLRepn --- pyomo/repn/plugins/nl_writer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyomo/repn/plugins/nl_writer.py b/pyomo/repn/plugins/nl_writer.py index 93f0832acf0..b0fb7099894 100644 --- a/pyomo/repn/plugins/nl_writer.py +++ b/pyomo/repn/plugins/nl_writer.py @@ -2077,7 +2077,7 @@ def duplicate(self): ans.const = self.const ans.linear = None if self.linear is None else dict(self.linear) ans.nonlinear = self.nonlinear - ans.named_exprs = self.named_exprs + ans.named_exprs = None if self.named_exprs is None else set(self.named_exprs) return ans def compile_repn(self, visitor, prefix='', args=None, named_exprs=None): From 7a9a83eb6e808a4ca437dad51ac46108114de295 Mon Sep 17 00:00:00 2001 From: John Siirola Date: Tue, 21 May 2024 14:11:24 -0600 Subject: [PATCH 038/113] Additional error checking when substituting simple named expressions --- pyomo/repn/plugins/nl_writer.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pyomo/repn/plugins/nl_writer.py b/pyomo/repn/plugins/nl_writer.py index b0fb7099894..5344348d0a0 100644 --- a/pyomo/repn/plugins/nl_writer.py +++ b/pyomo/repn/plugins/nl_writer.py @@ -2702,7 +2702,8 @@ def handle_named_expression_node(visitor, node, arg1): if expression_source[2]: if repn.linear: - return (_MONOMIAL, next(iter(repn.linear)), 1) + assert len(repn.linear) == 1 and not repn.const + return (_MONOMIAL,) + next(iter(repn.linear.items())) else: return (_CONSTANT, repn.const) From 69b7d3011f23aedf3631e8c46a4438e97ee90cba Mon Sep 17 00:00:00 2001 From: John Siirola Date: Tue, 21 May 2024 14:11:56 -0600 Subject: [PATCH 039/113] Resolve error when collecting named expressions used in external functions --- pyomo/repn/plugins/nl_writer.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pyomo/repn/plugins/nl_writer.py b/pyomo/repn/plugins/nl_writer.py index 5344348d0a0..e174ce796c6 100644 --- a/pyomo/repn/plugins/nl_writer.py +++ b/pyomo/repn/plugins/nl_writer.py @@ -2747,10 +2747,10 @@ def handle_external_function_node(visitor, node, *args): comment, ) arg_ids = [] + named_exprs = set() for arg in args: _id = id(arg) arg_ids.append(_id) - named_exprs = set() visitor.subexpression_cache[_id] = ( arg, AMPLRepn( @@ -2762,8 +2762,8 @@ def handle_external_function_node(visitor, node, *args): ), (None, None, True), ) - if not named_exprs: - named_exprs = None + if not named_exprs: + named_exprs = None return ( _GENERAL, AMPLRepn(0, None, (nl + '%s' * len(arg_ids), arg_ids, named_exprs)), From 0ceedc61ae67589866694e1b09527c4570196dd2 Mon Sep 17 00:00:00 2001 From: John Siirola Date: Tue, 21 May 2024 14:12:44 -0600 Subject: [PATCH 040/113] Track changes in AMPLRepnVisitor (handling of external functions) --- pyomo/contrib/incidence_analysis/incidence.py | 21 ++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/pyomo/contrib/incidence_analysis/incidence.py b/pyomo/contrib/incidence_analysis/incidence.py index 96cbf77c47d..030ee2b0f79 100644 --- a/pyomo/contrib/incidence_analysis/incidence.py +++ b/pyomo/contrib/incidence_analysis/incidence.py @@ -83,6 +83,17 @@ def _get_incident_via_standard_repn( def _get_incident_via_ampl_repn(expr, linear_only, visitor): + def _nonlinear_var_id_collector(idlist): + for _id in idlist: + if _id in visitor.subexpression_cache: + info 
= visitor.subexpression_cache[_id][1] + if info.nonlinear: + yield from _nonlinear_var_id_collector(info.nonlinear[1]) + if info.linear: + yield from _nonlinear_var_id_collector(info.linear) + else: + yield _id + var_map = visitor.var_map orig_activevisitor = AMPLRepn.ActiveVisitor AMPLRepn.ActiveVisitor = visitor @@ -91,13 +102,13 @@ def _get_incident_via_ampl_repn(expr, linear_only, visitor): finally: AMPLRepn.ActiveVisitor = orig_activevisitor - nonlinear_var_ids = [] if repn.nonlinear is None else repn.nonlinear[1] nonlinear_var_id_set = set() unique_nonlinear_var_ids = [] - for v_id in nonlinear_var_ids: - if v_id not in nonlinear_var_id_set: - nonlinear_var_id_set.add(v_id) - unique_nonlinear_var_ids.append(v_id) + if repn.nonlinear: + for v_id in _nonlinear_var_id_collector(repn.nonlinear[1]): + if v_id not in nonlinear_var_id_set: + nonlinear_var_id_set.add(v_id) + unique_nonlinear_var_ids.append(v_id) nonlinear_vars = [var_map[v_id] for v_id in unique_nonlinear_var_ids] linear_only_vars = [ From 6be83b57981238ba94c81e4582819e340b7fbdc6 Mon Sep 17 00:00:00 2001 From: John Siirola Date: Wed, 22 May 2024 22:00:30 -0600 Subject: [PATCH 041/113] NFC: clarify comment wrt string args --- pyomo/repn/plugins/nl_writer.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/pyomo/repn/plugins/nl_writer.py b/pyomo/repn/plugins/nl_writer.py index e174ce796c6..fa8a49c345b 100644 --- a/pyomo/repn/plugins/nl_writer.py +++ b/pyomo/repn/plugins/nl_writer.py @@ -2714,9 +2714,12 @@ def handle_external_function_node(visitor, node, *args): func = node._fcn._function # There is a special case for external functions: these are the only # expressions that can accept string arguments. As we currently pass - # these as 'precompiled' general NL fragments, the normal trap for - # constant subexpressions will miss constant external function calls - # that contain strings. We will catch that case here. + # these as 'precompiled' GENERAL AMPLRepns, the normal trap for + # constant subexpressions will miss string arguments. We will catch + # that case here by looking for NL fragments with no variable + # references. Note that the NL fragment is NOT the raw string + # argument that we want to evaluate: the raw string is in the + # `const` field. if all( arg[0] is _CONSTANT or (arg[0] is _GENERAL and arg[1].nl and not arg[1].nl[1]) for arg in args From 04313dcd8a990880287f6350e22ad033ec49850d Mon Sep 17 00:00:00 2001 From: John Siirola Date: Wed, 22 May 2024 22:00:55 -0600 Subject: [PATCH 042/113] fix detection of variables replaces with expressions --- pyomo/repn/plugins/nl_writer.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pyomo/repn/plugins/nl_writer.py b/pyomo/repn/plugins/nl_writer.py index fa8a49c345b..0a6f9f3da30 100644 --- a/pyomo/repn/plugins/nl_writer.py +++ b/pyomo/repn/plugins/nl_writer.py @@ -1924,7 +1924,10 @@ def _linear_presolve( if not info.nonlinear: continue nl, args = info.nonlinear - if not args or any(vid not in eliminated_vars for vid in args): + if not args or any( + vid not in eliminated_vars or eliminated_vars[vid].linear + for vid in args + ): continue # Ideally, we would just evaluate the named expression. # However, there might be a linear portion of the named From c85d38cfedac6393b407abc0a160f0a6c1d648bf Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Thu, 23 May 2024 09:10:06 -0400 Subject: [PATCH 043/113] Implemented most suggestions from JS. Still need to debug failing test and some string manipulation logic. 
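One of the smaller changes here swaps the mutable default args={} for args=None plus an explicit check in the constructor. A default dict is created once at function definition time and shared across every call, so mutations leak between callers; the None sentinel gives each call its own dictionary. Generic illustration (not Pyomo.DoE code):

def bad(args={}):
    args.setdefault('count', 0)
    args['count'] += 1
    return args

print(bad())   # {'count': 1}
print(bad())   # {'count': 2}  -- same shared dict, mutated again

def good(args=None):
    if args is None:      # fresh dict for every call that omits the argument
        args = {}
    args.setdefault('count', 0)
    args['count'] += 1
    return args

print(good())  # {'count': 1}
print(good())  # {'count': 1}

The same patch also replaces explicit loops with dict comprehensions when building the FIM and Cholesky initial points.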
--- pyomo/contrib/doe/doe.py | 49 ++++++++++++++++++------------- pyomo/contrib/doe/measurements.py | 3 ++ 2 files changed, 32 insertions(+), 20 deletions(-) diff --git a/pyomo/contrib/doe/doe.py b/pyomo/contrib/doe/doe.py index f5c053876bd..0c37364642b 100644 --- a/pyomo/contrib/doe/doe.py +++ b/pyomo/contrib/doe/doe.py @@ -70,7 +70,7 @@ def __init__( solver=None, prior_FIM=None, discretize_model=None, - args={}, + args=None, logger_level=logging.INFO, ): """ @@ -103,7 +103,7 @@ def __init__( args: Additional arguments for the create_model function. logger_level: - Specify the level of the logger. Changer to logging.DEBUG for all messages. + Specify the level of the logger. Change to logging.DEBUG for all messages. """ # parameters @@ -114,6 +114,9 @@ def __init__( self.design_name = design_vars.variable_names self.design_vars = design_vars self.create_model = create_model + + if args is None: + args = {} self.args = args # create the measurement information object @@ -145,7 +148,7 @@ def _check_inputs(self): """ Check if the prior FIM is N*N matrix, where N is the number of parameter """ - if type(self.prior_FIM) != type(None): + if self.prior_FIM is not None: if np.shape(self.prior_FIM)[0] != np.shape(self.prior_FIM)[1]: raise ValueError('Found wrong prior information matrix shape.') elif np.shape(self.prior_FIM)[0] != len(self.param): @@ -980,10 +983,12 @@ def initialize_jac(m, i, j): ) if self.fim_initial is not None: - dict_fim_initialize = {} - for i, bu in enumerate(model.regression_parameters): - for j, un in enumerate(model.regression_parameters): - dict_fim_initialize[(bu, un)] = self.fim_initial[i][j] + dict_fim_initialize = { + (bu, un): self.fim_initial[i][j] + for i, bu in enumerate(model.regression_parameters) + for j, un in enumerate(model.regression_parameters) + + } def initialize_fim(m, j, d): return dict_fim_initialize[(j, d)] @@ -1005,13 +1010,12 @@ def initialize_fim(m, j, d): if self.Cholesky_option and self.objective_option == ObjectiveLib.det: # move the L matrix initial point to a dictionary - if type(self.L_initial) != type(None): - dict_cho = {} - # Loop over rows - for i, bu in enumerate(model.regression_parameters): - # Loop over columns - for j, un in enumerate(model.regression_parameters): - dict_cho[(bu, un)] = self.L_initial[i][j] + if self.L_initial is not None: + dict_cho = { + (bu, un): self.L_initial[i][j] + for i, bu in enumerate(model.regression_parameters) + for j, un in enumerate(model.regression_parameters) + } # use the L dictionary to initialize L matrix def init_cho(m, i, j): @@ -1019,7 +1023,7 @@ def init_cho(m, i, j): # Define elements of Cholesky decomposition matrix as Pyomo variables and either # Initialize with L in L_initial - if type(self.L_initial) != type(None): + if self.L_initial is not None: model.L_ele = pyo.Var( model.regression_parameters, model.regression_parameters, @@ -1070,10 +1074,11 @@ def jacobian_rule(m, p, n): # A constraint to calculate elements in Hessian matrix # transfer prior FIM to be Expressions - fim_initial_dict = {} - for i, bu in enumerate(model.regression_parameters): - for j, un in enumerate(model.regression_parameters): - fim_initial_dict[(bu, un)] = self.prior_FIM[i][j] + fim_initial_dict = { + (bu, un): self.prior_FIM[i][j] + for i, bu in enumerate(model.regression_parameters) + for j, un in enumerate(model.regression_parameters) + } def read_prior(m, i, j): return fim_initial_dict[(i, j)] @@ -1119,6 +1124,10 @@ def _add_objective(self, m): small_number = 1e-10 # Assemble the FIM matrix. 
This is helpful for initialization! + # + # Suggestion from JS: "It might be more efficient to form the NP array in one shot + # (from a list or using fromiter), and then reshaping to the 2-D matrix" + # fim = np.zeros((len(self.param), len(self.param))) for i, bu in enumerate(m.regression_parameters): for j, un in enumerate(m.regression_parameters): @@ -1228,7 +1237,7 @@ def det_general(m): m.Obj = pyo.Objective(expr=0) else: # something went wrong! - raise ValueError( + raise DeveloperError( "Objective option not recognized. Please contact the developers as you should not see this error." ) diff --git a/pyomo/contrib/doe/measurements.py b/pyomo/contrib/doe/measurements.py index 229fb7f7830..ff40aa6143f 100644 --- a/pyomo/contrib/doe/measurements.py +++ b/pyomo/contrib/doe/measurements.py @@ -148,6 +148,9 @@ def _generate_variable_names_with_indices( # iterate over index combinations ["CA", 1], ["CA", 2], ..., ["CC", 2], ["CC", 3] for index_instance in all_variable_indices: var_name_index_string = var_name + # + # Suggestion from JS: "Can you re-use name_repr and index_repr from pyomo.core.base.component_namer here?" + # for i, idx in enumerate(index_instance): # if i is the first index, open the [] if i == 0: From 459b593b8790945d2fd552a49f05ad20c7232d5b Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Thu, 23 May 2024 09:52:32 -0400 Subject: [PATCH 044/113] Added more tests. --- pyomo/contrib/doe/measurements.py | 3 +- pyomo/contrib/doe/tests/test_fim_doe.py | 124 ++++++++++++++++++++++-- 2 files changed, 119 insertions(+), 8 deletions(-) diff --git a/pyomo/contrib/doe/measurements.py b/pyomo/contrib/doe/measurements.py index ff40aa6143f..1b47c78c65c 100644 --- a/pyomo/contrib/doe/measurements.py +++ b/pyomo/contrib/doe/measurements.py @@ -183,7 +183,8 @@ def _check_valid_input( """ Check if the measurement information provided are valid to use. """ - assert isinstance(var_name, str), "var_name should be a string." + if not isinstance(var_name, str): + raise TypeError("Variable name must be a string.") # debugging note: what is an integer versus a list versus a dictionary here? # check if time_index_position is in indices diff --git a/pyomo/contrib/doe/tests/test_fim_doe.py b/pyomo/contrib/doe/tests/test_fim_doe.py index 31d250f0d10..a41ad552228 100644 --- a/pyomo/contrib/doe/tests/test_fim_doe.py +++ b/pyomo/contrib/doe/tests/test_fim_doe.py @@ -38,18 +38,128 @@ class TestMeasurementError(unittest.TestCase): - def test(self): - t_control = [0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1] + + def test_with_time_plus_one_extra_index(self): + """ This tests confirms the typical usage with a time index plus one extra index. + + This test should execute without throwing any errors. + + """ + + MeasurementVariables().add_variables( + "C", + indices={0: ['A', 'B', 'C'], 1: [0, 0.5, 1.0]}, + time_index_position=1 + ) + + def test_with_time_plus_two_extra_indices(self): + """ This tests confirms the typical usage with a time index plus two extra indices. + + This test should execute without throwing any errors. + + """ + + MeasurementVariables().add_variables( + "C", + indices={0: ['A', 'B', 'C'], # species + 1: [0, 0.5, 1.0], # time + 2: [1, 2, 3]}, # position + time_index_position=1 + ) + + def test_time_index_position_out_of_bounds(self): + """ This test confirms that an error is thrown when the time index position is out of bounds. + + """ + + # if time index is not in indices, an value error is thrown. 
+ with self.assertRaises(ValueError): + MeasurementVariables().add_variables( + "C", + indices={0: ['CA', 'CB', 'CC'], # species + 1: [0, 0.5, 1.0],}, # time + time_index_position=2 # this is out of bounds + ) + + def test_single_measurement_variable(self): + """ This test confirms we can specify a single measurement variable without + specifying the indices. + + The test should execute with no errors. + """ + measurements = MeasurementVariables() + measurements.add_variables( + "HelloWorld", + indices=None, + time_index_position=None) + + def test_without_time_index(self): + """ This test confirms we can add a measurement variable without specifying the time index. + + The test should execute with no errors. + + """ variable_name = "C" - indices = {0: ['CA', 'CB', 'CC'], 1: t_control} + indices = {0: ['CA', 'CB', 'CC']} # specify the indices + # no time index + # measurement object measurements = MeasurementVariables() - # if time index is not in indices, an value error is thrown. - with self.assertRaises(ValueError): - measurements.add_variables( - variable_name, indices=indices, time_index_position=2 + measurements.add_variables( + variable_name, indices=indices, time_index_position=None ) + + def test_only_time_index(self): + """ This test confirms we can add a measurement variable without specifying the variable name. + + The test should execute with no errors. + + """ + + MeasurementVariables().add_variables( + "HelloWorld", # name of the variable + indices={0: [0, 0.5, 1.0]}, + time_index_position=0 + ) + + def test_with_no_measurement_name(self): + """ This test confirms that an error is thrown when None is used as the measurement name. + """ + + with self.assertRaises(TypeError): + MeasurementVariables().add_variables( + None, + indices={0: [0, 0.5, 1.0]}, + time_index_position=0 + ) + + def test_with_non_string_measurement_name(self): + """ This test confirms that an error is thrown when a non-string is used as the measurement name. + + """ + + with self.assertRaises(TypeError): + MeasurementVariables().add_variables( + 1, + indices={0: [0, 0.5, 1.0]}, + time_index_position=0 + ) + + def test_non_integer_index_keys(self): + """ This test confirms that strings can be used as keys for specifying the indices. + + Warning: it is possible this usage breaks something else in Pyomo.DoE. + There may be an implicit assumption that the order of the keys must match the order + of the indices in the Pyomo model. + + """ + + MeasurementVariables().add_variables( + "C", + indices={"species": ['CA', 'CB', 'CC'], "time": [0, 0.5, 1.0]}, + time_index_position="time" + ) class TestDesignError(unittest.TestCase): def test(self): From a1667ad79e9e12131a5fe76c49b9fa08f2fc4167 Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Thu, 23 May 2024 10:43:50 -0400 Subject: [PATCH 045/113] Fixed mistake in test logic. 
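[Editor's note — illustrative sketch only, not part of the patch below.] The "mistake in test logic" is in _check_valid_input: the old guard tested "time_index_position is None", so the ValueError could never fire when a time index position was actually supplied; the patch flips it to "is not None" and checks membership in the index keys. A simplified sketch of the corrected guard (names mirror the patch, but the function below is hypothetical):

    def check_time_index(indices, time_index_position):
        # Only validate when both pieces are provided; raise if the position
        # is not one of the index keys.
        if (
            indices is not None
            and time_index_position is not None
            and time_index_position not in indices
        ):
            raise ValueError("time index cannot be found in indices.")

    check_time_index({0: ["CA", "CB", "CC"], 1: [0, 0.5, 1.0]}, 1)  # ok
    check_time_index({0: ["CA", "CB", "CC"], 1: [0, 0.5, 1.0]}, 2)  # raises ValueError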
--- pyomo/contrib/doe/measurements.py | 10 +++++----- pyomo/contrib/doe/tests/test_fim_doe.py | 11 ++++------- 2 files changed, 9 insertions(+), 12 deletions(-) diff --git a/pyomo/contrib/doe/measurements.py b/pyomo/contrib/doe/measurements.py index 1b47c78c65c..31a9dc19dbb 100644 --- a/pyomo/contrib/doe/measurements.py +++ b/pyomo/contrib/doe/measurements.py @@ -190,13 +190,13 @@ def _check_valid_input( # check if time_index_position is in indices if ( indices is not None # ensure not None - and time_index_position is None # ensure not None + and time_index_position is not None # ensure not None and time_index_position - not in indices # ensure time_index_position is in indices + not in indices.keys() # ensure time_index_position is in indices ): raise ValueError("time index cannot be found in indices.") - # if given a list, check if bounds have the same length with flattened variable + # if given a list, check if values have the same length with flattened variable if ( values is not None # ensure not None and not type(values) @@ -212,7 +212,7 @@ def _check_valid_input( and isinstance(lower_bounds, collections.abc.Sequence) # ensure list-like and len(lower_bounds) != len_indices # ensure same length ): - raise ValueError("Lowerbounds is of different length with indices.") + raise ValueError("Lowerbounds have a different length with indices.") if ( upper_bounds is not None # ensure not None @@ -221,7 +221,7 @@ def _check_valid_input( and isinstance(upper_bounds, collections.abc.Sequence) # ensure list-like and len(upper_bounds) != len_indices # ensure same length ): - raise ValueError("Upperbounds is of different length with indices.") + raise ValueError("Upperbounds have a different length with indices.") class MeasurementVariables(VariablesWithIndices): diff --git a/pyomo/contrib/doe/tests/test_fim_doe.py b/pyomo/contrib/doe/tests/test_fim_doe.py index a41ad552228..d2431f7bd45 100644 --- a/pyomo/contrib/doe/tests/test_fim_doe.py +++ b/pyomo/contrib/doe/tests/test_fim_doe.py @@ -99,14 +99,11 @@ def test_without_time_index(self): The test should execute with no errors. """ - variable_name = "C" - indices = {0: ['CA', 'CB', 'CC']} # specify the indices - # no time index - # measurement object - measurements = MeasurementVariables() - measurements.add_variables( - variable_name, indices=indices, time_index_position=None + MeasurementVariables().add_variables( + "C", + indices= {0: ['CA', 'CB', 'CC']}, # species as only index + time_index_position=None # no time index ) def test_only_time_index(self): From 62af537940e163e52863e9f0954308263dbb8b7a Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Thu, 23 May 2024 10:53:09 -0400 Subject: [PATCH 046/113] Implemented caching suggestion from JS; ran black. 
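[Editor's note — illustrative sketch only, not part of the patch below.] The caching suggestion applied in run_grid_search converts the grid point to a list once per iteration instead of calling list(design_set_iter) inside the loop over design variable names. Simplified, with hypothetical names and values:

    design_set_iter = (4.0, 470.0, 300.0)             # hypothetical grid point
    design_dimension_names = ["CA0", "T[0]", "T[1]"]  # hypothetical design names

    # convert once and reuse (previously rebuilt for every name in the loop)
    list_design_set_iter = list(design_set_iter)
    design_iter = {
        name: list_design_set_iter[i]
        for i, name in enumerate(design_dimension_names)
    }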
--- pyomo/contrib/doe/doe.py | 68 ++--- pyomo/contrib/doe/result.py | 174 ++++++------- pyomo/contrib/doe/scenario.py | 2 +- pyomo/contrib/doe/tests/test_example.py | 2 +- pyomo/contrib/doe/tests/test_fim_doe.py | 237 +++++++++--------- .../contrib/doe/tests/test_reactor_example.py | 12 +- 6 files changed, 245 insertions(+), 250 deletions(-) diff --git a/pyomo/contrib/doe/doe.py b/pyomo/contrib/doe/doe.py index 0c37364642b..9356cce360b 100644 --- a/pyomo/contrib/doe/doe.py +++ b/pyomo/contrib/doe/doe.py @@ -114,7 +114,7 @@ def __init__( self.design_name = design_vars.variable_names self.design_vars = design_vars self.create_model = create_model - + if args is None: args = {} self.args = args @@ -150,9 +150,9 @@ def _check_inputs(self): """ if self.prior_FIM is not None: if np.shape(self.prior_FIM)[0] != np.shape(self.prior_FIM)[1]: - raise ValueError('Found wrong prior information matrix shape.') + raise ValueError("Found wrong prior information matrix shape.") elif np.shape(self.prior_FIM)[0] != len(self.param): - raise ValueError('Found wrong prior information matrix shape.') + raise ValueError("Found wrong prior information matrix shape.") def stochastic_program( self, @@ -411,7 +411,7 @@ def _sequential_finite(self, read_output, extract_single_model, store_output): # if measurements are provided if read_output: - with open(read_output, 'rb') as f: + with open(read_output, "rb") as f: output_record = pickle.load(f) f.close() jac = self._finite_calculation(output_record) @@ -430,7 +430,7 @@ def _sequential_finite(self, read_output, extract_single_model, store_output): self._square_model_from_compute_FIM = mod if extract_single_model: - mod_name = store_output + '.csv' + mod_name = store_output + ".csv" dataframe = extract_single_model(mod, square_result) dataframe.to_csv(mod_name) @@ -452,10 +452,10 @@ def _sequential_finite(self, read_output, extract_single_model, store_output): output_record[s] = output_iter - output_record['design'] = self.design_values + output_record["design"] = self.design_values if store_output: - f = open(store_output, 'wb') + f = open(store_output, "wb") pickle.dump(output_record, f) f.close() @@ -537,7 +537,7 @@ def _direct_kaug(self): dsdp_extract.append(dsdp_array[kaug_no]) except: # k_aug does not provide value for fixed variables - self.logger.debug('The variable is fixed: %s', mname) + self.logger.debug("The variable is fixed: %s", mname) # produce the sensitivity for fixed variables zero_sens = np.zeros(len(self.param)) # for fixed variables, the sensitivity are a zero vector @@ -611,7 +611,7 @@ def _create_block(self): # Determine if create_model takes theta as an optional input pass_theta_to_initialize = ( - 'theta' in inspect.getfullargspec(self.create_model).args + "theta" in inspect.getfullargspec(self.create_model).args ) # Allow user to self-define complex design variables @@ -635,11 +635,16 @@ def block_build(b, s): theta_initialize = self.scenario_data.scenario[s] # Add model on block with theta values self.create_model( - mod=b, model_option=ModelOptionLib.stage2, theta=theta_initialize, **self.args, + mod=b, + model_option=ModelOptionLib.stage2, + theta=theta_initialize, + **self.args, ) else: # Otherwise add model on block without theta values - self.create_model(mod=b, model_option=ModelOptionLib.stage2, **self.args) + self.create_model( + mod=b, model_option=ModelOptionLib.stage2, **self.args + ) # fix parameter values to perturbed values for par in self.param: @@ -831,27 +836,31 @@ def run_grid_search( # generate the design variable 
dictionary needed for running compute_FIM # first copy value from design_values design_iter = self.design_vars.variable_names_value.copy() + + # convert to a list and cache + list_design_set_iter = list(design_set_iter) + # update the controlled value of certain time points for certain design variables for i, names in enumerate(design_dimension_names): if isinstance(names, str): # if 'names' is simply a string, copy the new value - design_iter[names] = list(design_set_iter)[i] + design_iter[names] = list_design_set_iter[i] elif isinstance(names, collections.abc.Sequence): # if the element is a list, all design variables in this list share the same values for n in names: - design_iter[n] = list(design_set_iter)[i] + design_iter[n] = list_design_set_iter[i] else: # otherwise just copy the value # design_iter[names] = list(design_set_iter)[i] raise NotImplementedError( - 'You should not see this error message. Please report it to the Pyomo.DoE developers.' + "You should not see this error message. Please report it to the Pyomo.DoE developers." ) self.design_vars.variable_names_value = design_iter iter_timer = TicTocTimer() - self.logger.info('=======Iteration Number: %s =====', count + 1) + self.logger.info("=======Iteration Number: %s =====", count + 1) self.logger.debug( - 'Design variable values of this iteration: %s', design_iter + "Design variable values of this iteration: %s", design_iter ) iter_timer.tic(msg=None) # generate store name @@ -887,12 +896,12 @@ def run_grid_search( time_set.append(iter_t) # give run information at each iteration - self.logger.info('This is run %s out of %s.', count, total_count) + self.logger.info("This is run %s out of %s.", count, total_count) self.logger.info( - 'The code has run %s seconds.', round(sum(time_set), 2) + "The code has run %s seconds.", round(sum(time_set), 2) ) self.logger.info( - 'Estimated remaining time: %s seconds', + "Estimated remaining time: %s seconds", round( sum(time_set) / (count) * (total_count - count), 2 ), # need to check this math... it gives a negative number for the final count @@ -907,11 +916,11 @@ def run_grid_search( except: self.logger.warning( - ':::::::::::Warning: Cannot converge this run.::::::::::::' + ":::::::::::Warning: Cannot converge this run.::::::::::::" ) count += 1 failed_count += 1 - self.logger.warning('failed count:', failed_count) + self.logger.warning("failed count:", failed_count) result_combine[tuple(design_set_iter)] = None # For user's access @@ -925,7 +934,7 @@ def run_grid_search( store_optimality_name=store_optimality_as_csv, ) - self.logger.info('Overall wall clock time [s]: %s', sum(time_set)) + self.logger.info("Overall wall clock time [s]: %s", sum(time_set)) return figure_draw_object @@ -987,7 +996,6 @@ def initialize_jac(m, i, j): (bu, un): self.fim_initial[i][j] for i, bu in enumerate(model.regression_parameters) for j, un in enumerate(model.regression_parameters) - } def initialize_fim(m, j, d): @@ -1012,7 +1020,7 @@ def initialize_fim(m, j, d): # move the L matrix initial point to a dictionary if self.L_initial is not None: dict_cho = { - (bu, un): self.L_initial[i][j] + (bu, un): self.L_initial[i][j] for i, bu in enumerate(model.regression_parameters) for j, un in enumerate(model.regression_parameters) } @@ -1124,8 +1132,8 @@ def _add_objective(self, m): small_number = 1e-10 # Assemble the FIM matrix. This is helpful for initialization! 
- # - # Suggestion from JS: "It might be more efficient to form the NP array in one shot + # + # Suggestion from JS: "It might be more efficient to form the NP array in one shot # (from a list or using fromiter), and then reshaping to the 2-D matrix" # fim = np.zeros((len(self.param), len(self.param))) @@ -1279,10 +1287,10 @@ def _fix_design(self, m, design_val, fix_opt=True, optimize_option=None): def _get_default_ipopt_solver(self): """Default solver""" - solver = SolverFactory('ipopt') - solver.options['linear_solver'] = 'ma57' - solver.options['halt_on_ampl_error'] = 'yes' - solver.options['max_iter'] = 3000 + solver = SolverFactory("ipopt") + solver.options["linear_solver"] = "ma57" + solver.options["halt_on_ampl_error"] = "yes" + solver.options["max_iter"] = 3000 return solver def _solve_doe(self, m, fix=False, opt_option=None): diff --git a/pyomo/contrib/doe/result.py b/pyomo/contrib/doe/result.py index d8ed343352a..f7145ae2a46 100644 --- a/pyomo/contrib/doe/result.py +++ b/pyomo/contrib/doe/result.py @@ -123,9 +123,9 @@ def result_analysis(self, result=None): if self.prior_FIM is not None: try: fim = fim + self.prior_FIM - self.logger.info('Existed information has been added.') + self.logger.info("Existed information has been added.") except: - raise ValueError('Check the shape of prior FIM.') + raise ValueError("Check the shape of prior FIM.") if np.linalg.cond(fim) > self.max_condition_number: self.logger.info( @@ -133,7 +133,7 @@ def result_analysis(self, result=None): np.linalg.cond(fim), ) self.logger.info( - 'A condition number bigger than %s is considered near singular.', + "A condition number bigger than %s is considered near singular.", self.max_condition_number, ) @@ -239,10 +239,10 @@ def _print_FIM_info(self, FIM): self.eig_vecs = np.linalg.eig(FIM)[1] self.logger.info( - 'FIM: %s; \n Trace: %s; \n Determinant: %s;', self.FIM, self.trace, self.det + "FIM: %s; \n Trace: %s; \n Determinant: %s;", self.FIM, self.trace, self.det ) self.logger.info( - 'Condition number: %s; \n Min eigenvalue: %s.', self.cond, self.min_eig + "Condition number: %s; \n Min eigenvalue: %s.", self.cond, self.min_eig ) def _solution_info(self, m, dv_set): @@ -268,11 +268,11 @@ def _solution_info(self, m, dv_set): # When scaled with constant values, the effect of the scaling factors are removed here # For determinant, the scaling factor to determinant is scaling factor ** (Dim of FIM) # For trace, the scaling factor to trace is the scaling factor. 
- if self.obj == 'det': + if self.obj == "det": self.obj_det = np.exp(value(m.obj)) / (self.fim_scale_constant_value) ** ( len(self.parameter_names) ) - elif self.obj == 'trace': + elif self.obj == "trace": self.obj_trace = np.exp(value(m.obj)) / (self.fim_scale_constant_value) design_variable_names = list(dv_set.keys()) @@ -314,11 +314,11 @@ def _get_solver_info(self): if (self.result.solver.status == SolverStatus.ok) and ( self.result.solver.termination_condition == TerminationCondition.optimal ): - self.status = 'converged' + self.status = "converged" elif ( self.result.solver.termination_condition == TerminationCondition.infeasible ): - self.status = 'infeasible' + self.status = "infeasible" else: self.status = self.result.solver.status @@ -399,10 +399,10 @@ def extract_criteria(self): column_names.append(i) # Each design criteria has a column to store values - column_names.append('A') - column_names.append('D') - column_names.append('E') - column_names.append('ME') + column_names.append("A") + column_names.append("D") + column_names.append("E") + column_names.append("ME") # generate the dataframe store_all_results = np.asarray(store_all_results) self.store_all_results_dataframe = pd.DataFrame( @@ -458,7 +458,7 @@ def figure_drawing( self.design_names ): raise ValueError( - 'Error: All dimensions except for those the figures are drawn by should be fixed.' + "Error: All dimensions except for those the figures are drawn by should be fixed." ) if len(self.sensitivity_dimension) not in [1, 2]: @@ -467,15 +467,15 @@ def figure_drawing( # generate a combination of logic sentences to filter the results of the DOF needed. # an example filter: (self.store_all_results_dataframe["CA0"]==5). if len(self.fixed_design_names) != 0: - filter = '' + filter = "" for i in range(len(self.fixed_design_names)): - filter += '(self.store_all_results_dataframe[' + filter += "(self.store_all_results_dataframe[" filter += str(self.fixed_design_names[i]) - filter += ']==' + filter += "]==" filter += str(self.fixed_design_values[i]) - filter += ')' + filter += ")" if i != (len(self.fixed_design_names) - 1): - filter += '&' + filter += "&" # extract results with other dimensions fixed figure_result_data = self.store_all_results_dataframe.loc[eval(filter)] # if there is no other fixed dimensions @@ -526,78 +526,78 @@ def _curve1D( # decide if the results are log scaled if log_scale: - y_range_A = np.log10(self.figure_result_data['A'].values.tolist()) - y_range_D = np.log10(self.figure_result_data['D'].values.tolist()) - y_range_E = np.log10(self.figure_result_data['E'].values.tolist()) - y_range_ME = np.log10(self.figure_result_data['ME'].values.tolist()) + y_range_A = np.log10(self.figure_result_data["A"].values.tolist()) + y_range_D = np.log10(self.figure_result_data["D"].values.tolist()) + y_range_E = np.log10(self.figure_result_data["E"].values.tolist()) + y_range_ME = np.log10(self.figure_result_data["ME"].values.tolist()) else: - y_range_A = self.figure_result_data['A'].values.tolist() - y_range_D = self.figure_result_data['D'].values.tolist() - y_range_E = self.figure_result_data['E'].values.tolist() - y_range_ME = self.figure_result_data['ME'].values.tolist() + y_range_A = self.figure_result_data["A"].values.tolist() + y_range_D = self.figure_result_data["D"].values.tolist() + y_range_E = self.figure_result_data["E"].values.tolist() + y_range_ME = self.figure_result_data["ME"].values.tolist() # Draw A-optimality fig = plt.pyplot.figure() - plt.pyplot.rc('axes', titlesize=font_axes) - plt.pyplot.rc('axes', 
labelsize=font_axes) - plt.pyplot.rc('xtick', labelsize=font_tick) - plt.pyplot.rc('ytick', labelsize=font_tick) + plt.pyplot.rc("axes", titlesize=font_axes) + plt.pyplot.rc("axes", labelsize=font_axes) + plt.pyplot.rc("xtick", labelsize=font_tick) + plt.pyplot.rc("ytick", labelsize=font_tick) ax = fig.add_subplot(111) - params = {'mathtext.default': 'regular'} + params = {"mathtext.default": "regular"} # plt.rcParams.update(params) ax.plot(x_range, y_range_A) ax.scatter(x_range, y_range_A) - ax.set_ylabel('$log_{10}$ Trace') + ax.set_ylabel("$log_{10}$ Trace") ax.set_xlabel(xlabel_text) - plt.pyplot.title(title_text + ': A-optimality') + plt.pyplot.title(title_text + ": A-optimality") plt.pyplot.show() # Draw D-optimality fig = plt.pyplot.figure() - plt.pyplot.rc('axes', titlesize=font_axes) - plt.pyplot.rc('axes', labelsize=font_axes) - plt.pyplot.rc('xtick', labelsize=font_tick) - plt.pyplot.rc('ytick', labelsize=font_tick) + plt.pyplot.rc("axes", titlesize=font_axes) + plt.pyplot.rc("axes", labelsize=font_axes) + plt.pyplot.rc("xtick", labelsize=font_tick) + plt.pyplot.rc("ytick", labelsize=font_tick) ax = fig.add_subplot(111) - params = {'mathtext.default': 'regular'} + params = {"mathtext.default": "regular"} # plt.rcParams.update(params) ax.plot(x_range, y_range_D) ax.scatter(x_range, y_range_D) - ax.set_ylabel('$log_{10}$ Determinant') + ax.set_ylabel("$log_{10}$ Determinant") ax.set_xlabel(xlabel_text) - plt.pyplot.title(title_text + ': D-optimality') + plt.pyplot.title(title_text + ": D-optimality") plt.pyplot.show() # Draw E-optimality fig = plt.pyplot.figure() - plt.pyplot.rc('axes', titlesize=font_axes) - plt.pyplot.rc('axes', labelsize=font_axes) - plt.pyplot.rc('xtick', labelsize=font_tick) - plt.pyplot.rc('ytick', labelsize=font_tick) + plt.pyplot.rc("axes", titlesize=font_axes) + plt.pyplot.rc("axes", labelsize=font_axes) + plt.pyplot.rc("xtick", labelsize=font_tick) + plt.pyplot.rc("ytick", labelsize=font_tick) ax = fig.add_subplot(111) - params = {'mathtext.default': 'regular'} + params = {"mathtext.default": "regular"} # plt.rcParams.update(params) ax.plot(x_range, y_range_E) ax.scatter(x_range, y_range_E) - ax.set_ylabel('$log_{10}$ Minimal eigenvalue') + ax.set_ylabel("$log_{10}$ Minimal eigenvalue") ax.set_xlabel(xlabel_text) - plt.pyplot.title(title_text + ': E-optimality') + plt.pyplot.title(title_text + ": E-optimality") plt.pyplot.show() # Draw Modified E-optimality fig = plt.pyplot.figure() - plt.pyplot.rc('axes', titlesize=font_axes) - plt.pyplot.rc('axes', labelsize=font_axes) - plt.pyplot.rc('xtick', labelsize=font_tick) - plt.pyplot.rc('ytick', labelsize=font_tick) + plt.pyplot.rc("axes", titlesize=font_axes) + plt.pyplot.rc("axes", labelsize=font_axes) + plt.pyplot.rc("xtick", labelsize=font_tick) + plt.pyplot.rc("ytick", labelsize=font_tick) ax = fig.add_subplot(111) - params = {'mathtext.default': 'regular'} + params = {"mathtext.default": "regular"} # plt.rcParams.update(params) ax.plot(x_range, y_range_ME) ax.scatter(x_range, y_range_ME) - ax.set_ylabel('$log_{10}$ Condition number') + ax.set_ylabel("$log_{10}$ Condition number") ax.set_xlabel(xlabel_text) - plt.pyplot.title(title_text + ': Modified E-optimality') + plt.pyplot.title(title_text + ": Modified E-optimality") plt.pyplot.show() def _heatmap( @@ -641,10 +641,10 @@ def _heatmap( y_range = sensitivity_dict[self.sensitivity_dimension[1]] # extract the design criteria values - A_range = self.figure_result_data['A'].values.tolist() - D_range = self.figure_result_data['D'].values.tolist() - E_range 
= self.figure_result_data['E'].values.tolist() - ME_range = self.figure_result_data['ME'].values.tolist() + A_range = self.figure_result_data["A"].values.tolist() + D_range = self.figure_result_data["D"].values.tolist() + E_range = self.figure_result_data["E"].values.tolist() + ME_range = self.figure_result_data["ME"].values.tolist() # reshape the design criteria values for heatmaps cri_a = np.asarray(A_range).reshape(len(x_range), len(y_range)) @@ -675,12 +675,12 @@ def _heatmap( # A-optimality fig = plt.pyplot.figure() - plt.pyplot.rc('axes', titlesize=font_axes) - plt.pyplot.rc('axes', labelsize=font_axes) - plt.pyplot.rc('xtick', labelsize=font_tick) - plt.pyplot.rc('ytick', labelsize=font_tick) + plt.pyplot.rc("axes", titlesize=font_axes) + plt.pyplot.rc("axes", labelsize=font_axes) + plt.pyplot.rc("xtick", labelsize=font_tick) + plt.pyplot.rc("ytick", labelsize=font_tick) ax = fig.add_subplot(111) - params = {'mathtext.default': 'regular'} + params = {"mathtext.default": "regular"} plt.pyplot.rcParams.update(params) ax.set_yticks(range(len(yLabel))) ax.set_yticklabels(yLabel) @@ -690,18 +690,18 @@ def _heatmap( ax.set_xlabel(xlabel_text) im = ax.imshow(hes_a.T, cmap=plt.pyplot.cm.hot_r) ba = plt.pyplot.colorbar(im) - ba.set_label('log10(trace(FIM))') - plt.pyplot.title(title_text + ': A-optimality') + ba.set_label("log10(trace(FIM))") + plt.pyplot.title(title_text + ": A-optimality") plt.pyplot.show() # D-optimality fig = plt.pyplot.figure() - plt.pyplot.rc('axes', titlesize=font_axes) - plt.pyplot.rc('axes', labelsize=font_axes) - plt.pyplot.rc('xtick', labelsize=font_tick) - plt.pyplot.rc('ytick', labelsize=font_tick) + plt.pyplot.rc("axes", titlesize=font_axes) + plt.pyplot.rc("axes", labelsize=font_axes) + plt.pyplot.rc("xtick", labelsize=font_tick) + plt.pyplot.rc("ytick", labelsize=font_tick) ax = fig.add_subplot(111) - params = {'mathtext.default': 'regular'} + params = {"mathtext.default": "regular"} plt.pyplot.rcParams.update(params) ax.set_yticks(range(len(yLabel))) ax.set_yticklabels(yLabel) @@ -711,18 +711,18 @@ def _heatmap( ax.set_xlabel(xlabel_text) im = ax.imshow(hes_d.T, cmap=plt.pyplot.cm.hot_r) ba = plt.pyplot.colorbar(im) - ba.set_label('log10(det(FIM))') - plt.pyplot.title(title_text + ': D-optimality') + ba.set_label("log10(det(FIM))") + plt.pyplot.title(title_text + ": D-optimality") plt.pyplot.show() # E-optimality fig = plt.pyplot.figure() - plt.pyplot.rc('axes', titlesize=font_axes) - plt.pyplot.rc('axes', labelsize=font_axes) - plt.pyplot.rc('xtick', labelsize=font_tick) - plt.pyplot.rc('ytick', labelsize=font_tick) + plt.pyplot.rc("axes", titlesize=font_axes) + plt.pyplot.rc("axes", labelsize=font_axes) + plt.pyplot.rc("xtick", labelsize=font_tick) + plt.pyplot.rc("ytick", labelsize=font_tick) ax = fig.add_subplot(111) - params = {'mathtext.default': 'regular'} + params = {"mathtext.default": "regular"} plt.pyplot.rcParams.update(params) ax.set_yticks(range(len(yLabel))) ax.set_yticklabels(yLabel) @@ -732,18 +732,18 @@ def _heatmap( ax.set_xlabel(xlabel_text) im = ax.imshow(hes_e.T, cmap=plt.pyplot.cm.hot_r) ba = plt.pyplot.colorbar(im) - ba.set_label('log10(minimal eig(FIM))') - plt.pyplot.title(title_text + ': E-optimality') + ba.set_label("log10(minimal eig(FIM))") + plt.pyplot.title(title_text + ": E-optimality") plt.pyplot.show() # modified E-optimality fig = plt.pyplot.figure() - plt.pyplot.rc('axes', titlesize=font_axes) - plt.pyplot.rc('axes', labelsize=font_axes) - plt.pyplot.rc('xtick', labelsize=font_tick) - plt.pyplot.rc('ytick', 
labelsize=font_tick) + plt.pyplot.rc("axes", titlesize=font_axes) + plt.pyplot.rc("axes", labelsize=font_axes) + plt.pyplot.rc("xtick", labelsize=font_tick) + plt.pyplot.rc("ytick", labelsize=font_tick) ax = fig.add_subplot(111) - params = {'mathtext.default': 'regular'} + params = {"mathtext.default": "regular"} plt.pyplot.rcParams.update(params) ax.set_yticks(range(len(yLabel))) ax.set_yticklabels(yLabel) @@ -753,6 +753,6 @@ def _heatmap( ax.set_xlabel(xlabel_text) im = ax.imshow(hes_e2.T, cmap=plt.pyplot.cm.hot_r) ba = plt.pyplot.colorbar(im) - ba.set_label('log10(cond(FIM))') - plt.pyplot.title(title_text + ': Modified E-optimality') + ba.set_label("log10(cond(FIM))") + plt.pyplot.title(title_text + ": Modified E-optimality") plt.pyplot.show() diff --git a/pyomo/contrib/doe/scenario.py b/pyomo/contrib/doe/scenario.py index 6c6f5ef7d1b..b44ce1ab4d3 100644 --- a/pyomo/contrib/doe/scenario.py +++ b/pyomo/contrib/doe/scenario.py @@ -150,5 +150,5 @@ def generate_scenario(self): # store scenario if self.store: - with open('scenario_simultaneous.pickle', 'wb') as f: + with open("scenario_simultaneous.pickle", "wb") as f: pickle.dump(self.scenario_data, f) diff --git a/pyomo/contrib/doe/tests/test_example.py b/pyomo/contrib/doe/tests/test_example.py index b59014a8110..8153e07018a 100644 --- a/pyomo/contrib/doe/tests/test_example.py +++ b/pyomo/contrib/doe/tests/test_example.py @@ -38,7 +38,7 @@ from pyomo.opt import SolverFactory -ipopt_available = SolverFactory('ipopt').available() +ipopt_available = SolverFactory("ipopt").available() class TestReactorExample(unittest.TestCase): diff --git a/pyomo/contrib/doe/tests/test_fim_doe.py b/pyomo/contrib/doe/tests/test_fim_doe.py index d2431f7bd45..05664b0a795 100644 --- a/pyomo/contrib/doe/tests/test_fim_doe.py +++ b/pyomo/contrib/doe/tests/test_fim_doe.py @@ -40,124 +40,111 @@ class TestMeasurementError(unittest.TestCase): def test_with_time_plus_one_extra_index(self): - """ This tests confirms the typical usage with a time index plus one extra index. + """This tests confirms the typical usage with a time index plus one extra index. This test should execute without throwing any errors. - + """ MeasurementVariables().add_variables( - "C", - indices={0: ['A', 'B', 'C'], 1: [0, 0.5, 1.0]}, - time_index_position=1 + "C", indices={0: ["A", "B", "C"], 1: [0, 0.5, 1.0]}, time_index_position=1 ) def test_with_time_plus_two_extra_indices(self): - """ This tests confirms the typical usage with a time index plus two extra indices. + """This tests confirms the typical usage with a time index plus two extra indices. This test should execute without throwing any errors. - + """ MeasurementVariables().add_variables( "C", - indices={0: ['A', 'B', 'C'], # species - 1: [0, 0.5, 1.0], # time - 2: [1, 2, 3]}, # position - time_index_position=1 + indices={ + 0: ["A", "B", "C"], # species + 1: [0, 0.5, 1.0], # time + 2: [1, 2, 3], + }, # position + time_index_position=1, ) def test_time_index_position_out_of_bounds(self): - """ This test confirms that an error is thrown when the time index position is out of bounds. - - """ + """This test confirms that an error is thrown when the time index position is out of bounds.""" # if time index is not in indices, an value error is thrown. 
with self.assertRaises(ValueError): MeasurementVariables().add_variables( - "C", - indices={0: ['CA', 'CB', 'CC'], # species - 1: [0, 0.5, 1.0],}, # time - time_index_position=2 # this is out of bounds + "C", + indices={0: ["CA", "CB", "CC"], 1: [0, 0.5, 1.0]}, # species # time + time_index_position=2, # this is out of bounds ) def test_single_measurement_variable(self): - """ This test confirms we can specify a single measurement variable without + """This test confirms we can specify a single measurement variable without specifying the indices. The test should execute with no errors. """ measurements = MeasurementVariables() - measurements.add_variables( - "HelloWorld", - indices=None, - time_index_position=None) + measurements.add_variables("HelloWorld", indices=None, time_index_position=None) def test_without_time_index(self): - """ This test confirms we can add a measurement variable without specifying the time index. + """This test confirms we can add a measurement variable without specifying the time index. The test should execute with no errors. """ MeasurementVariables().add_variables( - "C", - indices= {0: ['CA', 'CB', 'CC']}, # species as only index - time_index_position=None # no time index - ) - + "C", + indices={0: ["CA", "CB", "CC"]}, # species as only index + time_index_position=None, # no time index + ) + def test_only_time_index(self): - """ This test confirms we can add a measurement variable without specifying the variable name. + """This test confirms we can add a measurement variable without specifying the variable name. The test should execute with no errors. """ MeasurementVariables().add_variables( - "HelloWorld", # name of the variable - indices={0: [0, 0.5, 1.0]}, - time_index_position=0 + "HelloWorld", # name of the variable + indices={0: [0, 0.5, 1.0]}, + time_index_position=0, ) def test_with_no_measurement_name(self): - """ This test confirms that an error is thrown when None is used as the measurement name. - - """ + """This test confirms that an error is thrown when None is used as the measurement name.""" with self.assertRaises(TypeError): MeasurementVariables().add_variables( - None, - indices={0: [0, 0.5, 1.0]}, - time_index_position=0 + None, indices={0: [0, 0.5, 1.0]}, time_index_position=0 ) - - def test_with_non_string_measurement_name(self): - """ This test confirms that an error is thrown when a non-string is used as the measurement name. - """ + def test_with_non_string_measurement_name(self): + """This test confirms that an error is thrown when a non-string is used as the measurement name.""" with self.assertRaises(TypeError): MeasurementVariables().add_variables( - 1, - indices={0: [0, 0.5, 1.0]}, - time_index_position=0 + 1, indices={0: [0, 0.5, 1.0]}, time_index_position=0 ) def test_non_integer_index_keys(self): - """ This test confirms that strings can be used as keys for specifying the indices. + """This test confirms that strings can be used as keys for specifying the indices. Warning: it is possible this usage breaks something else in Pyomo.DoE. - There may be an implicit assumption that the order of the keys must match the order + There may be an implicit assumption that the order of the keys must match the order of the indices in the Pyomo model. 
""" MeasurementVariables().add_variables( "C", - indices={"species": ['CA', 'CB', 'CC'], "time": [0, 0.5, 1.0]}, - time_index_position="time" + indices={"species": ["CA", "CB", "CC"], "time": [0, 0.5, 1.0]}, + time_index_position="time", ) + class TestDesignError(unittest.TestCase): def test(self): t_control = [0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1] @@ -165,7 +152,7 @@ def test(self): exp_design = DesignVariables() # add T as design variable - var_T = 'T' + var_T = "T" indices_T = {0: t_control} exp1_T = [470, 300, 300, 300, 300, 300, 300, 300, 300] @@ -201,7 +188,7 @@ def test(self): t_control = [0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1] # measurement object variable_name = "C" - indices = {0: ['CA', 'CB', 'CC'], 1: t_control} + indices = {0: ["CA", "CB", "CC"], 1: t_control} measurements = MeasurementVariables() measurements.add_variables( @@ -212,7 +199,7 @@ def test(self): exp_design = DesignVariables() # add CAO as design variable - var_C = 'CA0' + var_C = "CA0" indices_C = {0: [0]} exp1_C = [5] exp_design.add_variables( @@ -225,7 +212,7 @@ def test(self): ) # add T as design variable - var_T = 'T' + var_T = "T" indices_T = {0: t_control} exp1_T = [470, 300, 300, 300, 300, 300, 300, 300, 300] @@ -272,7 +259,7 @@ def test_setup(self): # add variable C variable_name = "C" - indices = {0: ['CA', 'CB', 'CC'], 1: t_control} + indices = {0: ["CA", "CB", "CC"], 1: t_control} measurements.add_variables( variable_name, indices=indices, time_index_position=1 ) @@ -285,36 +272,36 @@ def test_setup(self): ) # check variable names - self.assertEqual(measurements.variable_names[0], 'C[CA,0]') - self.assertEqual(measurements.variable_names[1], 'C[CA,0.125]') - self.assertEqual(measurements.variable_names[-1], 'T[5,0.8]') - self.assertEqual(measurements.variable_names[-2], 'T[5,0.6]') - self.assertEqual(measurements.variance['T[5,0.4]'], 10) - self.assertEqual(measurements.variance['T[5,0.6]'], 10) - self.assertEqual(measurements.variance['T[5,0.4]'], 10) - self.assertEqual(measurements.variance['T[5,0.6]'], 10) + self.assertEqual(measurements.variable_names[0], "C[CA,0]") + self.assertEqual(measurements.variable_names[1], "C[CA,0.125]") + self.assertEqual(measurements.variable_names[-1], "T[5,0.8]") + self.assertEqual(measurements.variable_names[-2], "T[5,0.6]") + self.assertEqual(measurements.variance["T[5,0.4]"], 10) + self.assertEqual(measurements.variance["T[5,0.6]"], 10) + self.assertEqual(measurements.variance["T[5,0.4]"], 10) + self.assertEqual(measurements.variance["T[5,0.6]"], 10) ### specify function var_names = [ - 'C[CA,0]', - 'C[CA,0.125]', - 'C[CA,0.875]', - 'C[CA,1]', - 'C[CB,0]', - 'C[CB,0.125]', - 'C[CB,0.25]', - 'C[CB,0.375]', - 'C[CC,0]', - 'C[CC,0.125]', - 'C[CC,0.25]', - 'C[CC,0.375]', + "C[CA,0]", + "C[CA,0.125]", + "C[CA,0.875]", + "C[CA,1]", + "C[CB,0]", + "C[CB,0.125]", + "C[CB,0.25]", + "C[CB,0.375]", + "C[CC,0]", + "C[CC,0.125]", + "C[CC,0.25]", + "C[CC,0.375]", ] measurements2 = MeasurementVariables() measurements2.set_variable_name_list(var_names) - self.assertEqual(measurements2.variable_names[1], 'C[CA,0.125]') - self.assertEqual(measurements2.variable_names[-1], 'C[CC,0.375]') + self.assertEqual(measurements2.variable_names[1], "C[CA,0.125]") + self.assertEqual(measurements2.variable_names[-1], "C[CC,0.375]") ### check_subset function self.assertTrue(measurements.check_subset(measurements2)) @@ -330,7 +317,7 @@ def test_setup(self): exp_design = DesignVariables() # add CAO as design variable - var_C = 'CA0' + var_C = "CA0" indices_C = {0: [0]} 
exp1_C = [5] exp_design.add_variables( @@ -343,7 +330,7 @@ def test_setup(self): ) # add T as design variable - var_T = 'T' + var_T = "T" indices_T = {0: t_control} exp1_T = [470, 300, 300, 300, 300, 300, 300, 300, 300] @@ -359,31 +346,31 @@ def test_setup(self): self.assertEqual( exp_design.variable_names, [ - 'CA0[0]', - 'T[0]', - 'T[0.125]', - 'T[0.25]', - 'T[0.375]', - 'T[0.5]', - 'T[0.625]', - 'T[0.75]', - 'T[0.875]', - 'T[1]', + "CA0[0]", + "T[0]", + "T[0.125]", + "T[0.25]", + "T[0.375]", + "T[0.5]", + "T[0.625]", + "T[0.75]", + "T[0.875]", + "T[1]", ], ) - self.assertEqual(exp_design.variable_names_value['CA0[0]'], 5) - self.assertEqual(exp_design.variable_names_value['T[0]'], 470) - self.assertEqual(exp_design.upper_bounds['CA0[0]'], 5) - self.assertEqual(exp_design.upper_bounds['T[0]'], 700) - self.assertEqual(exp_design.lower_bounds['CA0[0]'], 1) - self.assertEqual(exp_design.lower_bounds['T[0]'], 300) + self.assertEqual(exp_design.variable_names_value["CA0[0]"], 5) + self.assertEqual(exp_design.variable_names_value["T[0]"], 470) + self.assertEqual(exp_design.upper_bounds["CA0[0]"], 5) + self.assertEqual(exp_design.upper_bounds["T[0]"], 700) + self.assertEqual(exp_design.lower_bounds["CA0[0]"], 1) + self.assertEqual(exp_design.lower_bounds["T[0]"], 300) design_names = exp_design.variable_names exp1 = [4, 600, 300, 300, 300, 300, 300, 300, 300, 300] exp1_design_dict = dict(zip(design_names, exp1)) exp_design.update_values(exp1_design_dict) - self.assertEqual(exp_design.variable_names_value['CA0[0]'], 4) - self.assertEqual(exp_design.variable_names_value['T[0]'], 600) + self.assertEqual(exp_design.variable_names_value["CA0[0]"], 4) + self.assertEqual(exp_design.variable_names_value["T[0]"], 600) class TestParameter(unittest.TestCase): @@ -391,19 +378,19 @@ class TestParameter(unittest.TestCase): def test_setup(self): # set up parameter class - param_dict = {'A1': 84.79, 'A2': 371.72, 'E1': 7.78, 'E2': 15.05} + param_dict = {"A1": 84.79, "A2": 371.72, "E1": 7.78, "E2": 15.05} scenario_gene = ScenarioGenerator(param_dict, formula="central", step=0.1) parameter_set = scenario_gene.ScenarioData - self.assertAlmostEqual(parameter_set.eps_abs['A1'], 16.9582, places=1) - self.assertAlmostEqual(parameter_set.eps_abs['E1'], 1.5554, places=1) - self.assertEqual(parameter_set.scena_num['A2'], [2, 3]) - self.assertEqual(parameter_set.scena_num['E1'], [4, 5]) - self.assertAlmostEqual(parameter_set.scenario[0]['A1'], 93.2699, places=1) - self.assertAlmostEqual(parameter_set.scenario[2]['A2'], 408.8895, places=1) - self.assertAlmostEqual(parameter_set.scenario[-1]['E2'], 13.54, places=1) - self.assertAlmostEqual(parameter_set.scenario[-2]['E2'], 16.55, places=1) + self.assertAlmostEqual(parameter_set.eps_abs["A1"], 16.9582, places=1) + self.assertAlmostEqual(parameter_set.eps_abs["E1"], 1.5554, places=1) + self.assertEqual(parameter_set.scena_num["A2"], [2, 3]) + self.assertEqual(parameter_set.scena_num["E1"], [4, 5]) + self.assertAlmostEqual(parameter_set.scenario[0]["A1"], 93.2699, places=1) + self.assertAlmostEqual(parameter_set.scenario[2]["A2"], 408.8895, places=1) + self.assertAlmostEqual(parameter_set.scenario[-1]["E2"], 13.54, places=1) + self.assertAlmostEqual(parameter_set.scenario[-2]["E2"], 16.55, places=1) class TestVariablesWithIndices(unittest.TestCase): @@ -414,7 +401,7 @@ def test_setup(self): t_control = [0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1] ### add_element function # add CAO as design variable - var_C = 'CA0' + var_C = "CA0" indices_C = {0: [0]} exp1_C = [5] 
special.add_variables( @@ -427,7 +414,7 @@ def test_setup(self): ) # add T as design variable - var_T = 'T' + var_T = "T" indices_T = {0: t_control} exp1_T = [470, 300, 300, 300, 300, 300, 300, 300, 300] @@ -443,25 +430,25 @@ def test_setup(self): self.assertEqual( special.variable_names, [ - 'CA0[0]', - 'T[0]', - 'T[0.125]', - 'T[0.25]', - 'T[0.375]', - 'T[0.5]', - 'T[0.625]', - 'T[0.75]', - 'T[0.875]', - 'T[1]', + "CA0[0]", + "T[0]", + "T[0.125]", + "T[0.25]", + "T[0.375]", + "T[0.5]", + "T[0.625]", + "T[0.75]", + "T[0.875]", + "T[1]", ], ) - self.assertEqual(special.variable_names_value['CA0[0]'], 5) - self.assertEqual(special.variable_names_value['T[0]'], 470) - self.assertEqual(special.upper_bounds['CA0[0]'], 5) - self.assertEqual(special.upper_bounds['T[0]'], 700) - self.assertEqual(special.lower_bounds['CA0[0]'], 1) - self.assertEqual(special.lower_bounds['T[0]'], 300) + self.assertEqual(special.variable_names_value["CA0[0]"], 5) + self.assertEqual(special.variable_names_value["T[0]"], 470) + self.assertEqual(special.upper_bounds["CA0[0]"], 5) + self.assertEqual(special.upper_bounds["T[0]"], 700) + self.assertEqual(special.lower_bounds["CA0[0]"], 1) + self.assertEqual(special.lower_bounds["T[0]"], 300) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/pyomo/contrib/doe/tests/test_reactor_example.py b/pyomo/contrib/doe/tests/test_reactor_example.py index daf2ee89194..3fca93b5ded 100644 --- a/pyomo/contrib/doe/tests/test_reactor_example.py +++ b/pyomo/contrib/doe/tests/test_reactor_example.py @@ -34,7 +34,7 @@ from pyomo.contrib.doe.examples.reactor_kinetics import create_model, disc_for_measure from pyomo.opt import SolverFactory -ipopt_available = SolverFactory('ipopt').available() +ipopt_available = SolverFactory("ipopt").available() class Test_example_options(unittest.TestCase): @@ -70,11 +70,11 @@ def test_setUP(self): # Control time set [h] t_control = [0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1] # Define parameter nominal value - parameter_dict = {'A1': 84.79, 'A2': 371.72, 'E1': 7.78, 'E2': 15.05} + parameter_dict = {"A1": 84.79, "A2": 371.72, "E1": 7.78, "E2": 15.05} # measurement object variable_name = "C" - indices = {0: ['CA', 'CB', 'CC'], 1: t_control} + indices = {0: ["CA", "CB", "CC"], 1: t_control} measurements = MeasurementVariables() measurements.add_variables( @@ -85,7 +85,7 @@ def test_setUP(self): exp_design = DesignVariables() # add CAO as design variable - var_C = 'CA0' + var_C = "CA0" indices_C = {0: [0]} exp1_C = [5] exp_design.add_variables( @@ -98,7 +98,7 @@ def test_setUP(self): ) # add T as design variable - var_T = 'T' + var_T = "T" indices_T = {0: t_control} exp1_T = [470, 300, 300, 300, 300, 300, 300, 300, 300] @@ -216,5 +216,5 @@ def test_setUP(self): self.assertAlmostEqual(value(optimize_result.model.T[0.5]), 300, places=2) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() From 35edf83785e4db3071080903b92ccd6c809c32bd Mon Sep 17 00:00:00 2001 From: Eslick Date: Thu, 23 May 2024 14:33:53 -0400 Subject: [PATCH 047/113] Change the way models in main are handeled --- pyomo/contrib/viewer/model_select.py | 14 ++++++----- pyomo/contrib/viewer/pyomo_viewer.py | 2 +- pyomo/contrib/viewer/tests/test_qt.py | 16 ++++++------ pyomo/contrib/viewer/ui.py | 35 ++++++++++++++++++++++++--- pyomo/contrib/viewer/ui_data.py | 13 +++++++++- 5 files changed, 61 insertions(+), 19 deletions(-) diff --git a/pyomo/contrib/viewer/model_select.py b/pyomo/contrib/viewer/model_select.py index e9c82740708..1e65e91a089 
100644 --- a/pyomo/contrib/viewer/model_select.py +++ b/pyomo/contrib/viewer/model_select.py @@ -60,31 +60,33 @@ def select_model(self): items = self.tableWidget.selectedItems() if len(items) == 0: return - self.ui_data.model = self.models[items[0].row()] + self.ui_data.model_var_name_in_main = self.models[items[0].row()][1] + self.ui_data.model = self.models[items[0].row()][0] self.close() def update_models(self): import __main__ - s = __main__.__dict__ + s = dir(__main__) keys = [] for k in s: - if isinstance(s[k], pyo.Block): + if isinstance(getattr(__main__, k), pyo.Block): keys.append(k) self.tableWidget.clearContents() self.tableWidget.setRowCount(len(keys)) self.models = [] for row, k in enumerate(sorted(keys)): + model = getattr(__main__, k) item = myqt.QTableWidgetItem() item.setText(k) self.tableWidget.setItem(row, 0, item) item = myqt.QTableWidgetItem() try: - item.setText(s[k].name) + item.setText(model.name) except: item.setText("None") self.tableWidget.setItem(row, 1, item) item = myqt.QTableWidgetItem() - item.setText(str(type(s[k]))) + item.setText(str(type(model))) self.tableWidget.setItem(row, 2, item) - self.models.append(s[k]) + self.models.append((model, k)) diff --git a/pyomo/contrib/viewer/pyomo_viewer.py b/pyomo/contrib/viewer/pyomo_viewer.py index 6a24e12aa61..e4f75c86840 100644 --- a/pyomo/contrib/viewer/pyomo_viewer.py +++ b/pyomo/contrib/viewer/pyomo_viewer.py @@ -41,7 +41,7 @@ class QtApp( model except NameError: model=None - ui, model = get_mainwindow(model=model, ask_close=False) + ui = get_mainwindow(model=model, ask_close=False) ui.setWindowTitle('Pyomo Model Viewer -- {}')""" _kernel_cmd_hide_ui = """try: diff --git a/pyomo/contrib/viewer/tests/test_qt.py b/pyomo/contrib/viewer/tests/test_qt.py index e71921500f9..b7250729cd9 100644 --- a/pyomo/contrib/viewer/tests/test_qt.py +++ b/pyomo/contrib/viewer/tests/test_qt.py @@ -103,7 +103,7 @@ def blackbox(a, b): @unittest.skipIf(not available, "Qt packages are not available.") def test_get_mainwindow(qtbot): m = get_model() - mw, m = get_mainwindow(model=m, testing=True) + mw = get_mainwindow(model=m, testing=True) assert hasattr(mw, "menuBar") assert isinstance(mw.variables, ModelBrowser) assert isinstance(mw.constraints, ModelBrowser) @@ -113,13 +113,13 @@ def test_get_mainwindow(qtbot): @unittest.skipIf(not available, "Qt packages are not available.") def test_close_mainwindow(qtbot): - mw, m = get_mainwindow(model=None, testing=True) + mw = get_mainwindow(model=None, testing=True) mw.exit_action() @unittest.skipIf(not available, "Qt packages are not available.") def test_show_model_select_no_models(qtbot): - mw, m = get_mainwindow(model=None, testing=True) + mw = get_mainwindow(model=None, testing=True) ms = mw.show_model_select() ms.update_models() ms.select_model() @@ -128,7 +128,7 @@ def test_show_model_select_no_models(qtbot): @unittest.skipIf(not available, "Qt packages are not available.") def test_model_information(qtbot): m = get_model() - mw, m = get_mainwindow(model=m, testing=True) + mw = get_mainwindow(model=m, testing=True) mw.model_information() assert isinstance(mw._dialog, QMessageBox) text = mw._dialog.text() @@ -149,7 +149,7 @@ def test_model_information(qtbot): @unittest.skipIf(not available, "Qt packages are not available.") def test_tree_expand_collapse(qtbot): m = get_model() - mw, m = get_mainwindow(model=m, testing=True) + mw = get_mainwindow(model=m, testing=True) mw.variables.treeView.expandAll() mw.variables.treeView.collapseAll() @@ -157,7 +157,7 @@ def 
test_tree_expand_collapse(qtbot): @unittest.skipIf(not available, "Qt packages are not available.") def test_residual_table(qtbot): m = get_model() - mw, m = get_mainwindow(model=m, testing=True) + mw = get_mainwindow(model=m, testing=True) mw.residuals_restart() mw.ui_data.calculate_expressions() mw.residuals.calculate() @@ -184,7 +184,7 @@ def test_residual_table(qtbot): @unittest.skipIf(not available, "Qt packages are not available.") def test_var_tree(qtbot): m = get_model() - mw, m = get_mainwindow(model=m, testing=True) + mw = get_mainwindow(model=m, testing=True) qtbot.addWidget(mw) mw.variables.treeView.expandAll() root_index = mw.variables.datmodel.index(0, 0) @@ -218,7 +218,7 @@ def test_var_tree(qtbot): @unittest.skipIf(not available, "Qt packages are not available.") def test_bad_view(qtbot): m = get_model() - mw, m = get_mainwindow(model=m, testing=True) + mw = get_mainwindow(model=m, testing=True) err = None try: mw.badTree = mw._tree_restart( diff --git a/pyomo/contrib/viewer/ui.py b/pyomo/contrib/viewer/ui.py index 374af8a26f0..75b786b9796 100644 --- a/pyomo/contrib/viewer/ui.py +++ b/pyomo/contrib/viewer/ui.py @@ -79,16 +79,31 @@ def get_mainwindow(model=None, show=True, ask_close=True, testing=False): (ui, model): ui is the MainWindow widget, and model is the linked Pyomo model. If no model is provided a new ConcreteModel is created """ + model_name = None if model is None: - model = pyo.ConcreteModel(name="Default") - ui = MainWindow(model=model, ask_close=ask_close, testing=testing) + import __main__ + if "model" in dir(__main__): + if isinstance(getattr(__main__, "model"), pyo.Block): + model = getattr(__main__, "model") + model_name = "model" + for s in dir(__main__): + if isinstance(getattr(__main__, s), pyo.Block): + model = getattr(__main__, s) + model_name = s + break + ui = MainWindow( + model=model, + model_var_name_in_main=model_name, + ask_close=ask_close, + testing=testing, + ) try: get_ipython().events.register("post_execute", ui.refresh_on_execute) except AttributeError: pass # not in ipy kernel, so is fine to not register callback if show: ui.show() - return ui, model + return ui class MainWindow(_MainWindow, _MainWindowUI): @@ -97,6 +112,7 @@ def __init__(self, *args, **kwargs): main = self.main = kwargs.pop("main", None) ask_close = self.ask_close = kwargs.pop("ask_close", True) self.testing = kwargs.pop("testing", False) + model_var_name_in_main = kwargs.pop("model_var_name_in_main", None) flags = kwargs.pop("flags", 0) self.ui_data = UIData(model=model) super().__init__(*args, **kwargs) @@ -128,6 +144,7 @@ def __init__(self, *args, **kwargs): self.actionCalculateExpressions.triggered.connect( self.ui_data.calculate_expressions ) + self.ui_data.model_var_name_in_main = model_var_name_in_main self.actionTile.triggered.connect(self.mdiArea.tileSubWindows) self.actionCascade.triggered.connect(self.mdiArea.cascadeSubWindows) self.actionTabs.triggered.connect(self.toggle_tabs) @@ -256,6 +273,18 @@ def refresh_on_execute(self): ipython kernel. The main purpose of this right now it to refresh the UI display so that it matches the current state of the model. 
""" + if self.ui_data.model_var_name_in_main is not None: + import __main__ + + try: + mname = self.ui_data.model_var_name_in_main + mid = id(getattr(__main__, mname)) + if id(self.ui_data.model) != mid: + self.ui_data.model = getattr(__main__, mname) + self.update_model + return + except AttributeError: + pass for w in self._refresh_list: try: w.refresh() diff --git a/pyomo/contrib/viewer/ui_data.py b/pyomo/contrib/viewer/ui_data.py index c716cfeedf6..8d83be91e5f 100644 --- a/pyomo/contrib/viewer/ui_data.py +++ b/pyomo/contrib/viewer/ui_data.py @@ -39,16 +39,27 @@ class UIDataNoUi(object): UIData. The class is split this way for testing when PyQt is not available. """ - def __init__(self, model=None): + def __init__(self, model=None, model_var_name_in_main=None): """ This class holds the basic UI setup, but doesn't depend on Qt. It shouldn't really be used except for testing when Qt is not available. Args: model: The Pyomo model to view + model_var_name_in_main: if this is set, check that the model variable + which points to a model object in __main__ has the same id when + the UI is refreshed due to a command being executed in jupyter + notebook or QtConsole, if not the same id, then update the model + Since the model viewer is not necessarily pointed at a model in the + __main__ namespace only set this if you want the model to auto + update. Since the model selector dialog lets you choose models + from the __main__ namespace it sets this when you select a model. + This is useful if you run a script repeatedly that replaces a model + preventing you from looking at a previous version of the model. """ super().__init__() self._model = None + self.model_var_name_in_main = model_var_name_in_main self._begin_update = False self.value_cache = ComponentMap() self.value_cache_units = ComponentMap() From 01b5ef233935a1b7f958b49b8c13dacd35c21fde Mon Sep 17 00:00:00 2001 From: Eslick Date: Thu, 23 May 2024 14:58:31 -0400 Subject: [PATCH 048/113] add arg --- pyomo/contrib/viewer/ui.py | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/pyomo/contrib/viewer/ui.py b/pyomo/contrib/viewer/ui.py index 75b786b9796..ac96e58eea9 100644 --- a/pyomo/contrib/viewer/ui.py +++ b/pyomo/contrib/viewer/ui.py @@ -66,7 +66,9 @@ class _MainWindow(object): _log.error(_err) -def get_mainwindow(model=None, show=True, ask_close=True, testing=False): +def get_mainwindow( + model=None, show=True, ask_close=True, model_var_name_in_main=None, testing=False +): """ Create a UI MainWindow. @@ -79,18 +81,19 @@ def get_mainwindow(model=None, show=True, ask_close=True, testing=False): (ui, model): ui is the MainWindow widget, and model is the linked Pyomo model. 
If no model is provided a new ConcreteModel is created """ - model_name = None + model_name = model_var_name_in_main if model is None: import __main__ - if "model" in dir(__main__): - if isinstance(getattr(__main__, "model"), pyo.Block): - model = getattr(__main__, "model") - model_name = "model" - for s in dir(__main__): - if isinstance(getattr(__main__, s), pyo.Block): - model = getattr(__main__, s) - model_name = s - break + + if model_name in dir(__main__): + if isinstance(getattr(__main__, model_name), pyo.Block): + model = getattr(__main__, model_name) + else: + for s in dir(__main__): + if isinstance(getattr(__main__, s), pyo.Block): + model = getattr(__main__, s) + model_name = s + break ui = MainWindow( model=model, model_var_name_in_main=model_name, From a7f21c5c9c36b39f0982c3db12aaaa29258240a6 Mon Sep 17 00:00:00 2001 From: Eslick Date: Thu, 23 May 2024 15:03:52 -0400 Subject: [PATCH 049/113] Update the doc --- pyomo/contrib/viewer/README.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/pyomo/contrib/viewer/README.md b/pyomo/contrib/viewer/README.md index cfc50b54ce2..93d773e3829 100644 --- a/pyomo/contrib/viewer/README.md +++ b/pyomo/contrib/viewer/README.md @@ -42,6 +42,24 @@ ui = get_mainwindow(model=model) # Do model things, the viewer will stay in sync with the Pyomo model ``` +If you are working in Jupyter notebook, Jupyter qtconsole, or other Jupyter- +based IDEs, and your model is in the __main__ namespace (this is the usual case), +you can specify the model by its variable name as below. The advantage of this +is that if you replace the model with a new model having the same variable name, +the UI will automatically update without having to manually reset the model pointer. + +```python +%gui qt #Enables IPython's GUI event loop integration. +# Execute the above in its own cell and wait for it to finish before moving on. +from pyomo.contrib.viewer.ui import get_mainwindow +import pyomo.environ as pyo + +model = pyo.ConcreteModel() # could import an existing model here +ui = get_mainwindow(model_var_name_in_main="model") + +# Do model things, the viewer will stay in sync with the Pyomo model +``` + **Note:** the ```%gui qt``` cell must be executed in its own cell and execution must complete before running any other cells (you can't use "run all"). From 1bf27f02b0f94569a98e06cb0c08f7b978630f3c Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Thu, 23 May 2024 21:21:35 -0400 Subject: [PATCH 050/113] Added reactor_design example for Pyomo.DoE. This is still being debugged. --- pyomo/contrib/doe/examples/reactor_design.py | 174 +++++++++++++++++++ 1 file changed, 174 insertions(+) create mode 100644 pyomo/contrib/doe/examples/reactor_design.py diff --git a/pyomo/contrib/doe/examples/reactor_design.py b/pyomo/contrib/doe/examples/reactor_design.py new file mode 100644 index 00000000000..81a64a0a46a --- /dev/null +++ b/pyomo/contrib/doe/examples/reactor_design.py @@ -0,0 +1,174 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2024 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. 
+# +# Pyomo.DoE was produced under the Department of Energy Carbon Capture Simulation +# Initiative (CCSI), and is copyright (c) 2022 by the software owners: +# TRIAD National Security, LLC., Lawrence Livermore National Security, LLC., +# Lawrence Berkeley National Laboratory, Pacific Northwest National Laboratory, +# Battelle Memorial Institute, University of Notre Dame, +# The University of Pittsburgh, The University of Texas at Austin, +# University of Toledo, West Virginia University, et al. All rights reserved. +# +# NOTICE. This Software was developed under funding from the +# U.S. Department of Energy and the U.S. Government consequently retains +# certain rights. As such, the U.S. Government has been granted for itself +# and others acting on its behalf a paid-up, nonexclusive, irrevocable, +# worldwide license in the Software to reproduce, distribute copies to the +# public, prepare derivative works, and perform publicly and display +# publicly, and to permit other to do so. +# ___________________________________________________________________________ + +# from pyomo.contrib.parmest.examples.reactor_design import reactor_design_model +# if we refactor to use the same create_model function as parmest, +# we can just import instead of redefining the model + +import pyomo.environ as pyo +from pyomo.dae import ContinuousSet, DerivativeVar +from pyomo.contrib.doe import ModelOptionLib, DesignOfExperiments, MeasurementVariables, DesignVariables + +def create_model( + mod=None, + model_option="stage2"): + + model_option = ModelOptionLib(model_option) + + model = mod + + if model_option == ModelOptionLib.parmest: + model = pyo.ConcreteModel() + return_m = True + elif model_option == ModelOptionLib.stage1 or model_option == ModelOptionLib.stage2: + if model is None: + raise ValueError( + "If model option is stage1 or stage2, a created model needs to be provided." + ) + return_m = False + else: + raise ValueError( + "model_option needs to be defined as parmest, stage1, or stage2." 
+ ) + + # Rate constants + model.k1 = pyo.Var( + initialize=5.0 / 6.0, within=pyo.PositiveReals + ) # min^-1 + model.k2 = pyo.Var( + initialize=5.0 / 3.0, within=pyo.PositiveReals + ) # min^-1 + model.k3 = pyo.Var( + initialize=1.0 / 6000.0, within=pyo.PositiveReals + ) # m^3/(gmol min) + + # Inlet concentration of A, gmol/m^3 + model.caf = pyo.Var(initialize=10000, within=pyo.PositiveReals) + + # Space velocity (flowrate/volume) + model.sv = pyo.Var(initialize=1.0, within=pyo.PositiveReals) + + # Outlet concentration of each component + model.ca = pyo.Var(initialize=5000.0, within=pyo.PositiveReals) + model.cb = pyo.Var(initialize=2000.0, within=pyo.PositiveReals) + model.cc = pyo.Var(initialize=2000.0, within=pyo.PositiveReals) + model.cd = pyo.Var(initialize=1000.0, within=pyo.PositiveReals) + + # Objective + model.obj = pyo.Objective(expr=model.cb, sense=pyo.maximize) + + # Constraints + model.ca_bal = pyo.Constraint( + expr=( + 0 + == model.sv * model.caf + - model.sv * model.ca + - model.k1 * model.ca + - 2.0 * model.k3 * model.ca**2.0 + ) + ) + + model.cb_bal = pyo.Constraint( + expr=(0 == -model.sv * model.cb + model.k1 * model.ca - model.k2 * model.cb) + ) + + model.cc_bal = pyo.Constraint( + expr=(0 == -model.sv * model.cc + model.k2 * model.cb) + ) + + model.cd_bal = pyo.Constraint( + expr=(0 == -model.sv * model.cd + model.k3 * model.ca**2.0) + ) + + if return_m: + return model + +def main(): + + # measurement object + measurements = MeasurementVariables() + measurements.add_variables( + "ca", + indices=None, + time_index_position=None, + ) + measurements.add_variables( + "cb", + indices=None, + time_index_position=None + ) + measurements.add_variables( + "cc", + indices=None, + time_index_position=None + ) + measurements.add_variables( + "cd", + indices=None, + time_index_position=None + ) + + # design object + exp_design = DesignVariables() + exp_design.add_variables( + "sv", + indices=None, + time_index_position=None, + values=1.0, + lower_bounds=0.1, + upper_bounds=10.0 + ) + exp_design.add_variables( + "caf", + indices=None, + time_index_position=None, + values=10000, + lower_bounds=5000, + upper_bounds=15000 + ) + + theta_values = {"k1": 5.0 / 6.0, "k2": 5.0 / 3.0, "k3": 1.0 / 6000.0} + + doe1 = DesignOfExperiments( + theta_values, + measurements, + exp_design, + create_model, + prior_FIM=None + ) + + doe1.compute_FIM( + mode="sequential_finite", # calculation mode + scale_nominal_param_value=True, # scale nominal parameter value + formula="central", # formula for finite difference + ) + + doe1.result.result_analysis() + +if __name__ == "__main__": + main() + From 0932c2d308e22eb1875d26f9200d1d36cb6a9a8e Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Thu, 23 May 2024 21:28:03 -0400 Subject: [PATCH 051/113] Debugged some syntax issues in example --- pyomo/contrib/doe/examples/reactor_design.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pyomo/contrib/doe/examples/reactor_design.py b/pyomo/contrib/doe/examples/reactor_design.py index 81a64a0a46a..18d6b07c9fb 100644 --- a/pyomo/contrib/doe/examples/reactor_design.py +++ b/pyomo/contrib/doe/examples/reactor_design.py @@ -114,7 +114,7 @@ def main(): measurements.add_variables( "ca", indices=None, - time_index_position=None, + time_index_position=None ) measurements.add_variables( "cb", @@ -155,8 +155,8 @@ def main(): doe1 = DesignOfExperiments( theta_values, - measurements, exp_design, + measurements, create_model, prior_FIM=None ) @@ -167,7 +167,7 @@ def main(): formula="central", # formula for 
finite difference ) - doe1.result.result_analysis() + doe1.result_analysis() if __name__ == "__main__": main() From 0db9100d22b7a8236c9f6bec9dc1775cfb593d75 Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Thu, 23 May 2024 21:29:09 -0400 Subject: [PATCH 052/113] A few more syntax mistakes --- pyomo/contrib/doe/examples/reactor_design.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyomo/contrib/doe/examples/reactor_design.py b/pyomo/contrib/doe/examples/reactor_design.py index 18d6b07c9fb..10096f17e69 100644 --- a/pyomo/contrib/doe/examples/reactor_design.py +++ b/pyomo/contrib/doe/examples/reactor_design.py @@ -161,13 +161,13 @@ def main(): prior_FIM=None ) - doe1.compute_FIM( + result = doe1.compute_FIM( mode="sequential_finite", # calculation mode scale_nominal_param_value=True, # scale nominal parameter value formula="central", # formula for finite difference ) - doe1.result_analysis() + result.result_analysis() if __name__ == "__main__": main() From ecc4600dcf77a9e234403b85df11e6512122abd2 Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Thu, 23 May 2024 21:51:11 -0400 Subject: [PATCH 053/113] Removed objective from example. --- pyomo/contrib/doe/doe.py | 5 +++++ pyomo/contrib/doe/examples/reactor_design.py | 3 --- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/pyomo/contrib/doe/doe.py b/pyomo/contrib/doe/doe.py index 9356cce360b..0718493a826 100644 --- a/pyomo/contrib/doe/doe.py +++ b/pyomo/contrib/doe/doe.py @@ -423,6 +423,11 @@ def _sequential_finite(self, read_output, extract_single_model, store_output): # dict for storing model outputs output_record = {} + # add zero (dummy/placeholder) objective function + mod.Obj = pyo.Objective(expr=0, sense=pyo.minimize) + + mod.pprint() + # solve model square_result = self._solve_doe(mod, fix=True) diff --git a/pyomo/contrib/doe/examples/reactor_design.py b/pyomo/contrib/doe/examples/reactor_design.py index 10096f17e69..273d97c88c5 100644 --- a/pyomo/contrib/doe/examples/reactor_design.py +++ b/pyomo/contrib/doe/examples/reactor_design.py @@ -78,9 +78,6 @@ def create_model( model.cc = pyo.Var(initialize=2000.0, within=pyo.PositiveReals) model.cd = pyo.Var(initialize=1000.0, within=pyo.PositiveReals) - # Objective - model.obj = pyo.Objective(expr=model.cb, sense=pyo.maximize) - # Constraints model.ca_bal = pyo.Constraint( expr=( From 3fd4784803a86a4aed56ac95a277d2d11a824e78 Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Thu, 23 May 2024 22:01:08 -0400 Subject: [PATCH 054/113] Finished adding tests. 
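A note related to these tests: compute_FIM solves the scenario model as a square problem at fixed design values, which is why the previous patch removed the example's maximize-cb objective and had the DoE code attach a zero (dummy/placeholder) objective before solving. A minimal, self-contained sketch of that pattern (toy model, illustrative only, not the Pyomo.DoE internals):

    import pyomo.environ as pyo

    m = pyo.ConcreteModel()
    m.x = pyo.Var(initialize=1.0)
    m.con = pyo.Constraint(expr=m.x == 2.0)

    # a constant placeholder objective keeps the square solve well-posed
    m.Obj = pyo.Objective(expr=0, sense=pyo.minimize)
    pyo.SolverFactory("ipopt").solve(m)  # zero remaining degrees of freedom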
--- pyomo/contrib/doe/examples/reactor_design.py | 80 ++++++++++---------- pyomo/contrib/doe/tests/test_example.py | 8 ++ 2 files changed, 46 insertions(+), 42 deletions(-) diff --git a/pyomo/contrib/doe/examples/reactor_design.py b/pyomo/contrib/doe/examples/reactor_design.py index 273d97c88c5..450a2800ae1 100644 --- a/pyomo/contrib/doe/examples/reactor_design.py +++ b/pyomo/contrib/doe/examples/reactor_design.py @@ -26,17 +26,22 @@ # ___________________________________________________________________________ # from pyomo.contrib.parmest.examples.reactor_design import reactor_design_model -# if we refactor to use the same create_model function as parmest, +# if we refactor to use the same create_model function as parmest, # we can just import instead of redefining the model import pyomo.environ as pyo from pyomo.dae import ContinuousSet, DerivativeVar -from pyomo.contrib.doe import ModelOptionLib, DesignOfExperiments, MeasurementVariables, DesignVariables +from pyomo.contrib.doe import ( + ModelOptionLib, + DesignOfExperiments, + MeasurementVariables, + DesignVariables, +) +from pyomo.common.dependencies import numpy as np + + +def create_model(mod=None, model_option="stage2"): -def create_model( - mod=None, - model_option="stage2"): - model_option = ModelOptionLib(model_option) model = mod @@ -54,14 +59,10 @@ def create_model( raise ValueError( "model_option needs to be defined as parmest, stage1, or stage2." ) - + # Rate constants - model.k1 = pyo.Var( - initialize=5.0 / 6.0, within=pyo.PositiveReals - ) # min^-1 - model.k2 = pyo.Var( - initialize=5.0 / 3.0, within=pyo.PositiveReals - ) # min^-1 + model.k1 = pyo.Var(initialize=5.0 / 6.0, within=pyo.PositiveReals) # min^-1 + model.k2 = pyo.Var(initialize=5.0 / 3.0, within=pyo.PositiveReals) # min^-1 model.k3 = pyo.Var( initialize=1.0 / 6000.0, within=pyo.PositiveReals ) # m^3/(gmol min) @@ -103,31 +104,16 @@ def create_model( if return_m: return model - + + def main(): # measurement object measurements = MeasurementVariables() - measurements.add_variables( - "ca", - indices=None, - time_index_position=None - ) - measurements.add_variables( - "cb", - indices=None, - time_index_position=None - ) - measurements.add_variables( - "cc", - indices=None, - time_index_position=None - ) - measurements.add_variables( - "cd", - indices=None, - time_index_position=None - ) + measurements.add_variables("ca", indices=None, time_index_position=None) + measurements.add_variables("cb", indices=None, time_index_position=None) + measurements.add_variables("cc", indices=None, time_index_position=None) + measurements.add_variables("cd", indices=None, time_index_position=None) # design object exp_design = DesignVariables() @@ -137,7 +123,7 @@ def main(): time_index_position=None, values=1.0, lower_bounds=0.1, - upper_bounds=10.0 + upper_bounds=10.0, ) exp_design.add_variables( "caf", @@ -145,17 +131,13 @@ def main(): time_index_position=None, values=10000, lower_bounds=5000, - upper_bounds=15000 + upper_bounds=15000, ) theta_values = {"k1": 5.0 / 6.0, "k2": 5.0 / 3.0, "k3": 1.0 / 6000.0} doe1 = DesignOfExperiments( - theta_values, - exp_design, - measurements, - create_model, - prior_FIM=None + theta_values, exp_design, measurements, create_model, prior_FIM=None ) result = doe1.compute_FIM( @@ -166,6 +148,20 @@ def main(): result.result_analysis() + # print("log10 Trace of FIM: ", np.log10(result.trace)) + # print("log10 Determinant of FIM: ", np.log10(result.det)) + + # test result + relative_error_trace = abs(np.log10(result.trace) - 6.815) + assert ( + 
relative_error_trace < 0.01 + ), "log10(tr(FIM)) regression test failed, answer does not match previous result" + + relative_error_det = abs(np.log10(result.det) - 18.719) + assert ( + relative_error_det < 0.01 + ), "log10(det(FIM)) regression test failed, answer does not match previous result" + + if __name__ == "__main__": main() - diff --git a/pyomo/contrib/doe/tests/test_example.py b/pyomo/contrib/doe/tests/test_example.py index 8153e07018a..c92725efd3b 100644 --- a/pyomo/contrib/doe/tests/test_example.py +++ b/pyomo/contrib/doe/tests/test_example.py @@ -65,6 +65,14 @@ def test_reactor_grid_search(self): reactor_grid_search.main() + @unittest.skipIf(not ipopt_available, "The 'ipopt' command is not available") + @unittest.skipIf(not pandas_available, "pandas is not available") + @unittest.skipIf(not numpy_available, "Numpy is not available") + def test_reactor_design(self): + from pyomo.contrib.doe.examples import reactor_design + + reactor_design.main() + if __name__ == "__main__": unittest.main() From 27df1e5e7b4e4005608efbc2e57c6fb5bec813ea Mon Sep 17 00:00:00 2001 From: John Siirola Date: Fri, 24 May 2024 16:23:35 -0600 Subject: [PATCH 055/113] Defer categorizing constraints until after linear presolve --- pyomo/repn/plugins/nl_writer.py | 62 ++++++++++++++++++------------ pyomo/repn/tests/ampl/test_nlv2.py | 56 ++++++++++++++++++++++++++- 2 files changed, 93 insertions(+), 25 deletions(-) diff --git a/pyomo/repn/plugins/nl_writer.py b/pyomo/repn/plugins/nl_writer.py index 0a6f9f3da30..9d66b37a429 100644 --- a/pyomo/repn/plugins/nl_writer.py +++ b/pyomo/repn/plugins/nl_writer.py @@ -702,8 +702,7 @@ def write(self, model): objectives.extend(linear_objs) n_objs = len(objectives) - constraints = [] - linear_cons = [] + all_constraints = [] n_ranges = 0 n_equality = 0 n_complementarity_nonlin = 0 @@ -740,22 +739,7 @@ def write(self, model): ub = ub * scale if scale < 0: lb, ub = ub, lb - if expr_info.nonlinear: - constraints.append((con, expr_info, lb, ub)) - elif expr_info.linear: - linear_cons.append((con, expr_info, lb, ub)) - elif not self.config.skip_trivial_constraints: - linear_cons.append((con, expr_info, lb, ub)) - else: # constant constraint and skip_trivial_constraints - c = expr_info.const - if (lb is not None and lb - c > TOL) or ( - ub is not None and ub - c < -TOL - ): - raise InfeasibleConstraintException( - "model contains a trivially infeasible " - f"constraint '{con.name}' (fixed body value " - f"{c} outside bounds [{lb}, {ub}])." 
- ) + all_constraints.append((con, expr_info, lb, ub)) if linear_presolve: con_id = id(con) if not expr_info.nonlinear and lb == ub and lb is not None: @@ -766,7 +750,7 @@ def write(self, model): # report the last constraint timer.toc('Constraint %s', last_parent, level=logging.DEBUG) else: - timer.toc('Processed %s constraints', len(constraints)) + timer.toc('Processed %s constraints', len(all_constraints)) # This may fetch more bounds than needed, but only in the cases # where variables were completely eliminated while walking the @@ -781,14 +765,44 @@ def write(self, model): del comp_by_linear_var del lcon_by_linear_nnz - # Order the constraints, moving all nonlinear constraints to - # the beginning - n_nonlinear_cons = len(constraints) + # Note: defer categorizing constraints until after presolve, as + # the presolver could result in nonlinear constraints to become + # linear (or trivial) + constraints = [] + linear_cons = [] if eliminated_cons: _removed = eliminated_cons.__contains__ - constraints.extend(filterfalse(lambda c: _removed(id(c[0])), linear_cons)) + _constraints = filterfalse(lambda c: _removed(id(c[0])), all_constraints) else: - constraints.extend(linear_cons) + _constraints = all_constraints + for info in _constraints: + expr_info = info[1] + if expr_info.nonlinear: + if expr_info.nonlinear[1]: + constraints.append(info) + continue + expr_info.const += _evaluate_constant_nl(expr_info.nonlinear[0]) + expr_info.nonlinear = None + if expr_info.linear: + linear_cons.append(info) + elif not self.config.skip_trivial_constraints: + linear_cons.append(info) + else: # constant constraint and skip_trivial_constraints + c = expr_info.const + con, expr_info, lb, ub = info + if (lb is not None and lb - c > TOL) or ( + ub is not None and ub - c < -TOL + ): + raise InfeasibleConstraintException( + "model contains a trivially infeasible " + f"constraint '{con.name}' (fixed body value " + f"{c} outside bounds [{lb}, {ub}])." 
+ ) + + # Order the constraints, moving all nonlinear constraints to + # the beginning + n_nonlinear_cons = len(constraints) + constraints.extend(linear_cons) n_cons = len(constraints) # diff --git a/pyomo/repn/tests/ampl/test_nlv2.py b/pyomo/repn/tests/ampl/test_nlv2.py index 0aa9fab96f9..09936b45bbc 100644 --- a/pyomo/repn/tests/ampl/test_nlv2.py +++ b/pyomo/repn/tests/ampl/test_nlv2.py @@ -1763,7 +1763,11 @@ def test_presolve_zero_coef(self): OUT = io.StringIO() with LoggingIntercept() as LOG: nlinfo = nl_writer.NLWriter().write( - m, OUT, symbolic_solver_labels=True, linear_presolve=True + m, + OUT, + symbolic_solver_labels=True, + linear_presolve=True, + skip_trivial_constraints=False, ) self.assertEqual(LOG.getvalue(), "") @@ -1808,6 +1812,56 @@ def test_presolve_zero_coef(self): k0 #intermediate Jacobian column lengths G0 1 #obj 0 0 +""", + OUT.getvalue(), + ) + ) + + OUT = io.StringIO() + with LoggingIntercept() as LOG: + nlinfo = nl_writer.NLWriter().write( + m, OUT, symbolic_solver_labels=True, linear_presolve=True + ) + self.assertEqual(LOG.getvalue(), "") + + self.assertIs(nlinfo.eliminated_vars[0][0], m.y) + self.assertExpressionsEqual( + nlinfo.eliminated_vars[0][1], LinearExpression([-1.0 * m.z]) + ) + self.assertEqual(nlinfo.eliminated_vars[1], (m.x, 2)) + + self.assertEqual( + *nl_diff( + """g3 1 1 0 # problem unknown + 1 0 1 0 0 #vars, constraints, objectives, ranges, eqns + 0 1 0 0 0 0 #nonlinear constrs, objs; ccons: lin, nonlin, nd, nzlb + 0 0 #network constraints: nonlinear, linear + 0 1 0 #nonlinear vars in constraints, objectives, both + 0 0 0 1 #linear network variables; functions; arith, flags + 0 0 0 0 0 #discrete variables: binary, integer, nonlinear (b,c,o) + 0 1 #nonzeros in Jacobian, obj. gradient + 3 1 #max name lengths: constraints, variables + 0 0 0 0 0 #common exprs: b,c,o,c1,o1 +O0 0 #obj +o54 #sumlist +3 #(n) +o5 #^ +n2 +n2 +o5 #^ +o16 #- +v0 #z +n2 +o5 #^ +v0 #z +n2 +x0 #initial guess +r #1 ranges (rhs's) +b #1 bounds (on variables) +3 #z +k0 #intermediate Jacobian column lengths +G0 1 #obj +0 0 """, OUT.getvalue(), ) From 374fd7df4577b6eceb068b97c7bce41496a7f3e3 Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Fri, 24 May 2024 19:37:04 -0400 Subject: [PATCH 056/113] Added logic to support slimmer create_model. I still need to debug one part. 
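The compatibility logic in this patch distinguishes the original create_model interface from the slimmer one by inspecting the function signature. Roughly, and with an invented helper name purely for illustration:

    import inspect

    def uses_original_interface(create_model):
        # original Pyomo.DoE interface: create_model(mod=..., model_option=...)
        # slim interface: create_model() simply builds and returns a new model
        args = inspect.getfullargspec(create_model).args
        return "model_option" in args and "mod" in args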
--- pyomo/contrib/doe/doe.py | 103 ++++++++++++++----- pyomo/contrib/doe/examples/reactor_design.py | 11 +- 2 files changed, 90 insertions(+), 24 deletions(-) diff --git a/pyomo/contrib/doe/doe.py b/pyomo/contrib/doe/doe.py index 0718493a826..d8f1781051c 100644 --- a/pyomo/contrib/doe/doe.py +++ b/pyomo/contrib/doe/doe.py @@ -42,6 +42,7 @@ import inspect +import pyomo.contrib.parmest.utils as utils class CalculationMode(Enum): sequential_finite = "sequential_finite" @@ -115,6 +116,15 @@ def __init__( self.design_vars = design_vars self.create_model = create_model + # check if create model function conforms to the original + # Pyomo.DoE interface + model_option_arg = "model_option" in inspect.getfullargspec(self.create_model).args + mod_arg = "mod" in inspect.getfullargspec(self.create_model).args + if model_option_arg and mod_arg: + self._original_create_model_interface = True + else: + self._original_create_model_interface = False + if args is None: args = {} self.args = args @@ -423,10 +433,17 @@ def _sequential_finite(self, read_output, extract_single_model, store_output): # dict for storing model outputs output_record = {} + # Deactivate any existing objective functions + for obj in mod.component_objects(pyo.Objective): + obj.deactivate() + # add zero (dummy/placeholder) objective function mod.Obj = pyo.Objective(expr=0, sense=pyo.minimize) - mod.pprint() + # convert params to vars + # print("self.param.keys():", self.param.keys()) + # mod = utils.convert_params_to_vars(mod, self.param.keys(), fix_vars=True) + # mod.pprint() # solve model square_result = self._solve_doe(mod, fix=True) @@ -495,15 +512,25 @@ def _sequential_finite(self, read_output, extract_single_model, store_output): def _direct_kaug(self): # create model - mod = self.create_model(model_option=ModelOptionLib.parmest, **self.args) + if self._original_create_model_interface: + mod = self.create_model(model_option=ModelOptionLib.parmest, **self.args) + else: + mod = self.create_model(**self.args) # discretize if needed if self.discretize_model is not None: mod = self.discretize_model(mod, block=False) + # Deactivate any existing objective functions + for obj in mod.component_objects(pyo.Objective): + obj.deactivate() + # add zero (dummy/placeholder) objective function mod.Obj = pyo.Objective(expr=0, sense=pyo.minimize) + # convert params to vars + # mod = utils.convert_params_to_vars(mod, self.param.keys(), fix_vars=True) + # set ub and lb to parameters for par in self.param.keys(): cuid = pyo.ComponentUID(par) @@ -608,19 +635,37 @@ def _create_block(self): self.eps_abs = self.scenario_data.eps_abs self.scena_gen = scena_gen - # Create a global model - mod = pyo.ConcreteModel() - - # Set for block/scenarios - mod.scenario = pyo.Set(initialize=self.scenario_data.scenario_indices) - # Determine if create_model takes theta as an optional input pass_theta_to_initialize = ( "theta" in inspect.getfullargspec(self.create_model).args ) # Allow user to self-define complex design variables - self.create_model(mod=mod, model_option=ModelOptionLib.stage1, **self.args) + if self._original_create_model_interface: + + # Create a global model + mod = pyo.ConcreteModel() + + if pass_theta_to_initialize: + # Add model on block with theta values + self.create_model( + mod=mod, + model_option=ModelOptionLib.stage1, + theta=self.param, + **self.args, + ) + else: + # Add model on block without theta values + self.create_model(mod=mod, + model_option=ModelOptionLib.stage1, + **self.args) + + else: + # Create a global model + mod = 
self.create_model(**self.args) + + # Set for block/scenarios + mod.scenario = pyo.Set(initialize=self.scenario_data.scenario_indices) # Fix parameter values in the copy of the stage1 model (if they exist) for par in self.param: @@ -635,21 +680,33 @@ def block_build(b, s): # create block scenarios # idea: check if create_model takes theta as an optional input, if so, pass parameter values to create_model - if pass_theta_to_initialize: - # Grab the values of theta for this scenario/block - theta_initialize = self.scenario_data.scenario[s] - # Add model on block with theta values - self.create_model( - mod=b, - model_option=ModelOptionLib.stage2, - theta=theta_initialize, - **self.args, - ) + # TODO: Check if this is correct syntax for adding a model to a block + + if self._original_create_model_interface: + if pass_theta_to_initialize: + # Grab the values of theta for this scenario/block + theta_initialize = self.scenario_data.scenario[s] + # Add model on block with theta values + self.create_model( + mod=b, + model_option=ModelOptionLib.stage2, + theta=theta_initialize, + **self.args, + ) + else: + # Otherwise add model on block without theta values + self.create_model( + mod=b, model_option=ModelOptionLib.stage2, **self.args + ) else: - # Otherwise add model on block without theta values - self.create_model( - mod=b, model_option=ModelOptionLib.stage2, **self.args - ) + # Add model on block + if pass_theta_to_initialize: + # Grab the values of theta for this scenario/block + theta_initialize = self.scenario_data.scenario[s] + # This syntax is not yet correct :( + b = self.create_model(theta=theta_initialize, **self.args) + else: + b = self.create_model(**self.args) # fix parameter values to perturbed values for par in self.param: diff --git a/pyomo/contrib/doe/examples/reactor_design.py b/pyomo/contrib/doe/examples/reactor_design.py index 450a2800ae1..4fbc979041e 100644 --- a/pyomo/contrib/doe/examples/reactor_design.py +++ b/pyomo/contrib/doe/examples/reactor_design.py @@ -40,8 +40,10 @@ from pyomo.common.dependencies import numpy as np -def create_model(mod=None, model_option="stage2"): +def create_model(): + # This is the old Pyomo.DoE interface + ''' model_option = ModelOptionLib(model_option) model = mod @@ -59,6 +61,10 @@ def create_model(mod=None, model_option="stage2"): raise ValueError( "model_option needs to be defined as parmest, stage1, or stage2." ) + ''' + + # This is the streamlined Pyomo.DoE interface + model = pyo.ConcreteModel() # Rate constants model.k1 = pyo.Var(initialize=5.0 / 6.0, within=pyo.PositiveReals) # min^-1 @@ -102,8 +108,11 @@ def create_model(mod=None, model_option="stage2"): expr=(0 == -model.sv * model.cd + model.k3 * model.ca**2.0) ) + ''' if return_m: return model + ''' + return model def main(): From ffefdef8217e62b92c0e27f63d20dae50ab923e5 Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Fri, 24 May 2024 21:27:01 -0400 Subject: [PATCH 057/113] Added and tested support for a "slim" create model interface. This will make the workshop examples much easier. 
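With the slim interface, each scenario block is filled in by returning a freshly built model from the Block rule (the return mod_ above), relying on Pyomo to transfer the returned model's components onto the block. A self-contained toy illustration of that pattern, not the DoE code itself:

    import pyomo.environ as pyo

    def create_model():
        m = pyo.ConcreteModel()
        m.x = pyo.Var(initialize=1.0)
        m.c = pyo.Constraint(expr=m.x >= 0)
        return m

    m = pyo.ConcreteModel()
    m.scenario = pyo.Set(initialize=[0, 1, 2])

    def block_build(b, s):
        # returning a new model hands its components over to scenario block s
        return create_model()

    m.block = pyo.Block(m.scenario, rule=block_build)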
--- pyomo/contrib/doe/doe.py | 21 +++++--- pyomo/contrib/doe/examples/reactor_design.py | 55 +++++++++++++------- pyomo/contrib/doe/tests/test_example.py | 14 +++-- 3 files changed, 61 insertions(+), 29 deletions(-) diff --git a/pyomo/contrib/doe/doe.py b/pyomo/contrib/doe/doe.py index d8f1781051c..47fee913ba8 100644 --- a/pyomo/contrib/doe/doe.py +++ b/pyomo/contrib/doe/doe.py @@ -680,8 +680,6 @@ def block_build(b, s): # create block scenarios # idea: check if create_model takes theta as an optional input, if so, pass parameter values to create_model - # TODO: Check if this is correct syntax for adding a model to a block - if self._original_create_model_interface: if pass_theta_to_initialize: # Grab the values of theta for this scenario/block @@ -698,22 +696,28 @@ def block_build(b, s): self.create_model( mod=b, model_option=ModelOptionLib.stage2, **self.args ) + + # save block in a temporary variable + mod_ = b else: # Add model on block if pass_theta_to_initialize: # Grab the values of theta for this scenario/block theta_initialize = self.scenario_data.scenario[s] - # This syntax is not yet correct :( - b = self.create_model(theta=theta_initialize, **self.args) + mod_ = self.create_model(theta=theta_initialize, **self.args) else: - b = self.create_model(**self.args) + mod_ = self.create_model(**self.args) # fix parameter values to perturbed values for par in self.param: cuid = pyo.ComponentUID(par) - var = cuid.find_component_on(b) + var = cuid.find_component_on(mod_) var.fix(self.scenario_data.scenario[s][par]) + if not self._original_create_model_interface: + # for the "new"/"slim" interface, we need to add the block to the model + return mod_ + mod.block = pyo.Block(mod.scenario, rule=block_build) # discretize the model @@ -1377,7 +1381,10 @@ def _solve_doe(self, m, fix=False, opt_option=None): # either fix or unfix the design variables mod = self._fix_design( - m, self.design_values, fix_opt=fix, optimize_option=opt_option + m, + self.design_values, + fix_opt=fix, + optimize_option=opt_option ) # if user gives solver, use this solver. if not, use default IPOPT solver diff --git a/pyomo/contrib/doe/examples/reactor_design.py b/pyomo/contrib/doe/examples/reactor_design.py index 4fbc979041e..5c1ecb8a79d 100644 --- a/pyomo/contrib/doe/examples/reactor_design.py +++ b/pyomo/contrib/doe/examples/reactor_design.py @@ -40,10 +40,7 @@ from pyomo.common.dependencies import numpy as np -def create_model(): - - # This is the old Pyomo.DoE interface - ''' +def create_model_legacy(mod=None, model_option=None): model_option = ModelOptionLib(model_option) model = mod @@ -61,10 +58,18 @@ def create_model(): raise ValueError( "model_option needs to be defined as parmest, stage1, or stage2." 
) - ''' + + model = _create_model_details(model) + + if return_m: + return model + - # This is the streamlined Pyomo.DoE interface +def create_model(): model = pyo.ConcreteModel() + return _create_model_details(model) + +def _create_model_details(model): # Rate constants model.k1 = pyo.Var(initialize=5.0 / 6.0, within=pyo.PositiveReals) # min^-1 @@ -108,14 +113,10 @@ def create_model(): expr=(0 == -model.sv * model.cd + model.k3 * model.ca**2.0) ) - ''' - if return_m: - return model - ''' return model -def main(): +def main(legacy_create_model_interface=False): # measurement object measurements = MeasurementVariables() @@ -145,32 +146,50 @@ def main(): theta_values = {"k1": 5.0 / 6.0, "k2": 5.0 / 3.0, "k3": 1.0 / 6000.0} + if legacy_create_model_interface: + create_model_ = create_model_legacy + else: + create_model_ = create_model + doe1 = DesignOfExperiments( - theta_values, exp_design, measurements, create_model, prior_FIM=None + theta_values, + exp_design, + measurements, + create_model_, + prior_FIM=None ) + + result = doe1.compute_FIM( mode="sequential_finite", # calculation mode scale_nominal_param_value=True, # scale nominal parameter value formula="central", # formula for finite difference ) + doe1.model.pprint() + result.result_analysis() + # print("FIM =\n",result.FIM) + # print("jac =\n",result.jaco_information) # print("log10 Trace of FIM: ", np.log10(result.trace)) # print("log10 Determinant of FIM: ", np.log10(result.det)) # test result - relative_error_trace = abs(np.log10(result.trace) - 6.815) + expected_log10_trace = 6.815 + log10_trace = np.log10(result.trace) + relative_error_trace = abs(log10_trace - 6.815) assert ( relative_error_trace < 0.01 - ), "log10(tr(FIM)) regression test failed, answer does not match previous result" + ), "log10(tr(FIM)) regression test failed, answer "+str(round(log10_trace,3))+" does not match expected answer of "+str(expected_log10_trace) - relative_error_det = abs(np.log10(result.det) - 18.719) + expected_log10_det = 18.719 + log10_det = np.log10(result.det) + relative_error_det = abs(log10_det - 18.719) assert ( relative_error_det < 0.01 - ), "log10(det(FIM)) regression test failed, answer does not match previous result" - + ), "log10(det(FIM)) regression test failed, answer "+str(round(log10_det,3))+" does not match expected answer of "+str(expected_log10_det) if __name__ == "__main__": - main() + main(legacy_create_model_interface=False) diff --git a/pyomo/contrib/doe/tests/test_example.py b/pyomo/contrib/doe/tests/test_example.py index c92725efd3b..d9fb5e39ed4 100644 --- a/pyomo/contrib/doe/tests/test_example.py +++ b/pyomo/contrib/doe/tests/test_example.py @@ -68,11 +68,17 @@ def test_reactor_grid_search(self): @unittest.skipIf(not ipopt_available, "The 'ipopt' command is not available") @unittest.skipIf(not pandas_available, "pandas is not available") @unittest.skipIf(not numpy_available, "Numpy is not available") - def test_reactor_design(self): + def test_reactor_design_slim_create_model_interface(self): from pyomo.contrib.doe.examples import reactor_design - - reactor_design.main() - + reactor_design.main(legacy_create_model_interface=False) + + @unittest.skipIf(not ipopt_available, "The 'ipopt' command is not available") + @unittest.skipIf(not pandas_available, "pandas is not available") + @unittest.skipIf(not numpy_available, "Numpy is not available") + + def test_reactor_design_legacy_create_model_interface(self): + from pyomo.contrib.doe.examples import reactor_design + reactor_design.main(legacy_create_model_interface=True) 
if __name__ == "__main__": unittest.main() From 6b43724ce21480163e3f31589b001cf3c129d9ca Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Fri, 24 May 2024 21:39:41 -0400 Subject: [PATCH 058/113] Ran black --- pyomo/contrib/doe/doe.py | 16 ++++----- pyomo/contrib/doe/examples/reactor_design.py | 34 +++++++++++--------- pyomo/contrib/doe/tests/test_example.py | 6 ++-- 3 files changed, 30 insertions(+), 26 deletions(-) diff --git a/pyomo/contrib/doe/doe.py b/pyomo/contrib/doe/doe.py index 47fee913ba8..4ae57bb1030 100644 --- a/pyomo/contrib/doe/doe.py +++ b/pyomo/contrib/doe/doe.py @@ -44,6 +44,7 @@ import pyomo.contrib.parmest.utils as utils + class CalculationMode(Enum): sequential_finite = "sequential_finite" direct_kaug = "direct_kaug" @@ -118,7 +119,9 @@ def __init__( # check if create model function conforms to the original # Pyomo.DoE interface - model_option_arg = "model_option" in inspect.getfullargspec(self.create_model).args + model_option_arg = ( + "model_option" in inspect.getfullargspec(self.create_model).args + ) mod_arg = "mod" in inspect.getfullargspec(self.create_model).args if model_option_arg and mod_arg: self._original_create_model_interface = True @@ -656,9 +659,9 @@ def _create_block(self): ) else: # Add model on block without theta values - self.create_model(mod=mod, - model_option=ModelOptionLib.stage1, - **self.args) + self.create_model( + mod=mod, model_option=ModelOptionLib.stage1, **self.args + ) else: # Create a global model @@ -1381,10 +1384,7 @@ def _solve_doe(self, m, fix=False, opt_option=None): # either fix or unfix the design variables mod = self._fix_design( - m, - self.design_values, - fix_opt=fix, - optimize_option=opt_option + m, self.design_values, fix_opt=fix, optimize_option=opt_option ) # if user gives solver, use this solver. if not, use default IPOPT solver diff --git a/pyomo/contrib/doe/examples/reactor_design.py b/pyomo/contrib/doe/examples/reactor_design.py index 5c1ecb8a79d..82aa33bb5a9 100644 --- a/pyomo/contrib/doe/examples/reactor_design.py +++ b/pyomo/contrib/doe/examples/reactor_design.py @@ -58,17 +58,18 @@ def create_model_legacy(mod=None, model_option=None): raise ValueError( "model_option needs to be defined as parmest, stage1, or stage2." 
) - + model = _create_model_details(model) - + if return_m: return model - + def create_model(): model = pyo.ConcreteModel() return _create_model_details(model) + def _create_model_details(model): # Rate constants @@ -152,15 +153,9 @@ def main(legacy_create_model_interface=False): create_model_ = create_model doe1 = DesignOfExperiments( - theta_values, - exp_design, - measurements, - create_model_, - prior_FIM=None + theta_values, exp_design, measurements, create_model_, prior_FIM=None ) - - result = doe1.compute_FIM( mode="sequential_finite", # calculation mode scale_nominal_param_value=True, # scale nominal parameter value @@ -180,16 +175,23 @@ def main(legacy_create_model_interface=False): expected_log10_trace = 6.815 log10_trace = np.log10(result.trace) relative_error_trace = abs(log10_trace - 6.815) - assert ( - relative_error_trace < 0.01 - ), "log10(tr(FIM)) regression test failed, answer "+str(round(log10_trace,3))+" does not match expected answer of "+str(expected_log10_trace) + assert relative_error_trace < 0.01, ( + "log10(tr(FIM)) regression test failed, answer " + + str(round(log10_trace, 3)) + + " does not match expected answer of " + + str(expected_log10_trace) + ) expected_log10_det = 18.719 log10_det = np.log10(result.det) relative_error_det = abs(log10_det - 18.719) - assert ( - relative_error_det < 0.01 - ), "log10(det(FIM)) regression test failed, answer "+str(round(log10_det,3))+" does not match expected answer of "+str(expected_log10_det) + assert relative_error_det < 0.01, ( + "log10(det(FIM)) regression test failed, answer " + + str(round(log10_det, 3)) + + " does not match expected answer of " + + str(expected_log10_det) + ) + if __name__ == "__main__": main(legacy_create_model_interface=False) diff --git a/pyomo/contrib/doe/tests/test_example.py b/pyomo/contrib/doe/tests/test_example.py index d9fb5e39ed4..635bc3ed82e 100644 --- a/pyomo/contrib/doe/tests/test_example.py +++ b/pyomo/contrib/doe/tests/test_example.py @@ -70,15 +70,17 @@ def test_reactor_grid_search(self): @unittest.skipIf(not numpy_available, "Numpy is not available") def test_reactor_design_slim_create_model_interface(self): from pyomo.contrib.doe.examples import reactor_design + reactor_design.main(legacy_create_model_interface=False) - + @unittest.skipIf(not ipopt_available, "The 'ipopt' command is not available") @unittest.skipIf(not pandas_available, "pandas is not available") @unittest.skipIf(not numpy_available, "Numpy is not available") - def test_reactor_design_legacy_create_model_interface(self): from pyomo.contrib.doe.examples import reactor_design + reactor_design.main(legacy_create_model_interface=True) + if __name__ == "__main__": unittest.main() From 8cbc854580d3bc0e5871396da7943b3669fc2b85 Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Sat, 25 May 2024 19:59:38 -0400 Subject: [PATCH 059/113] Added optimization regression test for reactor design example --- pyomo/contrib/doe/examples/reactor_design.py | 47 ++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/pyomo/contrib/doe/examples/reactor_design.py b/pyomo/contrib/doe/examples/reactor_design.py index 82aa33bb5a9..3fc5d805c12 100644 --- a/pyomo/contrib/doe/examples/reactor_design.py +++ b/pyomo/contrib/doe/examples/reactor_design.py @@ -192,6 +192,53 @@ def main(legacy_create_model_interface=False): + str(expected_log10_det) ) + doe2 = DesignOfExperiments( + theta_values, + exp_design, + measurements, + create_model_, + prior_FIM=None + ) + + square_result2, optimize_result2 = doe2.stochastic_program( + 
if_optimize=True, + if_Cholesky=True, + scale_nominal_param_value=True, + objective_option="det", + jac_initial=result.jaco_information.copy(), + step = 0.1 + ) + + optimize_result2.result_analysis() + log_det = np.log(optimize_result2.det) + print("log(det) = ",round(log_det,3)) + log_det_expected = 45.199 + assert abs(log_det - log_det_expected) < 0.01, "log(det) regression test failed" + + doe3 = DesignOfExperiments( + theta_values, + exp_design, + measurements, + create_model_, + prior_FIM=None + ) + + square_result3, optimize_result3 = doe3.stochastic_program( + if_optimize=True, + scale_nominal_param_value=True, + objective_option="trace", + jac_initial=result.jaco_information.copy(), + step = 0.1 + ) + + optimize_result3.result_analysis() + log_trace = np.log(optimize_result3.trace) + log_trace_expected = 17.29 + print("log(trace) = ",round(log_trace,3)) + assert abs(log_trace - log_trace_expected) < 0.01, "log(trace) regression test failed" + + + if __name__ == "__main__": main(legacy_create_model_interface=False) From 27ea04185c739511dd04bb78dd823f8517e473f1 Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Sat, 25 May 2024 19:59:58 -0400 Subject: [PATCH 060/113] Ran black --- pyomo/contrib/doe/examples/reactor_design.py | 26 +++++++------------- 1 file changed, 9 insertions(+), 17 deletions(-) diff --git a/pyomo/contrib/doe/examples/reactor_design.py b/pyomo/contrib/doe/examples/reactor_design.py index 3fc5d805c12..55e23c4a955 100644 --- a/pyomo/contrib/doe/examples/reactor_design.py +++ b/pyomo/contrib/doe/examples/reactor_design.py @@ -193,11 +193,7 @@ def main(legacy_create_model_interface=False): ) doe2 = DesignOfExperiments( - theta_values, - exp_design, - measurements, - create_model_, - prior_FIM=None + theta_values, exp_design, measurements, create_model_, prior_FIM=None ) square_result2, optimize_result2 = doe2.stochastic_program( @@ -206,21 +202,17 @@ def main(legacy_create_model_interface=False): scale_nominal_param_value=True, objective_option="det", jac_initial=result.jaco_information.copy(), - step = 0.1 + step=0.1, ) optimize_result2.result_analysis() log_det = np.log(optimize_result2.det) - print("log(det) = ",round(log_det,3)) + print("log(det) = ", round(log_det, 3)) log_det_expected = 45.199 assert abs(log_det - log_det_expected) < 0.01, "log(det) regression test failed" doe3 = DesignOfExperiments( - theta_values, - exp_design, - measurements, - create_model_, - prior_FIM=None + theta_values, exp_design, measurements, create_model_, prior_FIM=None ) square_result3, optimize_result3 = doe3.stochastic_program( @@ -228,17 +220,17 @@ def main(legacy_create_model_interface=False): scale_nominal_param_value=True, objective_option="trace", jac_initial=result.jaco_information.copy(), - step = 0.1 + step=0.1, ) optimize_result3.result_analysis() log_trace = np.log(optimize_result3.trace) log_trace_expected = 17.29 - print("log(trace) = ",round(log_trace,3)) - assert abs(log_trace - log_trace_expected) < 0.01, "log(trace) regression test failed" + print("log(trace) = ", round(log_trace, 3)) + assert ( + abs(log_trace - log_trace_expected) < 0.01 + ), "log(trace) regression test failed" - - if __name__ == "__main__": main(legacy_create_model_interface=False) From 9bed23cc6aaf42f458b6c53423ba1f1743f052c2 Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Sat, 25 May 2024 20:05:06 -0400 Subject: [PATCH 061/113] Easiest implementation of exploiting symmetry. 
For the reactor design example, this did not change the D-opt iterations but reduced the number needed for A-opt from ~84 to ~54. --- pyomo/contrib/doe/doe.py | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/pyomo/contrib/doe/doe.py b/pyomo/contrib/doe/doe.py index 4ae57bb1030..9033ed28cf7 100644 --- a/pyomo/contrib/doe/doe.py +++ b/pyomo/contrib/doe/doe.py @@ -1175,17 +1175,21 @@ def fim_rule(m, p, q): p: parameter q: parameter """ - return ( - m.fim[p, q] - == sum( - 1 - / self.measurement_vars.variance[n] - * m.sensitivity_jacobian[p, n] - * m.sensitivity_jacobian[q, n] - for n in model.measured_variables + + if p > q: + return m.fim[p, q] == m.fim[q, p] + else: + return ( + m.fim[p, q] + == sum( + 1 + / self.measurement_vars.variance[n] + * m.sensitivity_jacobian[p, n] + * m.sensitivity_jacobian[q, n] + for n in model.measured_variables + ) + + m.priorFIM[p, q] * self.fim_scale_constant_value ) - + m.priorFIM[p, q] * self.fim_scale_constant_value - ) model.jacobian_constraint = pyo.Constraint( model.regression_parameters, model.measured_variables, rule=jacobian_rule From bfa597e9b920cf6a7cb9acee5f3650af9fb82b4f Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Sat, 25 May 2024 20:39:13 -0400 Subject: [PATCH 062/113] Added option to only compute lower elements of FIM. --- pyomo/contrib/doe/doe.py | 19 +++++++++++++++++-- pyomo/contrib/doe/examples/reactor_design.py | 5 +++-- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/pyomo/contrib/doe/doe.py b/pyomo/contrib/doe/doe.py index 9033ed28cf7..752479be269 100644 --- a/pyomo/contrib/doe/doe.py +++ b/pyomo/contrib/doe/doe.py @@ -74,6 +74,7 @@ def __init__( discretize_model=None, args=None, logger_level=logging.INFO, + only_compute_fim_lower=True, ): """ This package enables model-based design of experiments analysis with Pyomo. @@ -106,6 +107,8 @@ def __init__( Additional arguments for the create_model function. logger_level: Specify the level of the logger. Change to logging.DEBUG for all messages. + only_compute_fim_lower: + If True, only the lower triangle of the FIM is computed. Default is True. 
""" # parameters @@ -157,6 +160,8 @@ def __init__( self.logger = logging.getLogger(__name__) self.logger.setLevel(level=logger_level) + self.only_compute_fim_lower = only_compute_fim_lower + def _check_inputs(self): """ Check if the prior FIM is N*N matrix, where N is the number of parameter @@ -342,6 +347,7 @@ def compute_FIM( extract_single_model=None, formula="central", step=0.001, + only_compute_fim_lower=False, ): """ This function calculates the Fisher information matrix (FIM) using sensitivity information obtained @@ -1019,6 +1025,12 @@ def _create_doe_model(self, no_obj=True): ------- model: the DOE model """ + + # Developer recommendation: use the Cholesky decomposition for D-optimality + # The explicit formula is available for benchmarking purposes and is NOT recommended + if self.only_compute_fim_lower and self.objective_option == ObjectiveLib.det and not self.Cholesky_option: + raise ValueError("Cannot compute determinant with explicit formula if only_compute_fim_lower is True.") + model = self._create_block() # variables for jacobian and FIM @@ -1177,7 +1189,10 @@ def fim_rule(m, p, q): """ if p > q: - return m.fim[p, q] == m.fim[q, p] + if self.only_compute_fim_lower: + return pyo.Constraint.Skip + else: + return m.fim[p, q] == m.fim[q, p] else: return ( m.fim[p, q] @@ -1260,7 +1275,7 @@ def trace_calc(m): return m.trace == sum(m.fim[j, j] for j in m.regression_parameters) def det_general(m): - r"""Calculate determinant. Can be applied to FIM of any size. + """Calculate determinant. Can be applied to FIM of any size. det(A) = sum_{\sigma \in \S_n} (sgn(\sigma) * \Prod_{i=1}^n a_{i,\sigma_i}) Use permutation() to get permutations, sgn() to get signature """ diff --git a/pyomo/contrib/doe/examples/reactor_design.py b/pyomo/contrib/doe/examples/reactor_design.py index 55e23c4a955..0fed262d74f 100644 --- a/pyomo/contrib/doe/examples/reactor_design.py +++ b/pyomo/contrib/doe/examples/reactor_design.py @@ -162,7 +162,7 @@ def main(legacy_create_model_interface=False): formula="central", # formula for finite difference ) - doe1.model.pprint() + # doe1.model.pprint() result.result_analysis() @@ -208,7 +208,8 @@ def main(legacy_create_model_interface=False): optimize_result2.result_analysis() log_det = np.log(optimize_result2.det) print("log(det) = ", round(log_det, 3)) - log_det_expected = 45.199 + #log_det_expected = 45.199 + log_det_expected = 44.362 assert abs(log_det - log_det_expected) < 0.01, "log(det) regression test failed" doe3 = DesignOfExperiments( From c7c47a92b493ade3ea512a9e53f025c90042415b Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Sat, 25 May 2024 20:48:15 -0400 Subject: [PATCH 063/113] Changed objective scaling from log to log10. This is easier to interpret. 
--- pyomo/contrib/doe/doe.py | 6 +++--- pyomo/contrib/doe/examples/reactor_design.py | 9 ++++----- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/pyomo/contrib/doe/doe.py b/pyomo/contrib/doe/doe.py index 752479be269..ef4a7c7c194 100644 --- a/pyomo/contrib/doe/doe.py +++ b/pyomo/contrib/doe/doe.py @@ -1311,7 +1311,7 @@ def det_general(m): m.regression_parameters, m.regression_parameters, rule=cholesky_imp ) m.Obj = pyo.Objective( - expr=2 * sum(pyo.log(m.L_ele[j, j]) for j in m.regression_parameters), + expr=2 * sum(pyo.log10(m.L_ele[j, j]) for j in m.regression_parameters), sense=pyo.maximize, ) @@ -1319,13 +1319,13 @@ def det_general(m): # if not cholesky but determinant, calculating det and evaluate the OBJ with det m.det = pyo.Var(initialize=np.linalg.det(fim), bounds=(small_number, None)) m.det_rule = pyo.Constraint(rule=det_general) - m.Obj = pyo.Objective(expr=pyo.log(m.det), sense=pyo.maximize) + m.Obj = pyo.Objective(expr=pyo.log10(m.det), sense=pyo.maximize) elif self.objective_option == ObjectiveLib.trace: # if not determinant or cholesky, calculating the OBJ with trace m.trace = pyo.Var(initialize=np.trace(fim), bounds=(small_number, None)) m.trace_rule = pyo.Constraint(rule=trace_calc) - m.Obj = pyo.Objective(expr=pyo.log(m.trace), sense=pyo.maximize) + m.Obj = pyo.Objective(expr=pyo.log10(m.trace), sense=pyo.maximize) # m.Obj = pyo.Objective(expr=m.trace, sense=pyo.maximize) elif self.objective_option == ObjectiveLib.zero: diff --git a/pyomo/contrib/doe/examples/reactor_design.py b/pyomo/contrib/doe/examples/reactor_design.py index 0fed262d74f..67d6ff02fd2 100644 --- a/pyomo/contrib/doe/examples/reactor_design.py +++ b/pyomo/contrib/doe/examples/reactor_design.py @@ -206,10 +206,9 @@ def main(legacy_create_model_interface=False): ) optimize_result2.result_analysis() - log_det = np.log(optimize_result2.det) + log_det = np.log10(optimize_result2.det) print("log(det) = ", round(log_det, 3)) - #log_det_expected = 45.199 - log_det_expected = 44.362 + log_det_expected = 19.266 assert abs(log_det - log_det_expected) < 0.01, "log(det) regression test failed" doe3 = DesignOfExperiments( @@ -225,8 +224,8 @@ def main(legacy_create_model_interface=False): ) optimize_result3.result_analysis() - log_trace = np.log(optimize_result3.trace) - log_trace_expected = 17.29 + log_trace = np.log10(optimize_result3.trace) + log_trace_expected = 7.509 print("log(trace) = ", round(log_trace, 3)) assert ( abs(log_trace - log_trace_expected) < 0.01 From 48350655cd1b2e0b15909e976dfd1ddb097ada7d Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Sat, 25 May 2024 20:49:15 -0400 Subject: [PATCH 064/113] Ran black --- pyomo/contrib/doe/doe.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/pyomo/contrib/doe/doe.py b/pyomo/contrib/doe/doe.py index ef4a7c7c194..d7d7cbd1395 100644 --- a/pyomo/contrib/doe/doe.py +++ b/pyomo/contrib/doe/doe.py @@ -1028,8 +1028,14 @@ def _create_doe_model(self, no_obj=True): # Developer recommendation: use the Cholesky decomposition for D-optimality # The explicit formula is available for benchmarking purposes and is NOT recommended - if self.only_compute_fim_lower and self.objective_option == ObjectiveLib.det and not self.Cholesky_option: - raise ValueError("Cannot compute determinant with explicit formula if only_compute_fim_lower is True.") + if ( + self.only_compute_fim_lower + and self.objective_option == ObjectiveLib.det + and not self.Cholesky_option + ): + raise ValueError( + "Cannot compute determinant with explicit formula if 
only_compute_fim_lower is True." + ) model = self._create_block() From 67afe32c30d89c12efadd2922b2af0f4f944821f Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Sun, 26 May 2024 15:32:41 -0400 Subject: [PATCH 065/113] Divided reaction kinetics unit tests. Changed the assert statements to check the objective instead of the optimal solution. --- pyomo/contrib/doe/tests/test_example.py | 2 +- .../contrib/doe/tests/test_reactor_example.py | 204 +++++++++--------- 2 files changed, 109 insertions(+), 97 deletions(-) diff --git a/pyomo/contrib/doe/tests/test_example.py b/pyomo/contrib/doe/tests/test_example.py index 635bc3ed82e..e4ffbe89142 100644 --- a/pyomo/contrib/doe/tests/test_example.py +++ b/pyomo/contrib/doe/tests/test_example.py @@ -41,7 +41,7 @@ ipopt_available = SolverFactory("ipopt").available() -class TestReactorExample(unittest.TestCase): +class TestReactorExamples(unittest.TestCase): @unittest.skipIf(not ipopt_available, "The 'ipopt' command is not available") @unittest.skipIf(not scipy_available, "scipy is not available") @unittest.skipIf(not numpy_available, "Numpy is not available") diff --git a/pyomo/contrib/doe/tests/test_reactor_example.py b/pyomo/contrib/doe/tests/test_reactor_example.py index 3fca93b5ded..d8d28a03d76 100644 --- a/pyomo/contrib/doe/tests/test_reactor_example.py +++ b/pyomo/contrib/doe/tests/test_reactor_example.py @@ -37,10 +37,9 @@ ipopt_available = SolverFactory("ipopt").available() -class Test_example_options(unittest.TestCase): - """Test the three options in the kinetics example.""" - - def test_setUP(self): +class Test_Reaction_Kinetics_Example(unittest.TestCase): + def test_reaction_kinetics_create_model(self): + """Test the three options in the kinetics example.""" # parmest option mod = create_model(model_option="parmest") @@ -56,16 +55,116 @@ def test_setUP(self): create_model(model_option="stage2") with self.assertRaises(ValueError): - create_model(model_option="NotDefine") + create_model(model_option="NotDefined") + + @unittest.skipIf(not ipopt_available, "The 'ipopt' solver is not available") + @unittest.skipIf(not numpy_available, "Numpy is not available") + @unittest.skipIf(not pandas_available, "Pandas is not available") + def test_kinetics_example_sequential_finite_then_optimize(self): + """Test the kinetics example with sequential_finite mode and then optimization""" + doe_object = self.specify_reaction_kinetics() + + # Test FIM calculation at nominal values + sensi_opt = "sequential_finite" + result = doe_object.compute_FIM( + mode=sensi_opt, scale_nominal_param_value=True, formula="central" + ) + result.result_analysis() + self.assertAlmostEqual(np.log10(result.trace), 2.7885, places=2) + self.assertAlmostEqual(np.log10(result.det), 2.8218, places=2) + self.assertAlmostEqual(np.log10(result.min_eig), -1.0123, places=2) + + ### check subset feature + sub_name = "C" + sub_indices = {0: ["CB", "CC"], 1: [0.125, 0.25, 0.5, 0.75, 0.875]} + + measure_subset = MeasurementVariables() + measure_subset.add_variables( + sub_name, indices=sub_indices, time_index_position=1 + ) + sub_result = result.subset(measure_subset) + sub_result.result_analysis() + self.assertAlmostEqual(np.log10(sub_result.trace), 2.5535, places=2) + self.assertAlmostEqual(np.log10(sub_result.det), 1.3464, places=2) + self.assertAlmostEqual(np.log10(sub_result.min_eig), -1.5386, places=2) -class Test_doe_object(unittest.TestCase): - """Test the kinetics example with both the sequential_finite mode and the direct_kaug mode""" + ### Test stochastic_program mode + # Prior 
information (scaled FIM with T=500 and T=300 experiments) + prior = np.asarray( + [ + [28.67892806, 5.41249739, -81.73674601, -24.02377324], + [5.41249739, 26.40935036, -12.41816477, -139.23992532], + [-81.73674601, -12.41816477, 240.46276004, 58.76422806], + [-24.02377324, -139.23992532, 58.76422806, 767.25584508], + ] + ) + doe_object2 = self.specify_reaction_kinetics(prior=prior) + + square_result, optimize_result = doe_object2.stochastic_program( + if_optimize=True, + if_Cholesky=True, + scale_nominal_param_value=True, + objective_option="det", + L_initial=np.linalg.cholesky(prior), + jac_initial=result.jaco_information.copy(), + tee_opt=True, + ) + + optimize_result.result_analysis() + ## 2024-May-26: changing this to test the objective instead of the optimal solution + ## It's possible the objective is flat and the optimal solution is not unique + # self.assertAlmostEqual(value(optimize_result.model.CA0[0]), 5.0, places=2) + # self.assertAlmostEqual(value(optimize_result.model.T[0.5]), 300, places=2) + self.assertAlmostEqual(np.log10(optimize_result.det), 5.744, places=2) + + square_result, optimize_result = doe_object2.stochastic_program( + if_optimize=True, + scale_nominal_param_value=True, + objective_option="trace", + jac_initial=result.jaco_information.copy(), + tee_opt=True, + ) + + optimize_result.result_analysis() + ## 2024-May-26: changing this to test the objective instead of the optimal solution + ## It's possible the objective is flat and the optimal solution is not unique + # self.assertAlmostEqual(value(optimize_result.model.CA0[0]), 5.0, places=2) + # self.assertAlmostEqual(value(optimize_result.model.T[0.5]), 300, places=2) + self.assertAlmostEqual(np.log10(optimize_result.trace), 3.340, places=2) @unittest.skipIf(not ipopt_available, "The 'ipopt' solver is not available") @unittest.skipIf(not numpy_available, "Numpy is not available") @unittest.skipIf(not pandas_available, "Pandas is not available") - def test_setUP(self): + def test_kinetics_example_direct_k_aug(self): + doe_object = self.specify_reaction_kinetics() + + # Test FIM calculation at nominal values + sensi_opt = "direct_kaug" + result = doe_object.compute_FIM( + mode=sensi_opt, scale_nominal_param_value=True, formula="central" + ) + result.result_analysis() + self.assertAlmostEqual(np.log10(result.trace), 2.7211, places=2) + self.assertAlmostEqual(np.log10(result.det), 2.0845, places=2) + self.assertAlmostEqual(np.log10(result.min_eig), -1.3510, places=2) + + ### check subset feature + sub_name = "C" + sub_indices = {0: ["CB", "CC"], 1: [0.125, 0.25, 0.5, 0.75, 0.875]} + + measure_subset = MeasurementVariables() + measure_subset.add_variables( + sub_name, indices=sub_indices, time_index_position=1 + ) + sub_result = result.subset(measure_subset) + sub_result.result_analysis() + + self.assertAlmostEqual(np.log10(sub_result.trace), 2.5535, places=2) + self.assertAlmostEqual(np.log10(sub_result.det), 1.3464, places=2) + self.assertAlmostEqual(np.log10(sub_result.min_eig), -1.5386, places=2) + + def specify_reaction_kinetics(self, prior=None): ### Define inputs # Control time set [h] t_control = [0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1] @@ -111,9 +210,6 @@ def test_setUP(self): upper_bounds=700, ) - ### Test sequential_finite mode - sensi_opt = "sequential_finite" - design_names = exp_design.variable_names exp1 = [5, 570, 300, 300, 300, 300, 300, 300, 300, 300] exp1_design_dict = dict(zip(design_names, exp1)) @@ -126,94 +222,10 @@ def test_setUP(self): measurements, create_model, 
discretize_model=disc_for_measure, - ) - - result = doe_object.compute_FIM( - mode=sensi_opt, scale_nominal_param_value=True, formula="central" - ) - - result.result_analysis() - - self.assertAlmostEqual(np.log10(result.trace), 2.7885, places=2) - self.assertAlmostEqual(np.log10(result.det), 2.8218, places=2) - self.assertAlmostEqual(np.log10(result.min_eig), -1.0123, places=2) - - ### check subset feature - sub_name = "C" - sub_indices = {0: ["CB", "CC"], 1: [0.125, 0.25, 0.5, 0.75, 0.875]} - - measure_subset = MeasurementVariables() - measure_subset.add_variables( - sub_name, indices=sub_indices, time_index_position=1 - ) - sub_result = result.subset(measure_subset) - sub_result.result_analysis() - - self.assertAlmostEqual(np.log10(sub_result.trace), 2.5535, places=2) - self.assertAlmostEqual(np.log10(sub_result.det), 1.3464, places=2) - self.assertAlmostEqual(np.log10(sub_result.min_eig), -1.5386, places=2) - - ### Test direct_kaug mode - sensi_opt = "direct_kaug" - # Define a new experiment - - exp1 = [5, 570, 400, 300, 300, 300, 300, 300, 300, 300] - exp1_design_dict = dict(zip(design_names, exp1)) - exp_design.update_values(exp1_design_dict) - - doe_object = DesignOfExperiments( - parameter_dict, - exp_design, - measurements, - create_model, - discretize_model=disc_for_measure, - ) - - result = doe_object.compute_FIM( - mode=sensi_opt, scale_nominal_param_value=True, formula="central" - ) - - result.result_analysis() - - self.assertAlmostEqual(np.log10(result.trace), 2.7211, places=2) - self.assertAlmostEqual(np.log10(result.det), 2.0845, places=2) - self.assertAlmostEqual(np.log10(result.min_eig), -1.3510, places=2) - - ### Test stochastic_program mode - - exp1 = [5, 570, 300, 300, 300, 300, 300, 300, 300, 300] - exp1_design_dict = dict(zip(design_names, exp1)) - exp_design.update_values(exp1_design_dict) - - # add a prior information (scaled FIM with T=500 and T=300 experiments) - prior = np.asarray( - [ - [28.67892806, 5.41249739, -81.73674601, -24.02377324], - [5.41249739, 26.40935036, -12.41816477, -139.23992532], - [-81.73674601, -12.41816477, 240.46276004, 58.76422806], - [-24.02377324, -139.23992532, 58.76422806, 767.25584508], - ] - ) - - doe_object2 = DesignOfExperiments( - parameter_dict, - exp_design, - measurements, - create_model, prior_FIM=prior, - discretize_model=disc_for_measure, - ) - - square_result, optimize_result = doe_object2.stochastic_program( - if_optimize=True, - if_Cholesky=True, - scale_nominal_param_value=True, - objective_option="det", - L_initial=np.linalg.cholesky(prior), ) - self.assertAlmostEqual(value(optimize_result.model.CA0[0]), 5.0, places=2) - self.assertAlmostEqual(value(optimize_result.model.T[0.5]), 300, places=2) + return doe_object if __name__ == "__main__": From 94e8ebfdb28fd1afa15b688d704e5cd7086b3a16 Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Mon, 27 May 2024 09:41:57 -0400 Subject: [PATCH 066/113] Fix elements above the diagonal of FIM; this ensures the correct number of degrees of freedom. 
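Some background on the degree-of-freedom count mentioned in the subject: a symmetric FIM over K parameters has only K*(K+1)/2 independent entries. For the three rate constants k1, k2, k3 of the reactor design example that is 6 of the 9 fim[p, q] entries, so the 3 strictly-upper-triangle variables are fixed at 0.0 instead of being left free. If the full matrix is needed afterwards, it can be recovered by mirroring the lower triangle; a small sketch with made-up numbers:

    import numpy as np

    fim_lower = np.array([[28.7, 0.0], [5.4, 26.4]])  # upper triangle left at zero
    fim_full = fim_lower + np.tril(fim_lower, -1).T  # mirror the strict lower triangle
    # fim_full == [[28.7, 5.4], [5.4, 26.4]], a symmetric 2x2 matrix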
--- pyomo/contrib/doe/doe.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/pyomo/contrib/doe/doe.py b/pyomo/contrib/doe/doe.py index d7d7cbd1395..7ac754aaa3d 100644 --- a/pyomo/contrib/doe/doe.py +++ b/pyomo/contrib/doe/doe.py @@ -1182,11 +1182,7 @@ def read_prior(m, i, j): model.regression_parameters, model.regression_parameters, rule=read_prior ) - # TODO: explore exploiting the symmetry of the FIM matrix # The off-diagonal elements are symmetric, thus only half of the elements need to be calculated - # Syntax challenge: determine the order of p and q, i.e., if p > q, then replace with - # equality constraint fim[p, q] == fim[q, p] - def fim_rule(m, p, q): """ m: Pyomo model @@ -1219,6 +1215,15 @@ def fim_rule(m, p, q): model.regression_parameters, model.regression_parameters, rule=fim_rule ) + if self.only_compute_fim_lower: + # Fix the upper half of the FIM matrix elements to be 0.0. + # This eliminates extra variables and ensures the expected number of + # degrees of freedom in the optimization problem. + for p in model.regression_parameters: + for q in model.regression_parameters: + if p > q: + model.fim[p, q].fix(0.0) + return model def _add_objective(self, m): From e80d74f6e72db362bcc3afe56264b1699fc518c5 Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Mon, 27 May 2024 09:45:17 -0400 Subject: [PATCH 067/113] Updated test. --- pyomo/contrib/doe/tests/test_reactor_example.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyomo/contrib/doe/tests/test_reactor_example.py b/pyomo/contrib/doe/tests/test_reactor_example.py index d8d28a03d76..aba20c77fa6 100644 --- a/pyomo/contrib/doe/tests/test_reactor_example.py +++ b/pyomo/contrib/doe/tests/test_reactor_example.py @@ -145,7 +145,7 @@ def test_kinetics_example_direct_k_aug(self): mode=sensi_opt, scale_nominal_param_value=True, formula="central" ) result.result_analysis() - self.assertAlmostEqual(np.log10(result.trace), 2.7211, places=2) + self.assertAlmostEqual(np.log10(result.trace), 2.789, places=2) self.assertAlmostEqual(np.log10(result.det), 2.0845, places=2) self.assertAlmostEqual(np.log10(result.min_eig), -1.3510, places=2) From c6655b677954c598a8dc14aa1d8c3f82ad57b035 Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Mon, 27 May 2024 10:09:29 -0400 Subject: [PATCH 068/113] Updated test that uses k_aug. --- pyomo/contrib/doe/tests/test_reactor_example.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyomo/contrib/doe/tests/test_reactor_example.py b/pyomo/contrib/doe/tests/test_reactor_example.py index aba20c77fa6..d00b87d3b5f 100644 --- a/pyomo/contrib/doe/tests/test_reactor_example.py +++ b/pyomo/contrib/doe/tests/test_reactor_example.py @@ -146,7 +146,7 @@ def test_kinetics_example_direct_k_aug(self): ) result.result_analysis() self.assertAlmostEqual(np.log10(result.trace), 2.789, places=2) - self.assertAlmostEqual(np.log10(result.det), 2.0845, places=2) + self.assertAlmostEqual(np.log10(result.det), 2.8247, places=2) self.assertAlmostEqual(np.log10(result.min_eig), -1.3510, places=2) ### check subset feature From c262c5fe8defdac3b4090fca5d4e88722ab4850b Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Mon, 27 May 2024 11:05:02 -0400 Subject: [PATCH 069/113] Hopefully, this is the final update to the k_aug test. 
--- pyomo/contrib/doe/tests/test_reactor_example.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyomo/contrib/doe/tests/test_reactor_example.py b/pyomo/contrib/doe/tests/test_reactor_example.py index d00b87d3b5f..19fb4e61820 100644 --- a/pyomo/contrib/doe/tests/test_reactor_example.py +++ b/pyomo/contrib/doe/tests/test_reactor_example.py @@ -147,7 +147,7 @@ def test_kinetics_example_direct_k_aug(self): result.result_analysis() self.assertAlmostEqual(np.log10(result.trace), 2.789, places=2) self.assertAlmostEqual(np.log10(result.det), 2.8247, places=2) - self.assertAlmostEqual(np.log10(result.min_eig), -1.3510, places=2) + self.assertAlmostEqual(np.log10(result.min_eig), -1.0112, places=2) ### check subset feature sub_name = "C" From e8c098e40bfa9cc80cb0eff50f20f2040d06cdc8 Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Mon, 27 May 2024 18:20:32 -0400 Subject: [PATCH 070/113] Added error check if there are no measurements. This mistake came up a few times in the tutorial. --- pyomo/contrib/doe/doe.py | 8 ++++++++ pyomo/contrib/doe/examples/fim_doe_tutorial.ipynb | 1 + pyomo/contrib/doe/tests/test_fim_doe.py | 14 ++++++++++++++ 3 files changed, 23 insertions(+) diff --git a/pyomo/contrib/doe/doe.py b/pyomo/contrib/doe/doe.py index 7ac754aaa3d..29e45ec4f5c 100644 --- a/pyomo/contrib/doe/doe.py +++ b/pyomo/contrib/doe/doe.py @@ -139,6 +139,14 @@ def __init__( self.measurement_vars = measurement_vars self.measure_name = self.measurement_vars.variable_names + if ( + self.measurement_vars.variable_names is None + or not self.measurement_vars.variable_names + ): + raise ValueError( + "There are no measurement variables. Check for a modeling mistake." + ) + # check if user-defined solver is given if solver: self.solver = solver diff --git a/pyomo/contrib/doe/examples/fim_doe_tutorial.ipynb b/pyomo/contrib/doe/examples/fim_doe_tutorial.ipynb index 12d5a610db4..36ec42fbe49 100644 --- a/pyomo/contrib/doe/examples/fim_doe_tutorial.ipynb +++ b/pyomo/contrib/doe/examples/fim_doe_tutorial.ipynb @@ -87,6 +87,7 @@ "if \"google.colab\" in sys.modules:\n", " !wget \"https://raw.githubusercontent.com/IDAES/idaes-pse/main/scripts/colab_helper.py\"\n", " import colab_helper\n", + "\n", " colab_helper.install_idaes()\n", " colab_helper.install_ipopt()\n", "\n", diff --git a/pyomo/contrib/doe/tests/test_fim_doe.py b/pyomo/contrib/doe/tests/test_fim_doe.py index 05664b0a795..d9a8d60fdb4 100644 --- a/pyomo/contrib/doe/tests/test_fim_doe.py +++ b/pyomo/contrib/doe/tests/test_fim_doe.py @@ -144,6 +144,20 @@ def test_non_integer_index_keys(self): time_index_position="time", ) + def test_no_measurements(self): + """This test confirms that an error is thrown when the user forgets to add any measurements. + + It's okay to have no decision variables. With no measurement variables, the FIM is the zero matrix. + This (no measurements) is a common user mistake. + """ + + with self.assertRaises(ValueError): + decisions = DesignVariables() + measurements = MeasurementVariables() + DesignOfExperiments( + {}, decisions, measurements, create_model, disc_for_measure + ) + class TestDesignError(unittest.TestCase): def test(self): From 4957eea10eeab0c63c61883eaa68023debbd5cc9 Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Tue, 28 May 2024 08:17:26 -0400 Subject: [PATCH 071/113] Removed some LaTeX syntax from doc string. 
--- pyomo/contrib/doe/doe.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyomo/contrib/doe/doe.py b/pyomo/contrib/doe/doe.py index 29e45ec4f5c..5c7fe4f6e77 100644 --- a/pyomo/contrib/doe/doe.py +++ b/pyomo/contrib/doe/doe.py @@ -1295,7 +1295,7 @@ def trace_calc(m): def det_general(m): """Calculate determinant. Can be applied to FIM of any size. - det(A) = sum_{\sigma \in \S_n} (sgn(\sigma) * \Prod_{i=1}^n a_{i,\sigma_i}) + det(A) = sum_{sigma in Sn} (sgn(sigma) * Prod_{i=1}^n a_{i,sigma_i}) Use permutation() to get permutations, sgn() to get signature """ r_list = list(range(len(m.regression_parameters))) From 746737b08dd9d4143e099b07b44ac70b2cf89d15 Mon Sep 17 00:00:00 2001 From: John Siirola Date: Tue, 28 May 2024 07:51:18 -0600 Subject: [PATCH 072/113] Support simplifying/eliminating constant defined variables --- pyomo/repn/plugins/nl_writer.py | 27 ++++++++++++++++++--------- pyomo/repn/tests/ampl/test_nlv2.py | 10 ++++------ 2 files changed, 22 insertions(+), 15 deletions(-) diff --git a/pyomo/repn/plugins/nl_writer.py b/pyomo/repn/plugins/nl_writer.py index 9d66b37a429..21d64df2a18 100644 --- a/pyomo/repn/plugins/nl_writer.py +++ b/pyomo/repn/plugins/nl_writer.py @@ -548,7 +548,7 @@ def __init__(self, ostream, rowstream, colstream, config): self.external_functions = {} self.used_named_expressions = set() self.var_map = {} - self.var_id_to_nl = None + self.var_id_to_nl = {} self.sorter = FileDeterminism_to_SortComponents(config.file_determinism) self.visitor = AMPLRepnVisitor( self.template, @@ -1056,8 +1056,8 @@ def write(self, model): row_comments = [f'\t#{lbl}' for lbl in row_labels] col_labels = [labeler(var_map[_id]) for _id in variables] col_comments = [f'\t#{lbl}' for lbl in col_labels] - self.var_id_to_nl = { - _id: f'v{var_idx}{col_comments[var_idx]}' + id2nl = { + _id: f'v{var_idx}{col_comments[var_idx]}\n' for var_idx, _id in enumerate(variables) } # Write out the .row and .col data @@ -1070,10 +1070,12 @@ def write(self, model): else: row_labels = row_comments = [''] * (n_cons + n_objs) col_labels = col_comments = [''] * len(variables) - self.var_id_to_nl = { - _id: f"v{var_idx}" for var_idx, _id in enumerate(variables) - } + id2nl = {_id: f"v{var_idx}\n" for var_idx, _id in enumerate(variables)} + if self.var_id_to_nl: + self.var_id_to_nl.update(id2nl) + else: + self.var_id_to_nl = id2nl _vmap = self.var_id_to_nl if scale_model: template = self.template @@ -1934,7 +1936,7 @@ def _linear_presolve( # nonlinear portion of a defined variable is a constant # expression. This may now be the case if all the variables in # the original nonlinear expression have been fixed. - for expr, info, _ in self.subexpression_cache.values(): + for _id, (expr, info, sub) in self.subexpression_cache.items(): if not info.nonlinear: continue nl, args = info.nonlinear @@ -1949,12 +1951,19 @@ def _linear_presolve( # guarantee that the user actually initialized the # variables. So, we will fall back on parsing the (now # constant) nonlinear fragment and evaluating it. - if info.linear is None: - info.linear = {} info.nonlinear = None info.const += _evaluate_constant_nl( nl % tuple(template.const % eliminated_vars[i].const for i in args) ) + if not info.linear: + # This has resolved to a constant: the ASL will fail for + # defined variables containing ONLY a constant. 
We + # need to substitute the constant directly into the + # original constraint/objective expression(s) + info.linear = {} + self.used_named_expressions.discard(_id) + self.var_id_to_nl[_id] = self.template.const % info.const + self.subexpression_cache[_id] = (expr, info, [None, None, True]) return eliminated_cons, eliminated_vars diff --git a/pyomo/repn/tests/ampl/test_nlv2.py b/pyomo/repn/tests/ampl/test_nlv2.py index 09936b45bbc..b33bf6963dc 100644 --- a/pyomo/repn/tests/ampl/test_nlv2.py +++ b/pyomo/repn/tests/ampl/test_nlv2.py @@ -2393,16 +2393,14 @@ def test_presolve_fixes_nl_defined_variables(self): 0 0 0 0 0 #discrete variables: binary, integer, nonlinear (b,c,o) 1 0 #nonzeros in Jacobian, obj. gradient 2 1 #max name lengths: constraints, variables - 0 0 0 2 0 #common exprs: b,c,o,c1,o1 -V1 0 1 #nl(e) -n19 -V2 1 1 #e + 0 0 0 1 0 #common exprs: b,c,o,c1,o1 +V1 1 1 #e 0 1 -v1 #nl(e) +n19 C0 #c1 o2 #* n3 -v2 #e +v1 #e x0 #initial guess r #1 ranges (rhs's) 2 0 #c1 From 6aa55f2bf5990eae0549cd1b3cd03cb0e8f102ef Mon Sep 17 00:00:00 2001 From: John Siirola Date: Tue, 28 May 2024 07:52:10 -0600 Subject: [PATCH 073/113] Simplify handling of NL file newlines around var identifiers --- pyomo/repn/plugins/nl_writer.py | 17 ++++++++++------- pyomo/repn/tests/ampl/test_nlv2.py | 28 ++++++++++++++-------------- 2 files changed, 24 insertions(+), 21 deletions(-) diff --git a/pyomo/repn/plugins/nl_writer.py b/pyomo/repn/plugins/nl_writer.py index 21d64df2a18..e542a177247 100644 --- a/pyomo/repn/plugins/nl_writer.py +++ b/pyomo/repn/plugins/nl_writer.py @@ -1097,9 +1097,7 @@ def write(self, model): ub *= scale var_bounds[_id] = lb, ub # Update _vmap to output scaled variables in NL expressions - _vmap[_id] = ( - template.division + _vmap[_id] + '\n' + template.const % scale - ).rstrip() + _vmap[_id] = template.division + _vmap[_id] + template.const % scale # Update any eliminated variables to point to the (potentially # scaled) substituted variables @@ -1133,7 +1131,7 @@ def write(self, model): "linear subsystem that was removed from the model. " f"Setting '{var_map[_i]}' == {val}" ) - _vmap[_id] = nl.rstrip() % tuple(_vmap[_i] for _i in args) + _vmap[_id] = nl % tuple(_vmap[_i] for _i in args) r_lines = [None] * n_cons for idx, (con, expr_info, lb, ub) in enumerate(constraints): @@ -2020,7 +2018,7 @@ def _write_v_line(self, expr_id, k): lbl = '\t#%s' % info[0].name else: lbl = '' - self.var_id_to_nl[expr_id] = f"v{self.next_V_line_id}{lbl}" + self.var_id_to_nl[expr_id] = f"v{self.next_V_line_id}{lbl}\n" # Do NOT write out 0 coefficients here: doing so fouls up the # ASL's logic for calculating derivatives, leading to 'nan' in # the Hessian results. 
@@ -2348,7 +2346,9 @@ class text_nl_debug_template(object): less_equal = 'o23\t# le\n' equality = 'o24\t# eq\n' external_fcn = 'f%d %d%s\n' - var = '%s\n' # NOTE: to support scaling, we do NOT include the 'v' here + # NOTE: to support scaling and substitutions, we do NOT include the + # 'v' or the EOL here: + var = '%s' const = 'n%r\n' string = 'h%d:%s\n' monomial = product + const + var.replace('%', '%%') @@ -2392,7 +2392,10 @@ class text_nl_debug_template(object): def _strip_template_comments(vars_, base_): - vars_['unary'] = {k: v[: v.find('\t#')] + '\n' for k, v in base_.unary.items()} + vars_['unary'] = { + k: v[: v.find('\t#')] + '\n' if v[-1] == '\n' else '' + for k, v in base_.unary.items() + } for k, v in base_.__dict__.items(): if type(v) is str and '\t#' in v: v_lines = v.split('\n') diff --git a/pyomo/repn/tests/ampl/test_nlv2.py b/pyomo/repn/tests/ampl/test_nlv2.py index b33bf6963dc..9318f1c5d0f 100644 --- a/pyomo/repn/tests/ampl/test_nlv2.py +++ b/pyomo/repn/tests/ampl/test_nlv2.py @@ -98,7 +98,7 @@ def test_divide(self): self.assertEqual(repn.mult, 1) self.assertEqual(repn.const, 0) self.assertEqual(repn.linear, {}) - self.assertEqual(repn.nonlinear, ('o5\n%s\nn2\n', [id(m.x)])) + self.assertEqual(repn.nonlinear, ('o5\n%sn2\n', [id(m.x)])) m.p = 2 @@ -150,7 +150,7 @@ def test_divide(self): self.assertEqual(repn.mult, 1) self.assertEqual(repn.const, 0) self.assertEqual(repn.linear, {}) - self.assertEqual(repn.nonlinear, ('o2\nn0.5\no5\n%s\nn2\n', [id(m.x)])) + self.assertEqual(repn.nonlinear, ('o2\nn0.5\no5\n%sn2\n', [id(m.x)])) info = INFO() with LoggingIntercept() as LOG: @@ -160,7 +160,7 @@ def test_divide(self): self.assertEqual(repn.mult, 1) self.assertEqual(repn.const, 0) self.assertEqual(repn.linear, {}) - self.assertEqual(repn.nonlinear, ('o3\no43\n%s\n%s\n', [id(m.x), id(m.x)])) + self.assertEqual(repn.nonlinear, ('o3\no43\n%s%s', [id(m.x), id(m.x)])) def test_errors_divide_by_0(self): m = ConcreteModel() @@ -255,7 +255,7 @@ def test_pow(self): self.assertEqual(repn.mult, 1) self.assertEqual(repn.const, 0) self.assertEqual(repn.linear, {}) - self.assertEqual(repn.nonlinear, ('o5\n%s\nn2\n', [id(m.x)])) + self.assertEqual(repn.nonlinear, ('o5\n%sn2\n', [id(m.x)])) m.p = 1 info = INFO() @@ -542,7 +542,7 @@ def test_errors_propagate_nan(self): self.assertEqual(repn.mult, 1) self.assertEqual(repn.const, InvalidNumber(None)) self.assertEqual(repn.linear, {}) - self.assertEqual(repn.nonlinear[0], 'o16\no2\no2\n%s\n%s\n%s\n') + self.assertEqual(repn.nonlinear[0], 'o16\no2\no2\n%s%s%s') self.assertEqual(repn.nonlinear[1], [id(m.z[2]), id(m.z[3]), id(m.z[4])]) m.z[3].fix(float('nan')) @@ -592,7 +592,7 @@ def test_eval_pow(self): self.assertEqual(repn.mult, 1) self.assertEqual(repn.const, 0) self.assertEqual(repn.linear, {}) - self.assertEqual(repn.nonlinear, ('o5\n%s\nn0.5\n', [id(m.x)])) + self.assertEqual(repn.nonlinear, ('o5\n%sn0.5\n', [id(m.x)])) m.x.fix() info = INFO() @@ -617,7 +617,7 @@ def test_eval_abs(self): self.assertEqual(repn.mult, 1) self.assertEqual(repn.const, 0) self.assertEqual(repn.linear, {}) - self.assertEqual(repn.nonlinear, ('o15\n%s\n', [id(m.x)])) + self.assertEqual(repn.nonlinear, ('o15\n%s', [id(m.x)])) m.x.fix() info = INFO() @@ -642,7 +642,7 @@ def test_eval_unary_func(self): self.assertEqual(repn.mult, 1) self.assertEqual(repn.const, 0) self.assertEqual(repn.linear, {}) - self.assertEqual(repn.nonlinear, ('o43\n%s\n', [id(m.x)])) + self.assertEqual(repn.nonlinear, ('o43\n%s', [id(m.x)])) m.x.fix() info = INFO() @@ -671,7 +671,7 @@ def 
test_eval_expr_if_lessEq(self): self.assertEqual(repn.linear, {}) self.assertEqual( repn.nonlinear, - ('o35\no23\n%s\nn4\no5\n%s\nn2\n%s\n', [id(m.x), id(m.x), id(m.y)]), + ('o35\no23\n%sn4\no5\n%sn2\n%s', [id(m.x), id(m.x), id(m.y)]), ) m.x.fix() @@ -712,7 +712,7 @@ def test_eval_expr_if_Eq(self): self.assertEqual(repn.linear, {}) self.assertEqual( repn.nonlinear, - ('o35\no24\n%s\nn4\no5\n%s\nn2\n%s\n', [id(m.x), id(m.x), id(m.y)]), + ('o35\no24\n%sn4\no5\n%sn2\n%s', [id(m.x), id(m.x), id(m.y)]), ) m.x.fix() @@ -754,7 +754,7 @@ def test_eval_expr_if_ranged(self): self.assertEqual( repn.nonlinear, ( - 'o35\no21\no23\nn1\n%s\no23\n%s\nn4\no5\n%s\nn2\n%s\n', + 'o35\no21\no23\nn1\n%so23\n%sn4\no5\n%sn2\n%s', [id(m.x), id(m.x), id(m.x), id(m.y)], ), ) @@ -815,7 +815,7 @@ class CustomExpression(ScalarExpression): self.assertEqual(len(info.subexpression_cache), 1) obj, repn, info = info.subexpression_cache[id(m.e)] self.assertIs(obj, m.e) - self.assertEqual(repn.nl, ('%s\n', (id(m.e),))) + self.assertEqual(repn.nl, ('%s', (id(m.e),))) self.assertEqual(repn.mult, 1) self.assertEqual(repn.const, 3) self.assertEqual(repn.linear, {id(m.x): 1}) @@ -842,7 +842,7 @@ def test_nested_operator_zero_arg(self): self.assertEqual(repn.mult, 1) self.assertEqual(repn.const, 0) self.assertEqual(repn.linear, {}) - self.assertEqual(repn.nonlinear, ('o24\no3\nn1\n%s\nn0\n', [id(m.x)])) + self.assertEqual(repn.nonlinear, ('o24\no3\nn1\n%sn0\n', [id(m.x)])) def test_duplicate_shared_linear_expressions(self): # This tests an issue where AMPLRepn.duplicate() was not copying @@ -929,7 +929,7 @@ def test_AMPLRepn_to_expr(self): self.assertEqual(repn.mult, 1) self.assertEqual(repn.const, 0) self.assertEqual(repn.linear, {id(m.x[2]): 4, id(m.x[3]): 9, id(m.x[4]): 16}) - self.assertEqual(repn.nonlinear, ('o5\n%s\nn2\n', [id(m.x[2])])) + self.assertEqual(repn.nonlinear, ('o5\n%sn2\n', [id(m.x[2])])) with self.assertRaisesRegex( MouseTrap, "Cannot convert nonlinear AMPLRepn to Pyomo Expression" ): From b7145d9b238df4c64d037e5df9f4d284c8c9d0c9 Mon Sep 17 00:00:00 2001 From: John Siirola Date: Tue, 28 May 2024 07:52:21 -0600 Subject: [PATCH 074/113] Add additional testing --- pyomo/repn/tests/ampl/test_nlv2.py | 64 ++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) diff --git a/pyomo/repn/tests/ampl/test_nlv2.py b/pyomo/repn/tests/ampl/test_nlv2.py index 9318f1c5d0f..5af34fab149 100644 --- a/pyomo/repn/tests/ampl/test_nlv2.py +++ b/pyomo/repn/tests/ampl/test_nlv2.py @@ -2637,3 +2637,67 @@ def test_presolve_fixes_nl_exernal_function(self): OUT.getvalue(), ) ) + + def test_presolve_defined_var_to_const(self): + # This test is derived from a step in an IDAES initiaization + # where the presolver is able to fix enough variables to cause + # the defined variable to be reduced to a constant. 
We must not + # emit the defined variable (because doing so generates an error + # in the ASL) + m = ConcreteModel() + m.eq = Var(initialize=100) + m.co2 = Var() + m.n2 = Var() + m.E = Expression(expr=60 / (3 * m.co2 - 4 * m.n2 - 5)) + m.con1 = Constraint(expr=m.co2 == 6) + m.con2 = Constraint(expr=m.n2 == 7) + m.con3 = Constraint(expr=8 / m.E == m.eq) + + OUT = io.StringIO() + nl_writer.NLWriter().write( + m, OUT, symbolic_solver_labels=True, linear_presolve=True + ) + self.assertEqual( + *nl_diff( + """g3 1 1 0 #problem unknown + 1 1 0 0 1 #vars, constraints, objectives, ranges, eqns + 1 0 0 0 0 0 #nonlinear constrs, objs; ccons: lin, nonlin, nd, nzlb + 0 0 #network constraints: nonlinear, linear + 0 0 0 #nonlinear vars in constraints, objectives, both + 0 0 0 1 #linear network variables; functions; arith, flags + 0 0 0 0 0 #discrete variables: binary, integer, nonlinear (b,c,o) + 1 0 #nonzeros in Jacobian, obj. gradient + 4 2 #max name lengths: constraints, variables + 0 0 0 0 0 #common exprs: b,c,o,c1,o1 +C0 #con3 +o3 #/ +n8 +n-4 +x1 #initial guess +0 100 #eq +r #1 ranges (rhs's) +4 0 #con3 +b #1 bounds (on variables) +3 #eq +k0 #intermediate Jacobian column lengths +J0 1 #con3 +0 -1 +""", + OUT.getvalue(), + ) + ) + + def test_presolve_check_invalid_monomial_constraints(self): + # This checks issue #3272 + m = ConcreteModel() + m.x = Var() + m.c = Constraint(expr=m.x == 5) + m.d = Constraint(expr=m.x >= 10) + + OUT = io.StringIO() + with self.assertRaisesRegex( + nl_writer.InfeasibleConstraintException, + r"model contains a trivially infeasible constraint 'd' " + r"\(fixed body value 5.0 outside bounds \[10, None\]\)\.", + ): + nl_writer.NLWriter().write(m, OUT, linear_presolve=True) From 4d10cdfc91f19a747ac3b2d7e6c830242de7f95d Mon Sep 17 00:00:00 2001 From: John Siirola Date: Tue, 28 May 2024 08:43:16 -0600 Subject: [PATCH 075/113] NFC: fix spelling --- pyomo/repn/tests/ampl/test_nlv2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyomo/repn/tests/ampl/test_nlv2.py b/pyomo/repn/tests/ampl/test_nlv2.py index 5af34fab149..01df5a93257 100644 --- a/pyomo/repn/tests/ampl/test_nlv2.py +++ b/pyomo/repn/tests/ampl/test_nlv2.py @@ -2527,7 +2527,7 @@ def test_presolve_fixes_nl_defined_variables(self): ) ) - def test_presolve_fixes_nl_exernal_function(self): + def test_presolve_fixes_nl_external_function(self): # This tests a workaround for a bug in the ASL where external # functions with constant argument expressions are not # evaluated correctly. 
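For quick reference, a standalone sketch of the scenario these presolve changes guard against (condensed from the test added in PATCH 074/113; illustrative only, not part of any patch): once the linear presolve fixes co2 and n2, the named Expression E collapses to a constant, and the NL writer must substitute that constant directly rather than emit a constant-only defined variable, which the ASL rejects.

    import io
    from pyomo.environ import ConcreteModel, Var, Expression, Constraint
    from pyomo.repn.plugins import nl_writer

    m = ConcreteModel()
    m.eq = Var(initialize=100)
    m.co2 = Var()
    m.n2 = Var()
    # E becomes a defined variable (V line) in the NL file
    m.E = Expression(expr=60 / (3 * m.co2 - 4 * m.n2 - 5))
    m.con1 = Constraint(expr=m.co2 == 6)
    m.con2 = Constraint(expr=m.n2 == 7)
    m.con3 = Constraint(expr=8 / m.E == m.eq)

    # With linear_presolve=True, con1 and con2 fix co2 and n2, so E reduces
    # to a constant and must not be written out as a defined variable.
    out = io.StringIO()
    nl_writer.NLWriter().write(m, out, linear_presolve=True)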
From cd62d5bdd12836ebd7dcf2f8301d88bc1233f2fa Mon Sep 17 00:00:00 2001 From: John Siirola Date: Wed, 29 May 2024 02:10:20 -0600 Subject: [PATCH 076/113] Improve resolution of constant external function / defined variable subexpressions --- pyomo/repn/plugins/nl_writer.py | 106 ++++++++++++++++++++------------ 1 file changed, 66 insertions(+), 40 deletions(-) diff --git a/pyomo/repn/plugins/nl_writer.py b/pyomo/repn/plugins/nl_writer.py index e542a177247..3c90cf7687f 100644 --- a/pyomo/repn/plugins/nl_writer.py +++ b/pyomo/repn/plugins/nl_writer.py @@ -548,7 +548,7 @@ def __init__(self, ostream, rowstream, colstream, config): self.external_functions = {} self.used_named_expressions = set() self.var_map = {} - self.var_id_to_nl = {} + self.var_id_to_nl_map = {} self.sorter = FileDeterminism_to_SortComponents(config.file_determinism) self.visitor = AMPLRepnVisitor( self.template, @@ -622,6 +622,7 @@ def write(self, model): ostream = self.ostream linear_presolve = self.config.linear_presolve + nl_map = self.var_id_to_nl_map var_map = self.var_map initialize_var_map_from_column_order(model, self.config, var_map) timer.toc('Initialized column order', level=logging.DEBUG) @@ -752,6 +753,12 @@ def write(self, model): else: timer.toc('Processed %s constraints', len(all_constraints)) + # We have identified all the external functions (resolving them + # by name). Now we may need to resolve the function by the + # (local) FID, which we know is indexed by integers starting at + # 0. We will convert the dict to a list for efficient lookup. + self.external_functions = list(self.external_functions.values()) + # This may fetch more bounds than needed, but only in the cases # where variables were completely eliminated while walking the # expressions, or when users provide superfluous variables in @@ -766,7 +773,7 @@ def write(self, model): del lcon_by_linear_nnz # Note: defer categorizing constraints until after presolve, as - # the presolver could result in nonlinear constraints to become + # the presolver could result in nonlinear constraints becoming # linear (or trivial) constraints = [] linear_cons = [] @@ -778,10 +785,13 @@ def write(self, model): for info in _constraints: expr_info = info[1] if expr_info.nonlinear: - if expr_info.nonlinear[1]: + nl, args = expr_info.nonlinear + if any(vid not in nl_map for vid in args): constraints.append(info) continue - expr_info.const += _evaluate_constant_nl(expr_info.nonlinear[0]) + expr_info.const += _evaluate_constant_nl( + nl % tuple(nl_map[i] for i in args), self.external_functions + ) expr_info.nonlinear = None if expr_info.linear: linear_cons.append(info) @@ -1072,11 +1082,10 @@ def write(self, model): col_labels = col_comments = [''] * len(variables) id2nl = {_id: f"v{var_idx}\n" for var_idx, _id in enumerate(variables)} - if self.var_id_to_nl: - self.var_id_to_nl.update(id2nl) + if nl_map: + nl_map.update(id2nl) else: - self.var_id_to_nl = id2nl - _vmap = self.var_id_to_nl + self.var_id_to_nl_map = nl_map = id2nl if scale_model: template = self.template objective_scaling = [scaling_cache[id(info[0])] for info in objectives] @@ -1096,8 +1105,8 @@ def write(self, model): if ub is not None: ub *= scale var_bounds[_id] = lb, ub - # Update _vmap to output scaled variables in NL expressions - _vmap[_id] = template.division + _vmap[_id] + template.const % scale + # Update nl_map to output scaled variables in NL expressions + nl_map[_id] = template.division + nl_map[_id] + template.const % scale # Update any eliminated variables to point to the (potentially # 
scaled) substituted variables @@ -1106,7 +1115,7 @@ def write(self, model): for _i in args: # It is possible that the eliminated variable could # reference another variable that is no longer part of - # the model and therefore does not have a _vmap entry. + # the model and therefore does not have a nl_map entry. # This can happen when there is an underdetermined # independent linear subsystem and the presolve removed # all the constraints from the subsystem. Because the @@ -1114,7 +1123,7 @@ def write(self, model): # anywhere else in the model, they are not part of the # `variables` list. Implicitly "fix" it to an arbitrary # valid value from the presolved domain (see #3192). - if _i not in _vmap: + if _i not in nl_map: lb, ub = var_bounds[_i] if lb is None: lb = -inf @@ -1125,13 +1134,13 @@ def write(self, model): else: val = lb if abs(lb) < abs(ub) else ub eliminated_vars[_i] = AMPLRepn(val, {}, None) - _vmap[_i] = expr_info.compile_repn(visitor)[0] + nl_map[_i] = expr_info.compile_repn(visitor)[0] logger.warning( "presolve identified an underdetermined independent " "linear subsystem that was removed from the model. " f"Setting '{var_map[_i]}' == {val}" ) - _vmap[_id] = nl % tuple(_vmap[_i] for _i in args) + nl_map[_id] = nl % tuple(nl_map[_i] for _i in args) r_lines = [None] * n_cons for idx, (con, expr_info, lb, ub) in enumerate(constraints): @@ -1330,7 +1339,7 @@ def write(self, model): # "F" lines (external function definitions) # amplfunc_libraries = set() - for fid, fcn in sorted(self.external_functions.values()): + for fid, fcn in self.external_functions: amplfunc_libraries.add(fcn._library) ostream.write("F%d 1 -1 %s\n" % (fid, fcn._function)) @@ -1774,6 +1783,7 @@ def _linear_presolve( var_map = self.var_map substitutions_by_linear_var = defaultdict(set) template = self.template + nl_map = self.var_id_to_nl_map one_var = lcon_by_linear_nnz[1] two_var = lcon_by_linear_nnz[2] while 1: @@ -1783,6 +1793,7 @@ def _linear_presolve( b, _ = var_bounds[_id] logger.debug("NL presolve: bounds fixed %s := %s", var_map[_id], b) eliminated_vars[_id] = AMPLRepn(b, {}, None) + nl_map[_id] = template.const % b elif one_var: con_id, info = one_var.popitem() expr_info, lb = info @@ -1792,6 +1803,7 @@ def _linear_presolve( b = expr_info.const = (lb - expr_info.const) / coef logger.debug("NL presolve: substituting %s := %s", var_map[_id], b) eliminated_vars[_id] = expr_info + nl_map[_id] = template.const % b lb, ub = var_bounds[_id] if (lb is not None and lb - b > TOL) or ( ub is not None and ub - b < -TOL @@ -1929,30 +1941,31 @@ def _linear_presolve( expr_info.linear[x] += c * a elif a: expr_info.linear[x] = c * a + elif not expr_info.linear: + nl_map[resubst] = template.const % expr_info.const # Note: the ASL will (silently) produce incorrect answers if the # nonlinear portion of a defined variable is a constant # expression. This may now be the case if all the variables in # the original nonlinear expression have been fixed. for _id, (expr, info, sub) in self.subexpression_cache.items(): - if not info.nonlinear: - continue - nl, args = info.nonlinear - if not args or any( - vid not in eliminated_vars or eliminated_vars[vid].linear - for vid in args - ): - continue - # Ideally, we would just evaluate the named expression. - # However, there might be a linear portion of the named - # expression that still has free variables, and there is no - # guarantee that the user actually initialized the - # variables. 
So, we will fall back on parsing the (now - # constant) nonlinear fragment and evaluating it. - info.nonlinear = None - info.const += _evaluate_constant_nl( - nl % tuple(template.const % eliminated_vars[i].const for i in args) - ) + if info.nonlinear: + nl, args = info.nonlinear + # Note: 'not args' skips string arguments + # Note: 'vid in nl_map' skips eliminated + # variables and defined variables reduced to constants + if not args or any(vid not in nl_map for vid in args): + continue + # Ideally, we would just evaluate the named expression. + # However, there might be a linear portion of the named + # expression that still has free variables, and there is no + # guarantee that the user actually initialized the + # variables. So, we will fall back on parsing the (now + # constant) nonlinear fragment and evaluating it. + info.nonlinear = None + info.const += _evaluate_constant_nl( + nl % tuple(nl_map[i] for i in args), self.external_functions + ) if not info.linear: # This has resolved to a constant: the ASL will fail for # defined variables containing ONLY a constant. We @@ -1960,7 +1973,7 @@ def _linear_presolve( # original constraint/objective expression(s) info.linear = {} self.used_named_expressions.discard(_id) - self.var_id_to_nl[_id] = self.template.const % info.const + nl_map[_id] = template.const % info.const self.subexpression_cache[_id] = (expr, info, [None, None, True]) return eliminated_cons, eliminated_vars @@ -1990,18 +2003,20 @@ def _write_nl_expression(self, repn, include_const): # constant as the second argument, so we will too. nl = self.template.binary_sum + nl + self.template.const % repn.const try: - self.ostream.write(nl % tuple(map(self.var_id_to_nl.__getitem__, args))) + self.ostream.write( + nl % tuple(map(self.var_id_to_nl_map.__getitem__, args)) + ) except KeyError: final_args = [] for arg in args: - if arg in self.var_id_to_nl: - final_args.append(self.var_id_to_nl[arg]) + if arg in self.var_id_to_nl_map: + final_args.append(self.var_id_to_nl_map[arg]) else: _nl, _ids, _ = self.subexpression_cache[arg][1].compile_repn( self.visitor ) final_args.append( - _nl % tuple(map(self.var_id_to_nl.__getitem__, _ids)) + _nl % tuple(map(self.var_id_to_nl_map.__getitem__, _ids)) ) self.ostream.write(nl % tuple(final_args)) @@ -2018,7 +2033,7 @@ def _write_v_line(self, expr_id, k): lbl = '\t#%s' % info[0].name else: lbl = '' - self.var_id_to_nl[expr_id] = f"v{self.next_V_line_id}{lbl}\n" + self.var_id_to_nl_map[expr_id] = f"v{self.next_V_line_id}{lbl}\n" # Do NOT write out 0 coefficients here: doing so fouls up the # ASL's logic for calculating derivatives, leading to 'nan' in # the Hessian results. @@ -3167,7 +3182,7 @@ def finalizeResult(self, result): return ans -def _evaluate_constant_nl(nl): +def _evaluate_constant_nl(nl, external_functions): expr = nl.splitlines() stack = [] while expr: @@ -3182,6 +3197,15 @@ def _evaluate_constant_nl(nl): # skip blank lines if not tokens: continue + if tokens[0][0] == 'f': + # external function + fid, nargs = tokens + fid = int(fid[1:]) + nargs = int(nargs) + fcn_id, ef = external_functions[fid] + assert fid == fcn_id + stack.append(ef.evaluate(tuple(stack.pop() for i in range(nargs)))) + continue raise DeveloperError( f"Unsupported line format _evaluate_constant_nl() " f"(we expect each line to contain a single token): '{line}'" @@ -3205,6 +3229,8 @@ def _evaluate_constant_nl(nl): # sum) or a string argument. Preserve it as-is until later # when we know which we are expecting. 
stack.append(term) + elif cmd == 'h': + stack.append(term.split(':', 1)[1]) else: raise DeveloperError( f"Unsupported NL operator in _evaluate_constant_nl(): '{line}'" From 5d4ce0808ab96e8003e14983fc01f80edc9271dd Mon Sep 17 00:00:00 2001 From: John Siirola Date: Wed, 29 May 2024 02:10:43 -0600 Subject: [PATCH 077/113] bugfix: undefined variable --- pyomo/repn/plugins/nl_writer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyomo/repn/plugins/nl_writer.py b/pyomo/repn/plugins/nl_writer.py index 3c90cf7687f..8a41eb568e7 100644 --- a/pyomo/repn/plugins/nl_writer.py +++ b/pyomo/repn/plugins/nl_writer.py @@ -2779,8 +2779,8 @@ def handle_external_function_node(visitor, node, *args): "correctly." % ( func, - visitor.external_byFcn[func]._library, - visitor.external_byFcn[func]._library.name, + visitor.external_functions[func]._library, + visitor.external_functions[func]._library.name, node._fcn._library, node._fcn.name, ) From 2f0e57a497c9e8e9e2f1ca739d114d41cfd4115f Mon Sep 17 00:00:00 2001 From: Alex Dowling Date: Wed, 29 May 2024 09:29:34 -0400 Subject: [PATCH 078/113] Addressed feedback, ran black. --- pyomo/contrib/doe/doe.py | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/pyomo/contrib/doe/doe.py b/pyomo/contrib/doe/doe.py index 5c7fe4f6e77..a120add4200 100644 --- a/pyomo/contrib/doe/doe.py +++ b/pyomo/contrib/doe/doe.py @@ -42,7 +42,7 @@ import inspect -import pyomo.contrib.parmest.utils as utils +from pyomo.common import DeveloperError class CalculationMode(Enum): @@ -457,11 +457,6 @@ def _sequential_finite(self, read_output, extract_single_model, store_output): # add zero (dummy/placeholder) objective function mod.Obj = pyo.Objective(expr=0, sense=pyo.minimize) - # convert params to vars - # print("self.param.keys():", self.param.keys()) - # mod = utils.convert_params_to_vars(mod, self.param.keys(), fix_vars=True) - # mod.pprint() - # solve model square_result = self._solve_doe(mod, fix=True) @@ -545,9 +540,6 @@ def _direct_kaug(self): # add zero (dummy/placeholder) objective function mod.Obj = pyo.Objective(expr=0, sense=pyo.minimize) - # convert params to vars - # mod = utils.convert_params_to_vars(mod, self.param.keys(), fix_vars=True) - # set ub and lb to parameters for par in self.param.keys(): cuid = pyo.ComponentUID(par) @@ -1294,8 +1286,8 @@ def trace_calc(m): return m.trace == sum(m.fim[j, j] for j in m.regression_parameters) def det_general(m): - """Calculate determinant. Can be applied to FIM of any size. - det(A) = sum_{sigma in Sn} (sgn(sigma) * Prod_{i=1}^n a_{i,sigma_i}) + r"""Calculate determinant. Can be applied to FIM of any size. 
+ det(A) = \sum_{\sigma in \S_n} (sgn(\sigma) * \Prod_{i=1}^n a_{i,\sigma_i}) Use permutation() to get permutations, sgn() to get signature """ r_list = list(range(len(m.regression_parameters))) From 15088dee1f4b05e7225c5fb9cf3a58931ef1e79b Mon Sep 17 00:00:00 2001 From: John Siirola Date: Wed, 29 May 2024 07:38:45 -0600 Subject: [PATCH 079/113] Updating baseline to reflect improved linear constraint detection --- pyomo/repn/tests/ampl/test_nlv2.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/pyomo/repn/tests/ampl/test_nlv2.py b/pyomo/repn/tests/ampl/test_nlv2.py index 01df5a93257..3ca41e97ba4 100644 --- a/pyomo/repn/tests/ampl/test_nlv2.py +++ b/pyomo/repn/tests/ampl/test_nlv2.py @@ -2657,11 +2657,15 @@ def test_presolve_defined_var_to_const(self): nl_writer.NLWriter().write( m, OUT, symbolic_solver_labels=True, linear_presolve=True ) + # Note that the presolve will end up recognizing con3 as a + # linear constraint; however, it does not do so until processing + # the constraints after presolve (so the constraint is not + # actually removed and the eq variable still appears in the model) self.assertEqual( *nl_diff( """g3 1 1 0 #problem unknown 1 1 0 0 1 #vars, constraints, objectives, ranges, eqns - 1 0 0 0 0 0 #nonlinear constrs, objs; ccons: lin, nonlin, nd, nzlb + 0 0 0 0 0 0 #nonlinear constrs, objs; ccons: lin, nonlin, nd, nzlb 0 0 #network constraints: nonlinear, linear 0 0 0 #nonlinear vars in constraints, objectives, both 0 0 0 1 #linear network variables; functions; arith, flags @@ -2670,13 +2674,11 @@ def test_presolve_defined_var_to_const(self): 4 2 #max name lengths: constraints, variables 0 0 0 0 0 #common exprs: b,c,o,c1,o1 C0 #con3 -o3 #/ -n8 -n-4 +n0 x1 #initial guess 0 100 #eq r #1 ranges (rhs's) -4 0 #con3 +4 2.0 #con3 b #1 bounds (on variables) 3 #eq k0 #intermediate Jacobian column lengths From 1bed2c601ec8ee515892e4981a684cec7a0a7fd0 Mon Sep 17 00:00:00 2001 From: John Siirola Date: Wed, 29 May 2024 11:27:57 -0600 Subject: [PATCH 080/113] NFC: fix typos --- pyomo/repn/plugins/nl_writer.py | 2 +- pyomo/repn/tests/ampl/test_nlv2.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyomo/repn/plugins/nl_writer.py b/pyomo/repn/plugins/nl_writer.py index 8a41eb568e7..43fd2fade68 100644 --- a/pyomo/repn/plugins/nl_writer.py +++ b/pyomo/repn/plugins/nl_writer.py @@ -1946,7 +1946,7 @@ def _linear_presolve( # Note: the ASL will (silently) produce incorrect answers if the # nonlinear portion of a defined variable is a constant - # expression. This may now be the case if all the variables in + # expression. This may not be the case if all the variables in # the original nonlinear expression have been fixed. for _id, (expr, info, sub) in self.subexpression_cache.items(): if info.nonlinear: diff --git a/pyomo/repn/tests/ampl/test_nlv2.py b/pyomo/repn/tests/ampl/test_nlv2.py index 01df5a93257..7d3b499a4ae 100644 --- a/pyomo/repn/tests/ampl/test_nlv2.py +++ b/pyomo/repn/tests/ampl/test_nlv2.py @@ -2639,7 +2639,7 @@ def test_presolve_fixes_nl_external_function(self): ) def test_presolve_defined_var_to_const(self): - # This test is derived from a step in an IDAES initiaization + # This test is derived from a step in an IDAES initialization # where the presolver is able to fix enough variables to cause # the defined variable to be reduced to a constant. 
We must not # emit the defined variable (because doing so generates an error From c212c13a4ab468ba61dd88c29322f889ca9ee150 Mon Sep 17 00:00:00 2001 From: Bethany Nicholson Date: Wed, 29 May 2024 13:29:15 -0600 Subject: [PATCH 081/113] Updating CHANGELOG in preparation for the 6.7.3 release --- CHANGELOG.md | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 11b4ecbf785..b39165297f3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,19 @@ Pyomo CHANGELOG =============== +------------------------------------------------------------------------------- +Pyomo 6.7.3 (29 May 2024) +------------------------------------------------------------------------------- + +- Core + - Deprecate `pyomo.core.plugins.transform.model.to_standard_form()` (#3265) + - Reorder definitions to avoid `NameError` in some situations (#3264) +- Testing + - Add URL checking to GHA linting job (#3259, #3261) + - Skip Windows Python 3.8 conda GHA job (#3269) +- Contributed Packages + - DoE: Bug fixes for workshop (#3267) + ------------------------------------------------------------------------------- Pyomo 6.7.2 (9 May 2024) ------------------------------------------------------------------------------- @@ -57,7 +70,7 @@ Pyomo 6.7.2 (9 May 2024) - CP: Add SequenceVar and other logical expressions for scheduling (#3227) - DoE: Bug fixes (#3245) - iis: Add minimal intractable system infeasibility diagnostics (#3172) - - incidence_analysis: Improve `solve_strongly_connected_components` + - incidence_analysis: Improve `solve_strongly_connected_components` performance for models with named expressions (#3186) - incidence_analysis: Add function to plot incidence graph in Dulmage-Mendelsohn order (#3207) From 97a6ae08aa967a7aaa6fe9ef6e42bafe1eb29eca Mon Sep 17 00:00:00 2001 From: Bethany Nicholson Date: Wed, 29 May 2024 14:37:14 -0600 Subject: [PATCH 082/113] More edits to the CHANGELOG --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b39165297f3..f9051e80ade 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,8 @@ Pyomo 6.7.3 (29 May 2024) - Core - Deprecate `pyomo.core.plugins.transform.model.to_standard_form()` (#3265) - Reorder definitions to avoid `NameError` in some situations (#3264) +- Solver Interfaces + - NLv2: Fix linear presolver with constant defined vars/external fcns (#3276) - Testing - Add URL checking to GHA linting job (#3259, #3261) - Skip Windows Python 3.8 conda GHA job (#3269) From ba2d2cac411e0000bab03eb48b16bb0d573c2fed Mon Sep 17 00:00:00 2001 From: Bethany Nicholson Date: Wed, 29 May 2024 14:42:45 -0600 Subject: [PATCH 083/113] Updating deprecation version to 6.7.3 --- pyomo/core/plugins/transform/model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyomo/core/plugins/transform/model.py b/pyomo/core/plugins/transform/model.py index 9f370c96304..8fe828854ce 100644 --- a/pyomo/core/plugins/transform/model.py +++ b/pyomo/core/plugins/transform/model.py @@ -24,7 +24,7 @@ @deprecated( "to_standard_form() is deprecated. 
" "Please use WriterFactory('compile_standard_form')", - version='6.7.3.dev0', + version='6.7.3', remove_in='6.8.0', ) def to_standard_form(self): From 32e6431e876b90f5268ca448581c50124d7adf68 Mon Sep 17 00:00:00 2001 From: John Siirola Date: Wed, 29 May 2024 14:54:47 -0600 Subject: [PATCH 084/113] Update guard for pint import to possibly avoid recursion error on FreeBSD --- pyomo/contrib/viewer/tests/test_data_model_item.py | 9 ++------- pyomo/contrib/viewer/tests/test_data_model_tree.py | 9 ++------- 2 files changed, 4 insertions(+), 14 deletions(-) diff --git a/pyomo/contrib/viewer/tests/test_data_model_item.py b/pyomo/contrib/viewer/tests/test_data_model_item.py index 781ca25508a..d780b315044 100644 --- a/pyomo/contrib/viewer/tests/test_data_model_item.py +++ b/pyomo/contrib/viewer/tests/test_data_model_item.py @@ -46,15 +46,10 @@ from pyomo.contrib.viewer.model_browser import ComponentDataItem from pyomo.contrib.viewer.ui_data import UIData from pyomo.common.dependencies import DeferredImportError +from pyomo.core.base.units_container import pint_available -try: - x = pyo.units.m - units_available = True -except DeferredImportError: - units_available = False - -@unittest.skipIf(not units_available, "Pyomo units are not available") +@unittest.skipIf(not pint_available, "Pyomo units are not available") class TestDataModelItem(unittest.TestCase): def setUp(self): # Borrowed this test model from the trust region tests diff --git a/pyomo/contrib/viewer/tests/test_data_model_tree.py b/pyomo/contrib/viewer/tests/test_data_model_tree.py index d517c91b353..2e5c3592198 100644 --- a/pyomo/contrib/viewer/tests/test_data_model_tree.py +++ b/pyomo/contrib/viewer/tests/test_data_model_tree.py @@ -42,12 +42,7 @@ from pyomo.contrib.viewer.model_browser import ComponentDataModel import pyomo.contrib.viewer.qt as myqt from pyomo.common.dependencies import DeferredImportError - -try: - _x = pyo.units.m - units_available = True -except DeferredImportError: - units_available = False +from pyomo.core.base.units_container import pint_available available = myqt.available @@ -63,7 +58,7 @@ def __init__(*args, **kwargs): pass -@unittest.skipIf(not available or not units_available, "PyQt or units not available") +@unittest.skipIf(not available or not pint_available, "PyQt or units not available") class TestDataModel(unittest.TestCase): def setUp(self): # Borrowed this test model from the trust region tests From f12825eb98f96807cecbfc9515f41b2b8c0e9b16 Mon Sep 17 00:00:00 2001 From: Bethany Nicholson Date: Wed, 29 May 2024 15:14:28 -0600 Subject: [PATCH 085/113] More edits to the CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f9051e80ade..8d1d1e45e3d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ Pyomo 6.7.3 (29 May 2024) - Skip Windows Python 3.8 conda GHA job (#3269) - Contributed Packages - DoE: Bug fixes for workshop (#3267) + - viewer: Update guard for pint import (#3277) ------------------------------------------------------------------------------- Pyomo 6.7.2 (9 May 2024) From 93e5dab925115d564af09f8a33ff4099b0247083 Mon Sep 17 00:00:00 2001 From: John Siirola Date: Wed, 29 May 2024 15:23:21 -0600 Subject: [PATCH 086/113] Finalizing 6.7.3 release files --- .coin-or/projDesc.xml | 4 ++-- RELEASE.md | 2 +- pyomo/version/info.py | 4 ++-- setup.py | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.coin-or/projDesc.xml b/.coin-or/projDesc.xml index 073efd968a7..d13ac8804cf 100644 --- a/.coin-or/projDesc.xml +++ 
b/.coin-or/projDesc.xml @@ -227,8 +227,8 @@ Carl D. Laird, Chair, Pyomo Management Committee, claird at andrew dot cmu dot e Use explicit overrides to disable use of automated version reporting. --> - 6.7.2 - 6.7.2 + 6.7.3 + 6.7.3 diff --git a/RELEASE.md b/RELEASE.md index b0228e53944..e42469cbad5 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,4 +1,4 @@ -We are pleased to announce the release of Pyomo 6.7.2. +We are pleased to announce the release of Pyomo 6.7.3. Pyomo is a collection of Python software packages that supports a diverse set of optimization capabilities for formulating and analyzing diff --git a/pyomo/version/info.py b/pyomo/version/info.py index 36945e8e011..cba680d50ee 100644 --- a/pyomo/version/info.py +++ b/pyomo/version/info.py @@ -27,8 +27,8 @@ major = 6 minor = 7 micro = 3 -releaselevel = 'invalid' -# releaselevel = 'final' +# releaselevel = 'invalid' +releaselevel = 'final' serial = 0 if releaselevel == 'final': diff --git a/setup.py b/setup.py index 6d28e4d184b..9dfa253815e 100644 --- a/setup.py +++ b/setup.py @@ -256,7 +256,7 @@ def __ne__(self, other): 'sphinx-toolbox>=2.16.0', 'sphinx-jinja2-compat>=0.1.1', 'enum_tools', - 'numpy', # Needed by autodoc for pynumero + 'numpy<2.0.0', # Needed by autodoc for pynumero 'scipy', # Needed by autodoc for pynumero ], 'optional': [ @@ -273,7 +273,7 @@ def __ne__(self, other): # installed on python 3.8 'networkx<3.2; python_version<"3.9"', 'networkx; python_version>="3.9"', - 'numpy', + 'numpy<2.0.0', 'openpyxl', # dataportals #'pathos', # requested for #963, but PR currently closed 'pint', # units From 5c3af84e27c19b10987ab451ab892496f5bb9630 Mon Sep 17 00:00:00 2001 From: Bethany Nicholson Date: Wed, 29 May 2024 16:03:44 -0600 Subject: [PATCH 087/113] Resetting main for development (6.7.4.dev0) --- pyomo/version/info.py | 6 +++--- setup.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pyomo/version/info.py b/pyomo/version/info.py index cba680d50ee..825483a70a0 100644 --- a/pyomo/version/info.py +++ b/pyomo/version/info.py @@ -26,9 +26,9 @@ # main and needs a hard reference to "suitably new" development. 
major = 6 minor = 7 -micro = 3 -# releaselevel = 'invalid' -releaselevel = 'final' +micro = 4 +releaselevel = 'invalid' +# releaselevel = 'final' serial = 0 if releaselevel == 'final': diff --git a/setup.py b/setup.py index 9dfa253815e..6d28e4d184b 100644 --- a/setup.py +++ b/setup.py @@ -256,7 +256,7 @@ def __ne__(self, other): 'sphinx-toolbox>=2.16.0', 'sphinx-jinja2-compat>=0.1.1', 'enum_tools', - 'numpy<2.0.0', # Needed by autodoc for pynumero + 'numpy', # Needed by autodoc for pynumero 'scipy', # Needed by autodoc for pynumero ], 'optional': [ @@ -273,7 +273,7 @@ def __ne__(self, other): # installed on python 3.8 'networkx<3.2; python_version<"3.9"', 'networkx; python_version>="3.9"', - 'numpy<2.0.0', + 'numpy', 'openpyxl', # dataportals #'pathos', # requested for #963, but PR currently closed 'pint', # units From 6834c906bc299575f7588bb3b07558b63c5d7050 Mon Sep 17 00:00:00 2001 From: Miranda Rose Mundt Date: Wed, 5 Jun 2024 10:32:35 -0600 Subject: [PATCH 088/113] Add in cfgfile information; turn off SAS Viya tests for now --- pyomo/solvers/tests/checks/test_SAS.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/pyomo/solvers/tests/checks/test_SAS.py b/pyomo/solvers/tests/checks/test_SAS.py index 75534e0e001..843ca3f2d2b 100644 --- a/pyomo/solvers/tests/checks/test_SAS.py +++ b/pyomo/solvers/tests/checks/test_SAS.py @@ -29,6 +29,8 @@ from pyomo.opt import SolverFactory, check_available_solvers import warnings +CFGFILE = os.environ.get("SAS_CFG_FILE_PATH", None) + CAS_OPTIONS = { "hostname": os.environ.get("CASHOST", None), "port": os.environ.get("CASPORT", None), @@ -42,11 +44,12 @@ class SASTestAbc: solver_io = "_sas94" session_options = {} + cfgfile = CFGFILE @classmethod def setUpClass(cls): cls.opt_sas = SolverFactory( - "sas", solver_io=cls.solver_io, **cls.session_options + "sas", solver_io=cls.solver_io, cfgfile=cls.cfgfile, **cls.session_options ) @classmethod @@ -322,7 +325,8 @@ def test_solver_error(self, submit_mock, symget_mock): self.assertEqual(results.solver.status, SolverStatus.error) -@unittest.skipIf(not sas_available, "The SAS solver is not available") +#@unittest.skipIf(not sas_available, "The SAS solver is not available") +@unittest.skip("Tests not yet configured for SAS Viya interface.") class SASTestLPCAS(SASTestLP, unittest.TestCase): solver_io = "_sascas" session_options = CAS_OPTIONS @@ -528,6 +532,7 @@ class SASTestMILP94(SASTestMILP, unittest.TestCase): @unittest.skipIf(not sas_available, "The SAS solver is not available") +@unittest.skip("Tests not yet configured for SAS Viya interface.") class SASTestMILPCAS(SASTestMILP, unittest.TestCase): solver_io = "_sascas" session_options = CAS_OPTIONS From 6d5c931bf00b9ac9b0144314584eff2b52d8b92c Mon Sep 17 00:00:00 2001 From: Miranda Rose Mundt Date: Wed, 5 Jun 2024 10:38:20 -0600 Subject: [PATCH 089/113] Apply black to test_SAS --- pyomo/solvers/tests/checks/test_SAS.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyomo/solvers/tests/checks/test_SAS.py b/pyomo/solvers/tests/checks/test_SAS.py index 843ca3f2d2b..00b4d7a9b3e 100644 --- a/pyomo/solvers/tests/checks/test_SAS.py +++ b/pyomo/solvers/tests/checks/test_SAS.py @@ -325,7 +325,7 @@ def test_solver_error(self, submit_mock, symget_mock): self.assertEqual(results.solver.status, SolverStatus.error) -#@unittest.skipIf(not sas_available, "The SAS solver is not available") +# @unittest.skipIf(not sas_available, "The SAS solver is not available") @unittest.skip("Tests not yet configured for SAS Viya interface.") 
class SASTestLPCAS(SASTestLP, unittest.TestCase): solver_io = "_sascas" @@ -531,7 +531,7 @@ class SASTestMILP94(SASTestMILP, unittest.TestCase): pass -@unittest.skipIf(not sas_available, "The SAS solver is not available") +# @unittest.skipIf(not sas_available, "The SAS solver is not available") @unittest.skip("Tests not yet configured for SAS Viya interface.") class SASTestMILPCAS(SASTestMILP, unittest.TestCase): solver_io = "_sascas" From 60dcc7a3f9875ab7e32a2d9540edfcfa243f054d Mon Sep 17 00:00:00 2001 From: Miranda Mundt Date: Wed, 5 Jun 2024 13:14:16 -0600 Subject: [PATCH 090/113] Import specific library from uuid --- pyomo/solvers/plugins/solvers/SAS.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pyomo/solvers/plugins/solvers/SAS.py b/pyomo/solvers/plugins/solvers/SAS.py index bccb7d34077..eb5014fbd1a 100644 --- a/pyomo/solvers/plugins/solvers/SAS.py +++ b/pyomo/solvers/plugins/solvers/SAS.py @@ -12,7 +12,7 @@ import logging import sys from os import stat -import uuid +from uuid import uuid4 from abc import ABC, abstractmethod from io import StringIO @@ -334,7 +334,7 @@ def _apply_solver(self): rootnode_str = self._create_statement_str("rootnode") # Get a unique identifier, always use the same with different prefixes - unique = uuid.uuid4().hex[:16] + unique = uuid4().hex[:16] # Create unique filename for output datasets primalout_dataset_name = "pout" + unique @@ -703,7 +703,7 @@ def _apply_solver(self): action = "solveMilp" if self._has_integer_variables() else "solveLp" # Get a unique identifier, always use the same with different prefixes - unique = uuid.uuid4().hex[:16] + unique = uuid4().hex[:16] # Creat the output stream, we want to print to a log string as well as to the console self._log = StringIO() From d84a8a019c715082fbbe4167b66adb5f87ce113d Mon Sep 17 00:00:00 2001 From: Miranda Mundt <55767766+mrmundt@users.noreply.github.com> Date: Mon, 10 Jun 2024 12:21:52 -0600 Subject: [PATCH 091/113] Update test_SAS.py --- pyomo/solvers/tests/checks/test_SAS.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pyomo/solvers/tests/checks/test_SAS.py b/pyomo/solvers/tests/checks/test_SAS.py index 00b4d7a9b3e..6dd662bdb21 100644 --- a/pyomo/solvers/tests/checks/test_SAS.py +++ b/pyomo/solvers/tests/checks/test_SAS.py @@ -526,7 +526,8 @@ def test_solver_warmstart_capable(self): self.assertTrue(self.opt_sas.warm_start_capable()) -@unittest.skipIf(not sas_available, "The SAS solver is not available") +# @unittest.skipIf(not sas_available, "The SAS solver is not available") +@unittest.skip("MILP94 tests disabled.") class SASTestMILP94(SASTestMILP, unittest.TestCase): pass From 1e7fabdd345c1f283b53abd3c04c0b49e5f10ce4 Mon Sep 17 00:00:00 2001 From: Miranda Mundt Date: Mon, 17 Jun 2024 10:32:32 -0600 Subject: [PATCH 092/113] Update Pyomo for NumPy 2 --- pyomo/common/dependencies.py | 10 ++++++++-- pyomo/core/kernel/register_numpy_types.py | 10 ++++++---- .../tests/unit/test_kernel_register_numpy_types.py | 6 ++++-- pyomo/core/tests/unit/test_numvalue.py | 8 ++++---- pyomo/core/tests/unit/test_sets.py | 12 ++++++------ 5 files changed, 28 insertions(+), 18 deletions(-) diff --git a/pyomo/common/dependencies.py b/pyomo/common/dependencies.py index 4c9e43002ef..cf05b68612d 100644 --- a/pyomo/common/dependencies.py +++ b/pyomo/common/dependencies.py @@ -999,10 +999,13 @@ def _finalize_numpy(np, available): # registration here (to bypass the deprecation warning) until we # finally remove all support for it numeric_types._native_boolean_types.add(t) - 
_floats = [np.float_, np.float16, np.float32, np.float64] + _floats = [np.float16, np.float32, np.float64] # float96 and float128 may or may not be defined in this particular # numpy build (it depends on platform and version). # Register them only if they are present + if hasattr(np, 'float_'): + # Prepend to preserve previous functionality + _floats = [np.float_] + _floats if hasattr(np, 'float96'): _floats.append(np.float96) if hasattr(np, 'float128'): @@ -1013,10 +1016,13 @@ def _finalize_numpy(np, available): # registration here (to bypass the deprecation warning) until we # finally remove all support for it numeric_types._native_boolean_types.add(t) - _complex = [np.complex_, np.complex64, np.complex128] + _complex = [np.complex64, np.complex128] # complex192 and complex256 may or may not be defined in this # particular numpy build (it depends on platform and version). # Register them only if they are present + if hasattr(np, 'np.complex_'): + # Prepend to preserve functionality + _complex = [np.complex_] + _complex if hasattr(np, 'complex192'): _complex.append(np.complex192) if hasattr(np, 'complex256'): diff --git a/pyomo/core/kernel/register_numpy_types.py b/pyomo/core/kernel/register_numpy_types.py index 86877be2230..b3405645d97 100644 --- a/pyomo/core/kernel/register_numpy_types.py +++ b/pyomo/core/kernel/register_numpy_types.py @@ -45,10 +45,12 @@ # Historically, the lists included several numpy aliases numpy_int_names.extend(('int_', 'intc', 'intp')) numpy_int.extend((numpy.int_, numpy.intc, numpy.intp)) - numpy_float_names.append('float_') - numpy_float.append(numpy.float_) - numpy_complex_names.append('complex_') - numpy_complex.append(numpy.complex_) + if hasattr(numpy, 'float_'): + numpy_float_names.append('float_') + numpy_float.append(numpy.float_) + if hasattr(numpy, 'complex_'): + numpy_complex_names.append('complex_') + numpy_complex.append(numpy.complex_) # Re-build the old numpy_* lists for t in native_boolean_types: diff --git a/pyomo/core/tests/unit/test_kernel_register_numpy_types.py b/pyomo/core/tests/unit/test_kernel_register_numpy_types.py index 91a0f571881..d49b3cd6b4a 100644 --- a/pyomo/core/tests/unit/test_kernel_register_numpy_types.py +++ b/pyomo/core/tests/unit/test_kernel_register_numpy_types.py @@ -34,7 +34,8 @@ # Reals numpy_float_names = [] if numpy_available: - numpy_float_names.append('float_') + if hasattr(numpy, 'float_'): + numpy_float_names.append('float_') numpy_float_names.append('float16') numpy_float_names.append('float32') numpy_float_names.append('float64') @@ -46,7 +47,8 @@ # Complex numpy_complex_names = [] if numpy_available: - numpy_complex_names.append('complex_') + if hasattr(numpy, 'complex_'): + numpy_complex_names.append('complex_') numpy_complex_names.append('complex64') numpy_complex_names.append('complex128') if hasattr(numpy, 'complex192'): diff --git a/pyomo/core/tests/unit/test_numvalue.py b/pyomo/core/tests/unit/test_numvalue.py index 1cccd3863ea..4d39a42ed70 100644 --- a/pyomo/core/tests/unit/test_numvalue.py +++ b/pyomo/core/tests/unit/test_numvalue.py @@ -552,10 +552,10 @@ def test_unknownNumericType(self): @unittest.skipUnless(numpy_available, "This test requires NumPy") def test_numpy_basic_float_registration(self): - self.assertIn(numpy.float_, native_numeric_types) - self.assertNotIn(numpy.float_, native_integer_types) - self.assertIn(numpy.float_, _native_boolean_types) - self.assertIn(numpy.float_, native_types) + self.assertIn(numpy.float64, native_numeric_types) + self.assertNotIn(numpy.float64, 
native_integer_types) + self.assertIn(numpy.float64, _native_boolean_types) + self.assertIn(numpy.float64, native_types) @unittest.skipUnless(numpy_available, "This test requires NumPy") def test_numpy_basic_int_registration(self): diff --git a/pyomo/core/tests/unit/test_sets.py b/pyomo/core/tests/unit/test_sets.py index 48869397aae..4d305ebab86 100644 --- a/pyomo/core/tests/unit/test_sets.py +++ b/pyomo/core/tests/unit/test_sets.py @@ -1051,7 +1051,7 @@ def setUp(self): self.instance = self.model.create_instance(currdir + "setA.dat") self.e1 = numpy.bool_(1) self.e2 = numpy.int_(2) - self.e3 = numpy.float_(3.0) + self.e3 = numpy.float64(3.0) self.e4 = numpy.int_(4) self.e5 = numpy.int_(5) self.e6 = numpy.int_(6) @@ -1068,7 +1068,7 @@ def test_numpy_int(self): def test_numpy_float(self): model = ConcreteModel() - model.A = Set(initialize=[numpy.float_(1.0), numpy.float_(0.0)]) + model.A = Set(initialize=[numpy.float64(1.0), numpy.float64(0.0)]) self.assertEqual(model.A.bounds(), (0, 1)) @@ -3213,7 +3213,7 @@ def test_numpy_membership(self): self.assertEqual(numpy.int_(1) in Boolean, True) self.assertEqual(numpy.bool_(True) in Boolean, True) self.assertEqual(numpy.bool_(False) in Boolean, True) - self.assertEqual(numpy.float_(1.1) in Boolean, False) + self.assertEqual(numpy.float64(1.1) in Boolean, False) self.assertEqual(numpy.int_(2) in Boolean, False) self.assertEqual(numpy.int_(0) in Integers, True) @@ -3222,7 +3222,7 @@ def test_numpy_membership(self): # identically to 1 self.assertEqual(numpy.bool_(True) in Integers, True) self.assertEqual(numpy.bool_(False) in Integers, True) - self.assertEqual(numpy.float_(1.1) in Integers, False) + self.assertEqual(numpy.float64(1.1) in Integers, False) self.assertEqual(numpy.int_(2) in Integers, True) self.assertEqual(numpy.int_(0) in Reals, True) @@ -3231,14 +3231,14 @@ def test_numpy_membership(self): # identically to 1 self.assertEqual(numpy.bool_(True) in Reals, True) self.assertEqual(numpy.bool_(False) in Reals, True) - self.assertEqual(numpy.float_(1.1) in Reals, True) + self.assertEqual(numpy.float64(1.1) in Reals, True) self.assertEqual(numpy.int_(2) in Reals, True) self.assertEqual(numpy.int_(0) in Any, True) self.assertEqual(numpy.int_(1) in Any, True) self.assertEqual(numpy.bool_(True) in Any, True) self.assertEqual(numpy.bool_(False) in Any, True) - self.assertEqual(numpy.float_(1.1) in Any, True) + self.assertEqual(numpy.float64(1.1) in Any, True) self.assertEqual(numpy.int_(2) in Any, True) def test_setargs1(self): From cb6723041d999380ef4452158b9dce99629020c0 Mon Sep 17 00:00:00 2001 From: Miranda Mundt Date: Mon, 17 Jun 2024 12:50:10 -0600 Subject: [PATCH 093/113] Change to insert --- pyomo/common/dependencies.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyomo/common/dependencies.py b/pyomo/common/dependencies.py index cf05b68612d..bbcea0b85d7 100644 --- a/pyomo/common/dependencies.py +++ b/pyomo/common/dependencies.py @@ -1005,7 +1005,7 @@ def _finalize_numpy(np, available): # Register them only if they are present if hasattr(np, 'float_'): # Prepend to preserve previous functionality - _floats = [np.float_] + _floats + _floats.insert(0, np.float_) if hasattr(np, 'float96'): _floats.append(np.float96) if hasattr(np, 'float128'): @@ -1022,7 +1022,7 @@ def _finalize_numpy(np, available): # Register them only if they are present if hasattr(np, 'np.complex_'): # Prepend to preserve functionality - _complex = [np.complex_] + _complex + _complex.insert(0, np.complex_) if hasattr(np, 'complex192'): 
_complex.append(np.complex192) if hasattr(np, 'complex256'): From 6f86e3231f0c82e4a42693030fb64ba1f58b51dc Mon Sep 17 00:00:00 2001 From: John Siirola Date: Mon, 24 Jun 2024 10:15:53 -0600 Subject: [PATCH 094/113] Switch writers to use str() (instead of repr()) for numeric constants In Python 3.x, str() and repr() are the same for float, and beginning in numpy 2.0 `repr(float64(n))` produces `numpy.float64(n)`, whereas str() produces `n` --- pyomo/repn/plugins/lp_writer.py | 24 ++++++++++++------------ pyomo/repn/plugins/nl_writer.py | 28 ++++++++++++++-------------- 2 files changed, 26 insertions(+), 26 deletions(-) diff --git a/pyomo/repn/plugins/lp_writer.py b/pyomo/repn/plugins/lp_writer.py index 627a54e3f68..814f79a4eb9 100644 --- a/pyomo/repn/plugins/lp_writer.py +++ b/pyomo/repn/plugins/lp_writer.py @@ -458,13 +458,13 @@ def write(self, model): addSymbol(con, label) ostream.write(f'\n{label}:\n') self.write_expression(ostream, repn, False) - ostream.write(f'>= {(lb - offset)!r}\n') + ostream.write(f'>= {(lb - offset)!s}\n') elif lb == ub: label = f'c_e_{symbol}_' addSymbol(con, label) ostream.write(f'\n{label}:\n') self.write_expression(ostream, repn, False) - ostream.write(f'= {(lb - offset)!r}\n') + ostream.write(f'= {(lb - offset)!s}\n') else: # We will need the constraint body twice. Generate # in a buffer so we only have to do that once. @@ -476,18 +476,18 @@ def write(self, model): addSymbol(con, label) ostream.write(f'\n{label}:\n') ostream.write(buf) - ostream.write(f'>= {(lb - offset)!r}\n') + ostream.write(f'>= {(lb - offset)!s}\n') label = f'r_u_{symbol}_' aliasSymbol(con, label) ostream.write(f'\n{label}:\n') ostream.write(buf) - ostream.write(f'<= {(ub - offset)!r}\n') + ostream.write(f'<= {(ub - offset)!s}\n') elif ub is not None: label = f'c_u_{symbol}_' addSymbol(con, label) ostream.write(f'\n{label}:\n') self.write_expression(ostream, repn, False) - ostream.write(f'<= {(ub - offset)!r}\n') + ostream.write(f'<= {(ub - offset)!s}\n') if with_debug_timing: # report the last constraint @@ -527,8 +527,8 @@ def write(self, model): # Note: Var.bounds guarantees the values are either (finite) # native_numeric_types or None lb, ub = v.bounds - lb = '-inf' if lb is None else repr(lb) - ub = '+inf' if ub is None else repr(ub) + lb = '-inf' if lb is None else str(lb) + ub = '+inf' if ub is None else str(ub) ostream.write(f"\n {lb} <= {v_symbol} <= {ub}") if integer_vars: @@ -565,7 +565,7 @@ def write(self, model): for v, w in getattr(soscon, 'get_items', soscon.items)(): if w.__class__ not in int_float: w = float(f) - ostream.write(f" {getSymbol(v)}:{w!r}\n") + ostream.write(f" {getSymbol(v)}:{w!s}\n") ostream.write("\nend\n") @@ -584,9 +584,9 @@ def write_expression(self, ostream, expr, is_objective): expr.linear.items(), key=lambda x: getVarOrder(x[0]) ): if coef < 0: - ostream.write(f'{coef!r} {getSymbol(getVar(vid))}\n') + ostream.write(f'{coef!s} {getSymbol(getVar(vid))}\n') else: - ostream.write(f'+{coef!r} {getSymbol(getVar(vid))}\n') + ostream.write(f'+{coef!s} {getSymbol(getVar(vid))}\n') quadratic = getattr(expr, 'quadratic', None) if quadratic: @@ -605,9 +605,9 @@ def _normalize_constraint(data): col = c1, c2 sym = f' {getSymbol(getVar(vid1))} * {getSymbol(getVar(vid2))}\n' if coef < 0: - return col, repr(coef) + sym + return col, str(coef) + sym else: - return col, '+' + repr(coef) + sym + return col, f'+{coef!s}{sym}' if is_objective: # diff --git a/pyomo/repn/plugins/nl_writer.py b/pyomo/repn/plugins/nl_writer.py index 43fd2fade68..a8966e44f71 100644 --- 
a/pyomo/repn/plugins/nl_writer.py +++ b/pyomo/repn/plugins/nl_writer.py @@ -1150,17 +1150,17 @@ def write(self, model): r_lines[idx] = "3" else: # _type = 4 # L == c == U - r_lines[idx] = f"4 {lb - expr_info.const!r}" + r_lines[idx] = f"4 {lb - expr_info.const!s}" n_equality += 1 elif lb is None: # _type = 1 # c <= U - r_lines[idx] = f"1 {ub - expr_info.const!r}" + r_lines[idx] = f"1 {ub - expr_info.const!s}" elif ub is None: # _type = 2 # L <= c - r_lines[idx] = f"2 {lb - expr_info.const!r}" + r_lines[idx] = f"2 {lb - expr_info.const!s}" else: # _type = 0 # L <= c <= U - r_lines[idx] = f"0 {lb - expr_info.const!r} {ub - expr_info.const!r}" + r_lines[idx] = f"0 {lb - expr_info.const!s} {ub - expr_info.const!s}" n_ranges += 1 expr_info.const = 0 # FIXME: this is a HACK to be compatible with the NLv1 @@ -1375,7 +1375,7 @@ def write(self, model): ostream.write(f"S{_field|_float} {len(_vals)} {name}\n") # Note: _SuffixData.compile() guarantees the value is int/float ostream.write( - ''.join(f"{_id} {_vals[_id]!r}\n" for _id in sorted(_vals)) + ''.join(f"{_id} {_vals[_id]!s}\n" for _id in sorted(_vals)) ) # @@ -1485,7 +1485,7 @@ def write(self, model): ostream.write(f"d{len(data.con)}\n") # Note: _SuffixData.compile() guarantees the value is int/float ostream.write( - ''.join(f"{_id} {data.con[_id]!r}\n" for _id in sorted(data.con)) + ''.join(f"{_id} {data.con[_id]!s}\n" for _id in sorted(data.con)) ) # @@ -1507,7 +1507,7 @@ def write(self, model): ) ostream.write( ''.join( - f'{var_idx} {val!r}{col_comments[var_idx]}\n' + f'{var_idx} {val!s}{col_comments[var_idx]}\n' for var_idx, val in _init_lines ) ) @@ -1548,13 +1548,13 @@ def write(self, model): if lb is None: # unbounded ostream.write(f"3{col_comments[var_idx]}\n") else: # == - ostream.write(f"4 {lb!r}{col_comments[var_idx]}\n") + ostream.write(f"4 {lb!s}{col_comments[var_idx]}\n") elif lb is None: # var <= ub - ostream.write(f"1 {ub!r}{col_comments[var_idx]}\n") + ostream.write(f"1 {ub!s}{col_comments[var_idx]}\n") elif ub is None: # lb <= body - ostream.write(f"2 {lb!r}{col_comments[var_idx]}\n") + ostream.write(f"2 {lb!s}{col_comments[var_idx]}\n") else: # lb <= body <= ub - ostream.write(f"0 {lb!r} {ub!r}{col_comments[var_idx]}\n") + ostream.write(f"0 {lb!s} {ub!s}{col_comments[var_idx]}\n") # # "k" lines (column offsets in Jacobian NNZ) @@ -1589,7 +1589,7 @@ def write(self, model): linear[_id] /= scaling_cache[_id] ostream.write(f'J{row_idx} {len(linear)}{row_comments[row_idx]}\n') for _id in sorted(linear, key=column_order.__getitem__): - ostream.write(f'{column_order[_id]} {linear[_id]!r}\n') + ostream.write(f'{column_order[_id]} {linear[_id]!s}\n') # # "G" lines (non-empty terms in the Objective) @@ -1605,7 +1605,7 @@ def write(self, model): linear[_id] /= scaling_cache[_id] ostream.write(f'G{obj_idx} {len(linear)}{row_comments[obj_idx + n_cons]}\n') for _id in sorted(linear, key=column_order.__getitem__): - ostream.write(f'{column_order[_id]} {linear[_id]!r}\n') + ostream.write(f'{column_order[_id]} {linear[_id]!s}\n') # Generate the return information eliminated_vars = [ @@ -2041,7 +2041,7 @@ def _write_v_line(self, expr_id, k): # ostream.write(f'V{self.next_V_line_id} {len(linear)} {k}{lbl}\n') for _id in sorted(linear, key=column_order.__getitem__): - ostream.write(f'{column_order[_id]} {linear[_id]!r}\n') + ostream.write(f'{column_order[_id]} {linear[_id]!s}\n') self._write_nl_expression(info[1], True) self.next_V_line_id += 1 From 2145a1684c726d8170460ef148be673459c3eac5 Mon Sep 17 00:00:00 2001 From: John Siirola Date: Mon, 
24 Jun 2024 11:06:37 -0600 Subject: [PATCH 095/113] Improve tokenization in baseline comparisons --- pyomo/common/unittest.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/pyomo/common/unittest.py b/pyomo/common/unittest.py index 84d962eb784..a947420a25d 100644 --- a/pyomo/common/unittest.py +++ b/pyomo/common/unittest.py @@ -807,6 +807,15 @@ def filter_file_contents(self, lines, abstol=None): item_list = [] items = line.strip().split() for i in items: + # Split up lists, dicts, and sets + while i and i[0] in '[{': + item_list.append(i[0]) + i = i[1:] + tail = [] + while i and i[-1] in ',:]}': + tail.append(i[-1]) + i = i[:-1] + # A few substitutions to get tests passing on pypy3 if ".inf" in i: i = i.replace(".inf", "inf") @@ -817,6 +826,9 @@ def filter_file_contents(self, lines, abstol=None): item_list.append(float(i)) except: item_list.append(i) + if tail: + tail.reverse() + item_list.extend(tail) # We can get printed results objects where the baseline is # exactly 0 (and omitted) and the test is slightly non-zero. From e5973d4014a63fe7093b3ea70292b14650f9f06f Mon Sep 17 00:00:00 2001 From: John Siirola Date: Mon, 24 Jun 2024 11:07:06 -0600 Subject: [PATCH 096/113] Map new numpy type representations back to pre-numpy2.0 behavior --- pyomo/common/unittest.py | 10 +++++++++- .../tests/unit/test_kernel_register_numpy_types.py | 5 ++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/pyomo/common/unittest.py b/pyomo/common/unittest.py index a947420a25d..5b6d1f003d8 100644 --- a/pyomo/common/unittest.py +++ b/pyomo/common/unittest.py @@ -783,6 +783,7 @@ def filter_fcn(self, line): return False def filter_file_contents(self, lines, abstol=None): + _numpy_scalar_re = re.compile(r'np.(int|float)\d+\(([^\)]+)\)') filtered = [] deprecated = None for line in lines: @@ -823,7 +824,14 @@ def filter_file_contents(self, lines, abstol=None): i = i.replace("null", "None") try: - item_list.append(float(i)) + # Numpy 2.x changed the repr for scalars. 
Convert + # the new scalar reprs back to the original (which + # were indistinguishable from python floats/ints) + np_match = _numpy_scalar_re.match(i) + if np_match: + item_list.append(float(np_match.group(2))) + else: + item_list.append(float(i)) except: item_list.append(i) if tail: diff --git a/pyomo/core/tests/unit/test_kernel_register_numpy_types.py b/pyomo/core/tests/unit/test_kernel_register_numpy_types.py index d49b3cd6b4a..8186c3d6028 100644 --- a/pyomo/core/tests/unit/test_kernel_register_numpy_types.py +++ b/pyomo/core/tests/unit/test_kernel_register_numpy_types.py @@ -16,7 +16,10 @@ # Boolean numpy_bool_names = [] if numpy_available: - numpy_bool_names.append('bool_') + if numpy.__version__[0] == '2': + numpy_bool_names.append('bool') + else: + numpy_bool_names.append('bool_') # Integers numpy_int_names = [] if numpy_available: From c88e0d5ad9130e24d8019052001196afe0da2222 Mon Sep 17 00:00:00 2001 From: John Siirola Date: Mon, 24 Jun 2024 13:03:57 -0600 Subject: [PATCH 097/113] Update near-0 filter based on new tokenizer --- pyomo/common/unittest.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/pyomo/common/unittest.py b/pyomo/common/unittest.py index 5b6d1f003d8..c78e003a07d 100644 --- a/pyomo/common/unittest.py +++ b/pyomo/common/unittest.py @@ -844,12 +844,13 @@ def filter_file_contents(self, lines, abstol=None): # results objects and remote them if they are within # tolerance of 0 if ( - len(item_list) == 2 - and item_list[0] == 'Value:' - and type(item_list[1]) is float - and abs(item_list[1]) < (abstol or 0) - and len(filtered[-1]) == 1 - and filtered[-1][0][-1] == ':' + len(item_list) == 3 + and item_list[0] == 'Value' + and item_list[1] == ':' + and type(item_list[2]) is float + and abs(item_list[2]) < (abstol or 0) + and len(filtered[-1]) == 2 + and filtered[-1][1] == ':' ): filtered.pop() else: From 3ea998aa71f297016fdee8541f232ed8746aaeff Mon Sep 17 00:00:00 2001 From: Miranda Mundt Date: Mon, 24 Jun 2024 14:17:54 -0600 Subject: [PATCH 098/113] Sneak in fix for codecov tokenless upload --- .github/workflows/test_branches.yml | 6 ++++++ .github/workflows/test_pr_and_main.yml | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/.github/workflows/test_branches.yml b/.github/workflows/test_branches.yml index 8ba04eec466..2689b97746b 100644 --- a/.github/workflows/test_branches.yml +++ b/.github/workflows/test_branches.yml @@ -855,6 +855,9 @@ jobs: token: ${{ secrets.PYOMO_CODECOV_TOKEN }} name: ${{ matrix.TARGET }} flags: ${{ matrix.TARGET }} + # downgrading after v0.7.0 broke tokenless upload + # see codecov/codecov-action#1487 + version: v0.6.0 fail_ci_if_error: true - name: Upload other coverage reports @@ -867,4 +870,7 @@ jobs: token: ${{ secrets.PYOMO_CODECOV_TOKEN }} name: ${{ matrix.TARGET }}/other flags: ${{ matrix.TARGET }},other + # downgrading after v0.7.0 broke tokenless upload + # see codecov/codecov-action#1487 + version: v0.6.0 fail_ci_if_error: true diff --git a/.github/workflows/test_pr_and_main.yml b/.github/workflows/test_pr_and_main.yml index bdf1f7e1aa5..c804372b18a 100644 --- a/.github/workflows/test_pr_and_main.yml +++ b/.github/workflows/test_pr_and_main.yml @@ -899,6 +899,9 @@ jobs: token: ${{ secrets.PYOMO_CODECOV_TOKEN }} name: ${{ matrix.TARGET }} flags: ${{ matrix.TARGET }} + # downgrading after v0.7.0 broke tokenless upload + # see codecov/codecov-action#1487 + version: v0.6.0 fail_ci_if_error: true - name: Upload other coverage reports @@ -911,4 +914,7 @@ jobs: token: ${{ secrets.PYOMO_CODECOV_TOKEN }} 
name: ${{ matrix.TARGET }}/other flags: ${{ matrix.TARGET }},other + # downgrading after v0.7.0 broke tokenless upload + # see codecov/codecov-action#1487 + version: v0.6.0 fail_ci_if_error: true From 1c0a0307e00848f03f4cf5c46efd8c4a114b1adf Mon Sep 17 00:00:00 2001 From: "Philipp Christophel (phchri)" Date: Wed, 26 Jun 2024 08:07:30 -0400 Subject: [PATCH 099/113] Fix warning and improve session handling --- pyomo/solvers/plugins/solvers/SAS.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/pyomo/solvers/plugins/solvers/SAS.py b/pyomo/solvers/plugins/solvers/SAS.py index eb5014fbd1a..fbbd6e5c3eb 100644 --- a/pyomo/solvers/plugins/solvers/SAS.py +++ b/pyomo/solvers/plugins/solvers/SAS.py @@ -28,7 +28,7 @@ ) from pyomo.common.tempfiles import TempfileManager from pyomo.core.base import Var -from pyomo.core.base.block import _BlockData +from pyomo.core.base.block import BlockData from pyomo.core.kernel.block import IBlock from pyomo.common.log import LogStream from pyomo.common.tee import capture_output, TeeStream @@ -157,7 +157,7 @@ def _presolve(self, *args, **kwds): # Store the model, too bad this is not done in the base class for arg in args: - if isinstance(arg, (_BlockData, IBlock)): + if isinstance(arg, (BlockData, IBlock)): # Store the instance self._instance = arg self._vars = [] @@ -282,16 +282,17 @@ def __init__(self, **kwds): self._python_api_exists = True self._sas.logger.setLevel(logger.level) - # Create the session only as its needed - self._sas_session = None - # Store other options for the SAS session self._session_options = kwds + # Create the session + self._sas_session = self._sas.SASsession(**self._session_options) + def __del__(self): # Close the session, if we created one if self._sas_session: self._sas_session.endsas() + del self._sas_session def _create_statement_str(self, statement): """Helper function to create the strings for the statements of the proc OPTLP/OPTMILP code.""" @@ -369,7 +370,10 @@ def _apply_solver(self): sas_options = "option notes nonumber nodate nosource pagesize=max;" # Get the current SAS session, submit the code and return the results - sas = self._sas_session = self._sas.SASsession(**self._session_options) + if not self._sas_session: + sas = self._sas_session = self._sas.SASsession(**self._session_options) + else: + sas = self._sas_session # Find the version of 9.4 we are using self._sasver = sas.sasver @@ -576,14 +580,16 @@ def __init__(self, **kwds): else: self._python_api_exists = True - # Create the session only as its needed - self._sas_session = None self._session_options = kwds + # Create the session + self._sas_session = self._sas.CAS(**self._session_options) + def __del__(self): # Close the session, if we created one if self._sas_session: self._sas_session.close() + del self._sas_session def _uploadMpsFile(self, s, unique): # Declare a unique table name for the mps table From 5d0e19c85ec64c9fe65246f240d1e84c92a64bb7 Mon Sep 17 00:00:00 2001 From: Miranda Mundt Date: Wed, 26 Jun 2024 09:11:51 -0600 Subject: [PATCH 100/113] Update codecov yml to new version --- .codecov.yml => codecov.yml | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) rename .codecov.yml => codecov.yml (54%) diff --git a/.codecov.yml b/codecov.yml similarity index 54% rename from .codecov.yml rename to codecov.yml index 6b88f948fe1..1184eaf877d 100644 --- a/.codecov.yml +++ b/codecov.yml @@ -1,19 +1,21 @@ +codecov: + notify: + # GHA: 5, Jenkins: 11 + # Accurate as of Jun 26, 2024 + # 
Potential to change when Python versions change + after_n_builds: 16 + wait_for_ci: true coverage: - range: "50...100" + range: + - 50.0 + - 100.0 status: + patch: + default: + # Force patches to be covered at the level of the codebase + threshold: 0.0 project: default: # Allow overall coverage to drop to avoid failures due to code # cleanup or CI unavailability/lag - threshold: 5% - patch: - default: - # Force patches to be covered at the level of the codebase - threshold: 0% -# ci: -# - !ci.appveyor.com -codecov: - notify: - # GHA: 4, Jenkins: 8 - after_n_builds: 12 # all - wait_for_ci: yes + threshold: 5.0 From e15ec25c554fae58bf936c65cf01f8bf010b1861 Mon Sep 17 00:00:00 2001 From: Miranda Mundt Date: Wed, 26 Jun 2024 09:13:34 -0600 Subject: [PATCH 101/113] Unpinning 0.6.0 --- .github/workflows/test_branches.yml | 6 ------ .github/workflows/test_pr_and_main.yml | 6 ------ 2 files changed, 12 deletions(-) diff --git a/.github/workflows/test_branches.yml b/.github/workflows/test_branches.yml index 2689b97746b..8ba04eec466 100644 --- a/.github/workflows/test_branches.yml +++ b/.github/workflows/test_branches.yml @@ -855,9 +855,6 @@ jobs: token: ${{ secrets.PYOMO_CODECOV_TOKEN }} name: ${{ matrix.TARGET }} flags: ${{ matrix.TARGET }} - # downgrading after v0.7.0 broke tokenless upload - # see codecov/codecov-action#1487 - version: v0.6.0 fail_ci_if_error: true - name: Upload other coverage reports @@ -870,7 +867,4 @@ jobs: token: ${{ secrets.PYOMO_CODECOV_TOKEN }} name: ${{ matrix.TARGET }}/other flags: ${{ matrix.TARGET }},other - # downgrading after v0.7.0 broke tokenless upload - # see codecov/codecov-action#1487 - version: v0.6.0 fail_ci_if_error: true diff --git a/.github/workflows/test_pr_and_main.yml b/.github/workflows/test_pr_and_main.yml index c804372b18a..bdf1f7e1aa5 100644 --- a/.github/workflows/test_pr_and_main.yml +++ b/.github/workflows/test_pr_and_main.yml @@ -899,9 +899,6 @@ jobs: token: ${{ secrets.PYOMO_CODECOV_TOKEN }} name: ${{ matrix.TARGET }} flags: ${{ matrix.TARGET }} - # downgrading after v0.7.0 broke tokenless upload - # see codecov/codecov-action#1487 - version: v0.6.0 fail_ci_if_error: true - name: Upload other coverage reports @@ -914,7 +911,4 @@ jobs: token: ${{ secrets.PYOMO_CODECOV_TOKEN }} name: ${{ matrix.TARGET }}/other flags: ${{ matrix.TARGET }},other - # downgrading after v0.7.0 broke tokenless upload - # see codecov/codecov-action#1487 - version: v0.6.0 fail_ci_if_error: true From 34b131ebd905c62eb69544ad41af0e53030eacda Mon Sep 17 00:00:00 2001 From: Miranda Mundt <55767766+mrmundt@users.noreply.github.com> Date: Mon, 1 Jul 2024 14:29:01 -0600 Subject: [PATCH 102/113] Changing date to re-trigger all jobs from scratch --- codecov.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/codecov.yml b/codecov.yml index 1184eaf877d..5c2e8b4177d 100644 --- a/codecov.yml +++ b/codecov.yml @@ -1,7 +1,7 @@ codecov: notify: # GHA: 5, Jenkins: 11 - # Accurate as of Jun 26, 2024 + # Accurate as of July 1, 2024 # Potential to change when Python versions change after_n_builds: 16 wait_for_ci: true From 6cce08fba429f16c6294fd8ff4a463a09d802af6 Mon Sep 17 00:00:00 2001 From: Miranda Mundt <55767766+mrmundt@users.noreply.github.com> Date: Tue, 2 Jul 2024 12:44:42 -0600 Subject: [PATCH 103/113] Change `_sas_session` check --- pyomo/solvers/plugins/solvers/SAS.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyomo/solvers/plugins/solvers/SAS.py b/pyomo/solvers/plugins/solvers/SAS.py index fbbd6e5c3eb..bc46f1c7465 100644 --- 
a/pyomo/solvers/plugins/solvers/SAS.py +++ b/pyomo/solvers/plugins/solvers/SAS.py @@ -290,7 +290,7 @@ def __init__(self, **kwds): def __del__(self): # Close the session, if we created one - if self._sas_session: + if hasattr(self, '_sas_session'): self._sas_session.endsas() del self._sas_session From ee9d446a882642dd8b7c2e269b9bd6a77a846169 Mon Sep 17 00:00:00 2001 From: Miranda Mundt Date: Tue, 2 Jul 2024 15:44:57 -0600 Subject: [PATCH 104/113] Add a try-except block around session creation --- pyomo/solvers/plugins/solvers/SAS.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/pyomo/solvers/plugins/solvers/SAS.py b/pyomo/solvers/plugins/solvers/SAS.py index bc46f1c7465..80ec8d2877d 100644 --- a/pyomo/solvers/plugins/solvers/SAS.py +++ b/pyomo/solvers/plugins/solvers/SAS.py @@ -286,11 +286,14 @@ def __init__(self, **kwds): self._session_options = kwds # Create the session - self._sas_session = self._sas.SASsession(**self._session_options) + try: + self._sas_session = self._sas.SASsession(**self._session_options) + except: + self._sas_session = None def __del__(self): # Close the session, if we created one - if hasattr(self, '_sas_session'): + if self._sas_session: self._sas_session.endsas() del self._sas_session @@ -583,7 +586,10 @@ def __init__(self, **kwds): self._session_options = kwds # Create the session - self._sas_session = self._sas.CAS(**self._session_options) + try: + self._sas_session = self._sas.CAS(**self._session_options) + except: + self._sas_session = None def __del__(self): # Close the session, if we created one From 0af995b5eeea07e59f014786d8c60e00ed82da8c Mon Sep 17 00:00:00 2001 From: Miranda Mundt Date: Wed, 3 Jul 2024 07:45:43 -0600 Subject: [PATCH 105/113] Change uuid import to attempt_import --- pyomo/solvers/plugins/solvers/SAS.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pyomo/solvers/plugins/solvers/SAS.py b/pyomo/solvers/plugins/solvers/SAS.py index 80ec8d2877d..865efa36dc3 100644 --- a/pyomo/solvers/plugins/solvers/SAS.py +++ b/pyomo/solvers/plugins/solvers/SAS.py @@ -12,13 +12,13 @@ import logging import sys from os import stat -from uuid import uuid4 from abc import ABC, abstractmethod from io import StringIO from pyomo.opt.base import ProblemFormat, ResultsFormat, OptSolver from pyomo.opt.base.solvers import SolverFactory from pyomo.common.collections import Bunch +from pyomo.common.dependencies import attempt_import from pyomo.opt.results import ( SolverResults, SolverStatus, @@ -34,6 +34,7 @@ from pyomo.common.tee import capture_output, TeeStream +uuid, uuid_available = attempt_import('uuid') logger = logging.getLogger("pyomo.solvers") @@ -338,7 +339,7 @@ def _apply_solver(self): rootnode_str = self._create_statement_str("rootnode") # Get a unique identifier, always use the same with different prefixes - unique = uuid4().hex[:16] + unique = uuid.uuid4().hex[:16] # Create unique filename for output datasets primalout_dataset_name = "pout" + unique From 945082fde32a4e2fedd4f6e5870d85500a7e5ff4 Mon Sep 17 00:00:00 2001 From: Miranda Mundt Date: Wed, 3 Jul 2024 10:41:53 -0600 Subject: [PATCH 106/113] Enable codecovcli uploader in jenkins script --- .jenkins.sh | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/.jenkins.sh b/.jenkins.sh index 696847fd92c..60ece9ca63d 100644 --- a/.jenkins.sh +++ b/.jenkins.sh @@ -20,8 +20,11 @@ # # CODECOV_TOKEN: the token to use when uploading results to codecov.io # -# CODECOV_ARGS: additional arguments to pass to the 
codecov uploader -# (e.g., to support SSL certificates) +# CODECOV_SOURCE_BRANCH: passed to the 'codecov-cli' command; branch of Pyomo +# (e.g., to enable correct codecov uploads) +# +# CODECOV_REPO_OWNER: passed to the 'codecov-cli' command; owner of repo +# (e.g., to enable correct codecov uploads) # # DISABLE_COVERAGE: if nonempty, then coverage analysis is disabled # @@ -208,16 +211,23 @@ if test -z "$MODE" -o "$MODE" == test; then coverage xml else CODECOV_JOB_NAME=`echo ${JOB_NAME} | sed -r 's/^(.*autotest_)?Pyomo_([^\/]+).*/\2/'`.$BUILD_NUMBER.$python + if test -z "$CODECOV_REPO_OWNER"; then + CODECOV_REPO_OWNER="pyomo" + fi + if test -z "CODECOV_SOURCE_BRANCH"; then + CODECOV_SOURCE_BRANCH="main" + fi i=0 while /bin/true; do i=$[$i+1] echo "Uploading coverage to codecov (attempt $i)" - codecov -X gcovcodecov -X gcov -X s3 --no-color \ - -t $CODECOV_TOKEN --root `pwd` -e OS,python \ - --name $CODECOV_JOB_NAME $CODECOV_ARGS \ - | tee .cover.upload - if test $? == 0 -a `grep -i error .cover.upload \ - | grep -v branch= | wc -l` -eq 0; then + codecovcli -v upload-process --sha $PYOMO_SOURCE_SHA \ + --fail-on-error --git-service github --token $CODECOV_TOKEN \ + --slug pyomo/pyomo --file coverage.xml --disable-search \ + --name $CODECOV_JOB_NAME \ + --branch $CODECOV_REPO_OWNER:$CODECOV_SOURCE_BRANCH \ + --env OS,python --network-root-folder `pwd` --plugin noop + if test $? == 0; then break elif test $i -ge 4; then exit 1 From 51568d1a2e92685d640f96e66ddedee3c35727b8 Mon Sep 17 00:00:00 2001 From: Bethany Nicholson Date: Wed, 3 Jul 2024 11:59:00 -0600 Subject: [PATCH 107/113] Fix typos from code review --- pyomo/solvers/plugins/solvers/SAS.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pyomo/solvers/plugins/solvers/SAS.py b/pyomo/solvers/plugins/solvers/SAS.py index 865efa36dc3..f2cfe279fdc 100644 --- a/pyomo/solvers/plugins/solvers/SAS.py +++ b/pyomo/solvers/plugins/solvers/SAS.py @@ -397,12 +397,12 @@ def _apply_solver(self): ) upload_pin = True - # Using a function call to make it easier to moch the version check + # Using a function call to make it easier to mock the version check major_version = self.sas_version()[0] minor_version = self.sas_version().split("M", 1)[1][0] if major_version == "9" and int(minor_version) < 5: raise NotImplementedError( - "Support for SAS 9.4 M4 and earlier is no implemented." + "Support for SAS 9.4 M4 and earlier is not implemented." 
) elif major_version == "9" and int(minor_version) == 5: # In 9.4M5 we have to create an MPS data set from an MPS file first @@ -529,7 +529,7 @@ def _apply_solver(self): if proc == "OPTLP": # Convert primal out data set to variable dictionary - # Use panda functions for efficiency + # Use pandas functions for efficiency primal_out = primal_out[["_VAR_", "_VALUE_", "_STATUS_", "_R_COST_"]] primal_out = primal_out.set_index("_VAR_", drop=True) primal_out = primal_out.rename( From 079b7bdc83df3b029b4c1d1eb707f9b569e0642d Mon Sep 17 00:00:00 2001 From: Miranda Mundt Date: Wed, 3 Jul 2024 15:53:38 -0600 Subject: [PATCH 108/113] Changing date to reset test suites --- codecov.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/codecov.yml b/codecov.yml index 5c2e8b4177d..318a907905f 100644 --- a/codecov.yml +++ b/codecov.yml @@ -1,7 +1,7 @@ codecov: notify: # GHA: 5, Jenkins: 11 - # Accurate as of July 1, 2024 + # Accurate as of July 3, 2024 # Potential to change when Python versions change after_n_builds: 16 wait_for_ci: true From 4f9acf0427f9715f21e5e678aa498a64d1e595e9 Mon Sep 17 00:00:00 2001 From: John Siirola Date: Sun, 7 Jul 2024 05:48:24 -0600 Subject: [PATCH 109/113] Explicitly create coverage.xml file --- .jenkins.sh | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/.jenkins.sh b/.jenkins.sh index 60ece9ca63d..51dd92ba123 100644 --- a/.jenkins.sh +++ b/.jenkins.sh @@ -205,11 +205,10 @@ if test -z "$MODE" -o "$MODE" == test; then # Note, that the PWD should still be $WORKSPACE/pyomo # coverage combine || exit 1 - coverage report -i + coverage report -i || exit 1 + coverage xml -i || exit 1 export OS=`uname` - if test -z "$CODECOV_TOKEN"; then - coverage xml - else + if test -n "$CODECOV_TOKEN"; then CODECOV_JOB_NAME=`echo ${JOB_NAME} | sed -r 's/^(.*autotest_)?Pyomo_([^\/]+).*/\2/'`.$BUILD_NUMBER.$python if test -z "$CODECOV_REPO_OWNER"; then CODECOV_REPO_OWNER="pyomo" From 2167033cbea0143aa3c31fce30a6fd731bc9de73 Mon Sep 17 00:00:00 2001 From: John Siirola Date: Sun, 7 Jul 2024 05:57:22 -0600 Subject: [PATCH 110/113] Fix spelling caught by new typos release --- .github/workflows/typos.toml | 2 ++ pyomo/common/config.py | 2 +- pyomo/common/tests/test_dependencies.py | 2 +- pyomo/core/base/set.py | 6 +++--- pyomo/core/plugins/transform/radix_linearization.py | 2 +- 5 files changed, 8 insertions(+), 6 deletions(-) diff --git a/.github/workflows/typos.toml b/.github/workflows/typos.toml index 7a38164898b..e24957290a5 100644 --- a/.github/workflows/typos.toml +++ b/.github/workflows/typos.toml @@ -67,4 +67,6 @@ RO = "RO" EOF = "EOF" # Ignore lst as shorthand for list lst = "lst" +# Abbreviation of gamma (used in stochpdegas1_automatic.py) +gam = "gam" # AS NEEDED: Add More Words Below diff --git a/pyomo/common/config.py b/pyomo/common/config.py index f9c3a725bb8..ebba2f2732a 100644 --- a/pyomo/common/config.py +++ b/pyomo/common/config.py @@ -996,7 +996,7 @@ class will still create ``c`` instances that only have the single :py:meth:`generate_documentation()`. The simplest is :py:meth:`display()`, which prints out the current values of the configuration object (and if it is a container type, all of it's -children). :py:meth:`generate_yaml_template` is simular to +children). :py:meth:`generate_yaml_template` is similar to :py:meth:`display`, but also includes the description fields as formatted comments. 
diff --git a/pyomo/common/tests/test_dependencies.py b/pyomo/common/tests/test_dependencies.py index 31f9520b613..6aedc428244 100644 --- a/pyomo/common/tests/test_dependencies.py +++ b/pyomo/common/tests/test_dependencies.py @@ -209,7 +209,7 @@ def test_and_or(self): _and_or = avail0 & avail1 | avail2 self.assertTrue(_and_or) - # Verify operator prescedence + # Verify operator precedence _or_and = avail0 | avail2 & avail2 self.assertTrue(_or_and) _or_and = (avail0 | avail2) & avail2 diff --git a/pyomo/core/base/set.py b/pyomo/core/base/set.py index 8b7c2a246d6..c0d427747c5 100644 --- a/pyomo/core/base/set.py +++ b/pyomo/core/base/set.py @@ -1986,7 +1986,7 @@ class InsertionOrder(object): class SortedOrder(object): pass - _ValidOrderedAuguments = {True, False, InsertionOrder, SortedOrder} + _ValidOrderedArguments = {True, False, InsertionOrder, SortedOrder} _UnorderedInitializers = {set} @overload @@ -2015,7 +2015,7 @@ def __new__(cls, *args, **kwds): ordered = kwds.get('ordered', Set.InsertionOrder) if ordered is True: ordered = Set.InsertionOrder - if ordered not in Set._ValidOrderedAuguments: + if ordered not in Set._ValidOrderedArguments: if inspect.isfunction(ordered): ordered = Set.SortedOrder else: @@ -2032,7 +2032,7 @@ def __new__(cls, *args, **kwds): str(_) for _ in sorted_robust( 'Set.' + x.__name__ if isinstance(x, type) else x - for x in Set._ValidOrderedAuguments.union( + for x in Set._ValidOrderedArguments.union( {''} ) ) diff --git a/pyomo/core/plugins/transform/radix_linearization.py b/pyomo/core/plugins/transform/radix_linearization.py index 92270655f31..3cfde28db3c 100644 --- a/pyomo/core/plugins/transform/radix_linearization.py +++ b/pyomo/core/plugins/transform/radix_linearization.py @@ -280,7 +280,7 @@ def _collect_bilinear(self, expr, bilin, quad): if type(expr) is PowExpression and value(expr._args[1]) == 2: # Note: directly testing the value of the exponent above is # safe: we have already verified that this expression is - # polynominal, so the exponent must be constant. + # polynomial, so the exponent must be constant. 
tmp = ProductExpression() tmp._numerator = [expr._args[0], expr._args[0]] tmp._denominator = [] From fb60c7f0f627bc2c63574d8ea8d6419b127b7605 Mon Sep 17 00:00:00 2001 From: John Siirola Date: Sun, 7 Jul 2024 06:10:11 -0600 Subject: [PATCH 111/113] (temporarily) disable checking idaes.org URL --- .github/workflows/test_pr_and_main.yml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test_pr_and_main.yml b/.github/workflows/test_pr_and_main.yml index bdf1f7e1aa5..c3634682462 100644 --- a/.github/workflows/test_pr_and_main.yml +++ b/.github/workflows/test_pr_and_main.yml @@ -68,9 +68,11 @@ jobs: verbose: true # How many times to retry a failed request (defaults to 1) retry_count: 3 - # Exclude Jenkins because it's behind a firewall; ignore RTD because - # a magically-generated string is triggering a failure - exclude_urls: https://pyomo-jenkins.sandia.gov/,https://pyomo.readthedocs.io/en/%s/errors.html + # Exclude: + # - Jenkins because it's behind a firewall + # - RTD because a magically-generated string triggers failures + # - IDAES.org because the webserver is currently down + exclude_urls: https://pyomo-jenkins.sandia.gov/,https://pyomo.readthedocs.io/en/%s/errors.html,https://idaes.org/ build: From 995209725dc31b462de174421f9c334f6cbe0157 Mon Sep 17 00:00:00 2001 From: John Siirola Date: Sun, 7 Jul 2024 06:13:16 -0600 Subject: [PATCH 112/113] (temporarily) disable checking idaes.org URL --- .github/workflows/test_pr_and_main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test_pr_and_main.yml b/.github/workflows/test_pr_and_main.yml index c3634682462..0a2a4f51143 100644 --- a/.github/workflows/test_pr_and_main.yml +++ b/.github/workflows/test_pr_and_main.yml @@ -72,7 +72,7 @@ jobs: # - Jenkins because it's behind a firewall # - RTD because a magically-generated string triggers failures # - IDAES.org because the webserver is currently down - exclude_urls: https://pyomo-jenkins.sandia.gov/,https://pyomo.readthedocs.io/en/%s/errors.html,https://idaes.org/ + exclude_urls: https://pyomo-jenkins.sandia.gov/,https://pyomo.readthedocs.io/en/%s/errors.html,https://idaes.org build: From 9a2704b9df28af91b0d58ecab31125317920b8ba Mon Sep 17 00:00:00 2001 From: John Siirola Date: Sun, 7 Jul 2024 07:45:26 -0600 Subject: [PATCH 113/113] Remove idaes.org skip (site is back up) --- .github/workflows/test_pr_and_main.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/test_pr_and_main.yml b/.github/workflows/test_pr_and_main.yml index 0a2a4f51143..0bfd12b998d 100644 --- a/.github/workflows/test_pr_and_main.yml +++ b/.github/workflows/test_pr_and_main.yml @@ -71,8 +71,7 @@ jobs: # Exclude: # - Jenkins because it's behind a firewall # - RTD because a magically-generated string triggers failures - # - IDAES.org because the webserver is currently down - exclude_urls: https://pyomo-jenkins.sandia.gov/,https://pyomo.readthedocs.io/en/%s/errors.html,https://idaes.org + exclude_urls: https://pyomo-jenkins.sandia.gov/,https://pyomo.readthedocs.io/en/%s/errors.html build:
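
The NumPy-related commits in this series (PATCH 092 "Update Pyomo for NumPy 2", PATCH 094's switch from repr() to str() in the LP/NL writers, and PATCH 096's baseline-comparison regex) all respond to the same two upstream changes: NumPy 2.0 removed the np.float_ / np.complex_ scalar aliases and changed the repr() of its scalar types. The short Python sketch below is illustrative only -- it is not part of any patch above and assumes nothing beyond an importable NumPy -- and shows the behavior those commits account for:

    import numpy as np

    x = np.float64(4.5)
    print(str(x))   # '4.5' on both NumPy 1.x and 2.x -- what the writers now emit via {...!s}
    print(repr(x))  # NumPy 1.x: '4.5'; NumPy 2.x: 'np.float64(4.5)' -- why {...!r} became unsafe

    # NumPy 2.0 also dropped the np.float_ / np.complex_ aliases, which is why
    # pyomo.common.dependencies now guards their registration with hasattr():
    if hasattr(np, 'float_'):
        print('np.float_ alias present (NumPy 1.x)')
    else:
        print('np.float_ removed; use np.float64 (NumPy 2.x)')

The tokenizer added in PATCH 096 applies the inverse mapping when filtering test baselines: it strips the new np.float64(...) wrapper so that values printed under NumPy 2.x still compare as plain floats against baselines generated under NumPy 1.x.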