diff --git a/.github/workflows/test_branches.yml b/.github/workflows/test_branches.yml index 8ba04eec466..03894a1cb20 100644 --- a/.github/workflows/test_branches.yml +++ b/.github/workflows/test_branches.yml @@ -519,7 +519,7 @@ jobs: $BARON_DIR = "${env:TPL_DIR}/baron" echo "$BARON_DIR" | ` Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - $URL = "https://www.minlp.com/downloads/xecs/baron/current/" + $URL = "https://minlp.com/downloads/xecs/baron/current/" if ( "${{matrix.TARGET}}" -eq "win" ) { $INSTALLER = "${env:DOWNLOAD_DIR}/baron_install.exe" $URL += "baron-win64.exe" diff --git a/.github/workflows/test_pr_and_main.yml b/.github/workflows/test_pr_and_main.yml index bdf1f7e1aa5..cc9760cbe5d 100644 --- a/.github/workflows/test_pr_and_main.yml +++ b/.github/workflows/test_pr_and_main.yml @@ -68,8 +68,9 @@ jobs: verbose: true # How many times to retry a failed request (defaults to 1) retry_count: 3 - # Exclude Jenkins because it's behind a firewall; ignore RTD because - # a magically-generated string is triggering a failure + # Exclude: + # - Jenkins because it's behind a firewall + # - RTD because a magically-generated string triggers failures exclude_urls: https://pyomo-jenkins.sandia.gov/,https://pyomo.readthedocs.io/en/%s/errors.html @@ -561,7 +562,7 @@ jobs: $BARON_DIR = "${env:TPL_DIR}/baron" echo "$BARON_DIR" | ` Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - $URL = "https://www.minlp.com/downloads/xecs/baron/current/" + $URL = "https://minlp.com/downloads/xecs/baron/current/" if ( "${{matrix.TARGET}}" -eq "win" ) { $INSTALLER = "${env:DOWNLOAD_DIR}/baron_install.exe" $URL += "baron-win64.exe" diff --git a/.github/workflows/typos.toml b/.github/workflows/typos.toml index 7a38164898b..80d50477ca4 100644 --- a/.github/workflows/typos.toml +++ b/.github/workflows/typos.toml @@ -38,6 +38,8 @@ caf = "caf" WRONLY = "WRONLY" # Ignore the name Hax Hax = "Hax" +# Ignore dout (short for dual output in SAS solvers) +dout = "dout" # Big Sur Sur = "Sur" # contrib package named mis and the acronym whence the name comes @@ -67,4 +69,6 @@ RO = "RO" EOF = "EOF" # Ignore lst as shorthand for list lst = "lst" +# Abbreviation of gamma (used in stochpdegas1_automatic.py) +gam = "gam" # AS NEEDED: Add More Words Below diff --git a/.jenkins.sh b/.jenkins.sh index 696847fd92c..8771427805d 100644 --- a/.jenkins.sh +++ b/.jenkins.sh @@ -20,8 +20,11 @@ # # CODECOV_TOKEN: the token to use when uploading results to codecov.io # -# CODECOV_ARGS: additional arguments to pass to the codecov uploader -# (e.g., to support SSL certificates) +# CODECOV_SOURCE_BRANCH: passed to the 'codecov-cli' command; branch of Pyomo +# (e.g., to enable correct codecov uploads) +# +# CODECOV_REPO_OWNER: passed to the 'codecov-cli' command; owner of repo +# (e.g., to enable correct codecov uploads) # # DISABLE_COVERAGE: if nonempty, then coverage analysis is disabled # @@ -202,22 +205,43 @@ if test -z "$MODE" -o "$MODE" == test; then # Note, that the PWD should still be $WORKSPACE/pyomo # coverage combine || exit 1 - coverage report -i + coverage report -i || exit 1 + coverage xml -i || exit 1 export OS=`uname` - if test -z "$CODECOV_TOKEN"; then - coverage xml - else - CODECOV_JOB_NAME=`echo ${JOB_NAME} | sed -r 's/^(.*autotest_)?Pyomo_([^\/]+).*/\2/'`.$BUILD_NUMBER.$python + if test -z "$PYOMO_SOURCE_SHA"; then + PYOMO_SOURCE_SHA=$GIT_COMMIT + fi + if test -n "$CODECOV_TOKEN" -a -n "$PYOMO_SOURCE_SHA"; then + CODECOV_JOB_NAME=$(echo ${JOB_NAME} \ + | sed -r 
's/^(.*autotest_)?Pyomo_([^\/]+).*/\2/').$BUILD_NUMBER.$python + if test -z "$CODECOV_REPO_OWNER"; then + if test -n "$PYOMO_SOURCE_REPO"; then + CODECOV_REPO_OWNER=$(echo "$PYOMO_SOURCE_REPO" | cut -d '/' -f 4) + elif test -n "$GIT_URL"; then + CODECOV_REPO_OWNER=$(echo "$GIT_URL" | cut -d '/' -f 4) + else + CODECOV_REPO_OWNER="" + fi + fi + if test -z "$CODECOV_SOURCE_BRANCH"; then + CODECOV_SOURCE_BRANCH=$(git branch -av --contains "$PYOMO_SOURCE_SHA" \ + | grep "${PYOMO_SOURCE_SHA:0:7}" | grep "/origin/" \ + | cut -d '/' -f 3 | cut -d' ' -f 1) + if test -z "$CODECOV_SOURCE_BRANCH"; then + CODECOV_SOURCE_BRANCH=main + fi + fi i=0 while /bin/true; do i=$[$i+1] echo "Uploading coverage to codecov (attempt $i)" - codecov -X gcovcodecov -X gcov -X s3 --no-color \ - -t $CODECOV_TOKEN --root `pwd` -e OS,python \ - --name $CODECOV_JOB_NAME $CODECOV_ARGS \ - | tee .cover.upload - if test $? == 0 -a `grep -i error .cover.upload \ - | grep -v branch= | wc -l` -eq 0; then + codecovcli -v upload-process --sha $PYOMO_SOURCE_SHA \ + --fail-on-error --git-service github --token $CODECOV_TOKEN \ + --slug pyomo/pyomo --file coverage.xml --disable-search \ + --name $CODECOV_JOB_NAME \ + --branch $CODECOV_REPO_OWNER:$CODECOV_SOURCE_BRANCH \ + --env OS,python --network-root-folder `pwd` --plugin noop + if test $? == 0; then break elif test $i -ge 4; then exit 1 diff --git a/.codecov.yml b/codecov.yml similarity index 54% rename from .codecov.yml rename to codecov.yml index 6b88f948fe1..318a907905f 100644 --- a/.codecov.yml +++ b/codecov.yml @@ -1,19 +1,21 @@ +codecov: + notify: + # GHA: 5, Jenkins: 11 + # Accurate as of July 3, 2024 + # Potential to change when Python versions change + after_n_builds: 16 + wait_for_ci: true coverage: - range: "50...100" + range: + - 50.0 + - 100.0 status: + patch: + default: + # Force patches to be covered at the level of the codebase + threshold: 0.0 project: default: # Allow overall coverage to drop to avoid failures due to code # cleanup or CI unavailability/lag - threshold: 5% - patch: - default: - # Force patches to be covered at the level of the codebase - threshold: 0% -# ci: -# - !ci.appveyor.com -codecov: - notify: - # GHA: 4, Jenkins: 8 - after_n_builds: 12 # all - wait_for_ci: yes + threshold: 5.0 diff --git a/doc/Archive/contributed_packages/gdpopt.rst b/doc/Archive/contributed_packages/gdpopt.rst index d550b0ced76..670d7633f6d 100644 --- a/doc/Archive/contributed_packages/gdpopt.rst +++ b/doc/Archive/contributed_packages/gdpopt.rst @@ -93,10 +93,10 @@ An example that includes the modeling approach may be found below. Variables: x : Size=1, Index=None Key : Lower : Value : Upper : Fixed : Stale : Domain - None : -1.2 : 0.0 : 2 : False : False : Reals + None : -1.2 : 0 : 2 : False : False : Reals y : Size=1, Index=None Key : Lower : Value : Upper : Fixed : Stale : Domain - None : -10 : 1.0 : 10 : False : False : Reals + None : -10 : 1 : 10 : False : False : Reals Objectives: objective : Size=1, Index=None, Active=True @@ -106,7 +106,7 @@ An example that includes the modeling approach may be found below. Constraints: c : Size=1 Key : Lower : Body : Upper - None : 1.0 : 1.0 : 1.0 + None : 1.0 : 1 : 1.0 .. 
note:: diff --git a/examples/pyomo/tutorials/set.dat b/examples/pyomo/tutorials/set.dat index ab0d00b43cc..e2ad04122d8 100644 --- a/examples/pyomo/tutorials/set.dat +++ b/examples/pyomo/tutorials/set.dat @@ -16,3 +16,6 @@ set S[5] := 2 3; set T[2] := 1 3; set T[5] := 2 3; + +set X[2] := 1; +set X[5] := 2 3; \ No newline at end of file diff --git a/examples/pyomo/tutorials/set.out b/examples/pyomo/tutorials/set.out index 818977f6155..dd1ef2d4335 100644 --- a/examples/pyomo/tutorials/set.out +++ b/examples/pyomo/tutorials/set.out @@ -1,4 +1,4 @@ -23 Set Declarations +24 Set Declarations A : Size=1, Index=None, Ordered=Insertion Key : Dimen : Domain : Size : Members None : 1 : Any : 3 : {1, 2, 3} @@ -89,5 +89,9 @@ 2 : 1 : Any : 5 : {1, 3, 5, 7, 9} 3 : 1 : Any : 5 : {1, 4, 7, 10, 13} 4 : 1 : Any : 5 : {1, 5, 9, 13, 17} + X : Size=2, Index=B, Ordered=Insertion + Key : Dimen : Domain : Size : Members + 2 : 1 : S[2] : 1 : {1,} + 5 : 1 : S[5] : 2 : {2, 3} -23 Declarations: A B C D E F G H Hsub I J K K_2 L M N O P R S T U V +24 Declarations: A B C D E F G H Hsub I J K K_2 L M N O P R S X T U V diff --git a/examples/pyomo/tutorials/set.py b/examples/pyomo/tutorials/set.py index a14301484c9..c1ea60b48ad 100644 --- a/examples/pyomo/tutorials/set.py +++ b/examples/pyomo/tutorials/set.py @@ -171,6 +171,13 @@ def P_init(model, i, j): # model.S = Set(model.B, within=model.A) +# +# Validation of a set array can also be linked to another set array. If so, the +# elements under each index must also be found under the corresponding index in +# the validation set array: +# +model.X = Set(model.B, within=model.S) + # # Validation of set arrays can also be performed with the _validate_ option. diff --git a/pyomo/common/config.py b/pyomo/common/config.py index f9c3a725bb8..ebba2f2732a 100644 --- a/pyomo/common/config.py +++ b/pyomo/common/config.py @@ -996,7 +996,7 @@ class will still create ``c`` instances that only have the single :py:meth:`generate_documentation()`. The simplest is :py:meth:`display()`, which prints out the current values of the configuration object (and if it is a container type, all of it's -children). :py:meth:`generate_yaml_template` is simular to +children). :py:meth:`generate_yaml_template` is similar to :py:meth:`display`, but also includes the description fields as formatted comments. diff --git a/pyomo/common/dependencies.py b/pyomo/common/dependencies.py index 4c9e43002ef..bbcea0b85d7 100644 --- a/pyomo/common/dependencies.py +++ b/pyomo/common/dependencies.py @@ -999,10 +999,13 @@ def _finalize_numpy(np, available): # registration here (to bypass the deprecation warning) until we # finally remove all support for it numeric_types._native_boolean_types.add(t) - _floats = [np.float_, np.float16, np.float32, np.float64] + _floats = [np.float16, np.float32, np.float64] # float96 and float128 may or may not be defined in this particular # numpy build (it depends on platform and version). 
# Register them only if they are present + if hasattr(np, 'float_'): + # Prepend to preserve previous functionality + _floats.insert(0, np.float_) if hasattr(np, 'float96'): _floats.append(np.float96) if hasattr(np, 'float128'): @@ -1013,10 +1016,13 @@ def _finalize_numpy(np, available): # registration here (to bypass the deprecation warning) until we # finally remove all support for it numeric_types._native_boolean_types.add(t) - _complex = [np.complex_, np.complex64, np.complex128] + _complex = [np.complex64, np.complex128] # complex192 and complex256 may or may not be defined in this # particular numpy build (it depends on platform and version). # Register them only if they are present + if hasattr(np, 'complex_'): + # Prepend to preserve previous functionality + _complex.insert(0, np.complex_) if hasattr(np, 'complex192'): _complex.append(np.complex192) if hasattr(np, 'complex256'): diff --git a/pyomo/common/tests/test_dependencies.py b/pyomo/common/tests/test_dependencies.py index 31f9520b613..6aedc428244 100644 --- a/pyomo/common/tests/test_dependencies.py +++ b/pyomo/common/tests/test_dependencies.py @@ -209,7 +209,7 @@ def test_and_or(self): _and_or = avail0 & avail1 | avail2 self.assertTrue(_and_or) - # Verify operator prescedence + # Verify operator precedence _or_and = avail0 | avail2 & avail2 self.assertTrue(_or_and) _or_and = (avail0 | avail2) & avail2 diff --git a/pyomo/common/unittest.py b/pyomo/common/unittest.py index 84d962eb784..c78e003a07d 100644 --- a/pyomo/common/unittest.py +++ b/pyomo/common/unittest.py @@ -783,6 +783,7 @@ def filter_fcn(self, line): return False def filter_file_contents(self, lines, abstol=None): + _numpy_scalar_re = re.compile(r'np.(int|float)\d+\(([^\)]+)\)') filtered = [] deprecated = None for line in lines: @@ -807,6 +808,15 @@ def filter_file_contents(self, lines, abstol=None): item_list = [] items = line.strip().split() for i in items: + # Split up lists, dicts, and sets + while i and i[0] in '[{': + item_list.append(i[0]) + i = i[1:] + tail = [] + while i and i[-1] in ',:]}': + tail.append(i[-1]) + i = i[:-1] + # A few substitutions to get tests passing on pypy3 if ".inf" in i: i = i.replace(".inf", "inf") @@ -814,9 +824,19 @@ def filter_file_contents(self, lines, abstol=None): i = i.replace("null", "None") try: - item_list.append(float(i)) + # Numpy 2.x changed the repr for scalars. Convert + # the new scalar reprs back to the original (which + # were indistinguishable from python floats/ints) + np_match = _numpy_scalar_re.match(i) + if np_match: + item_list.append(float(np_match.group(2))) + else: + item_list.append(float(i)) except: item_list.append(i) + if tail: + tail.reverse() + item_list.extend(tail) # We can get printed results objects where the baseline is # exactly 0 (and omitted) and the test is slightly non-zero.
@@ -824,12 +844,13 @@ def filter_file_contents(self, lines, abstol=None): # results objects and remote them if they are within # tolerance of 0 if ( - len(item_list) == 2 - and item_list[0] == 'Value:' - and type(item_list[1]) is float - and abs(item_list[1]) < (abstol or 0) - and len(filtered[-1]) == 1 - and filtered[-1][0][-1] == ':' + len(item_list) == 3 + and item_list[0] == 'Value' + and item_list[1] == ':' + and type(item_list[2]) is float + and abs(item_list[2]) < (abstol or 0) + and len(filtered[-1]) == 2 + and filtered[-1][1] == ':' ): filtered.pop() else: diff --git a/pyomo/contrib/appsi/base.py b/pyomo/contrib/appsi/base.py index 6d2b5ccfcd4..9c7da1eb60b 100644 --- a/pyomo/contrib/appsi/base.py +++ b/pyomo/contrib/appsi/base.py @@ -1007,7 +1007,7 @@ def add_constraints(self, cons: List[ConstraintData]): raise ValueError( 'constraint {name} has already been added'.format(name=con.name) ) - self._active_constraints[con] = (con.lower, con.body, con.upper) + self._active_constraints[con] = con.expr if self.use_extensions and cmodel_available: tmp = cmodel.prep_for_repn(con.body, self._expr_types) else: @@ -1363,40 +1363,13 @@ def update(self, timer: HierarchicalTimer = None): cons_to_remove_and_add = dict() need_to_set_objective = False if config.update_constraints: - cons_to_update = list() - sos_to_update = list() for c in current_cons_dict.keys(): - if c not in new_cons_set: - cons_to_update.append(c) + if c not in new_cons_set and c.expr is not self._active_constraints[c]: + cons_to_remove_and_add[c] = None + sos_to_update = [] for c in current_sos_dict.keys(): if c not in new_sos_set: sos_to_update.append(c) - for c in cons_to_update: - lower, body, upper = self._active_constraints[c] - new_lower, new_body, new_upper = c.lower, c.body, c.upper - if new_body is not body: - cons_to_remove_and_add[c] = None - continue - if new_lower is not lower: - if ( - type(new_lower) is NumericConstant - and type(lower) is NumericConstant - and new_lower.value == lower.value - ): - pass - else: - cons_to_remove_and_add[c] = None - continue - if new_upper is not upper: - if ( - type(new_upper) is NumericConstant - and type(upper) is NumericConstant - and new_upper.value == upper.value - ): - pass - else: - cons_to_remove_and_add[c] = None - continue self.remove_sos_constraints(sos_to_update) self.add_sos_constraints(sos_to_update) timer.stop('cons') diff --git a/pyomo/contrib/appsi/cmodel/src/fbbt_model.cpp b/pyomo/contrib/appsi/cmodel/src/fbbt_model.cpp index bd8d7dbf854..ca865d429e2 100644 --- a/pyomo/contrib/appsi/cmodel/src/fbbt_model.cpp +++ b/pyomo/contrib/appsi/cmodel/src/fbbt_model.cpp @@ -205,7 +205,7 @@ void process_fbbt_constraints(FBBTModel *model, PyomoExprTypes &expr_types, py::handle con_body; for (py::handle c : cons) { - lower_body_upper = active_constraints[c]; + lower_body_upper = c.attr("to_bounded_expression")(); con_lb = lower_body_upper[0]; con_body = lower_body_upper[1]; con_ub = lower_body_upper[2]; diff --git a/pyomo/contrib/appsi/cmodel/src/lp_writer.cpp b/pyomo/contrib/appsi/cmodel/src/lp_writer.cpp index 68baf2b8ae8..f33060ee523 100644 --- a/pyomo/contrib/appsi/cmodel/src/lp_writer.cpp +++ b/pyomo/contrib/appsi/cmodel/src/lp_writer.cpp @@ -289,7 +289,7 @@ void process_lp_constraints(py::list cons, py::object writer) { py::object nonlinear_expr; PyomoExprTypes expr_types = PyomoExprTypes(); for (py::handle c : cons) { - lower_body_upper = active_constraints[c]; + lower_body_upper = c.attr("to_bounded_expression")(); cname = getSymbol(c, labeler); repn = 
generate_standard_repn( lower_body_upper[1], "compute_values"_a = false, "quadratic"_a = true); diff --git a/pyomo/contrib/appsi/cmodel/src/nl_writer.cpp b/pyomo/contrib/appsi/cmodel/src/nl_writer.cpp index 8de6cc74ab4..854262496ea 100644 --- a/pyomo/contrib/appsi/cmodel/src/nl_writer.cpp +++ b/pyomo/contrib/appsi/cmodel/src/nl_writer.cpp @@ -527,7 +527,7 @@ void process_nl_constraints(NLWriter *nl_writer, PyomoExprTypes &expr_types, py::handle repn_nonlinear_expr; for (py::handle c : cons) { - lower_body_upper = active_constraints[c]; + lower_body_upper = c.attr("to_bounded_expression")(); repn = generate_standard_repn( lower_body_upper[1], "compute_values"_a = false, "quadratic"_a = false); _const = appsi_expr_from_pyomo_expr(repn.attr("constant"), var_map, diff --git a/pyomo/contrib/appsi/solvers/highs.py b/pyomo/contrib/appsi/solvers/highs.py index c948444839d..57a7b1eac72 100644 --- a/pyomo/contrib/appsi/solvers/highs.py +++ b/pyomo/contrib/appsi/solvers/highs.py @@ -481,7 +481,7 @@ def _remove_constraints(self, cons: List[ConstraintData]): indices_to_remove.append(con_ndx) self._mutable_helpers.pop(con, None) self._solver_model.deleteRows( - len(indices_to_remove), np.array(indices_to_remove) + len(indices_to_remove), np.sort(np.array(indices_to_remove)) ) con_ndx = 0 new_con_map = dict() diff --git a/pyomo/contrib/appsi/solvers/maingo.py b/pyomo/contrib/appsi/solvers/maingo.py index e52130061f7..c5860b42ce7 100644 --- a/pyomo/contrib/appsi/solvers/maingo.py +++ b/pyomo/contrib/appsi/solvers/maingo.py @@ -57,33 +57,13 @@ from pyomo.repn.util import valid_expr_ctypes_minlp -def _import_SolverModel(): - try: - from . import maingo_solvermodel - except ImportError: - raise - return maingo_solvermodel - - -maingo_solvermodel, solvermodel_available = attempt_import( - "maingo_solvermodel", importer=_import_SolverModel -) - -MaingoVar = namedtuple("MaingoVar", "type name lb ub init") - logger = logging.getLogger(__name__) - - -def _import_maingopy(): - try: - import maingopy - except ImportError: - MAiNGO._available = MAiNGO.Availability.NotFound - raise - return maingopy - - -maingopy, maingopy_available = attempt_import("maingopy", importer=_import_maingopy) +MaingoVar = namedtuple("MaingoVar", "type name lb ub init") +maingopy, maingopy_available = attempt_import("maingopy") +# Note that importing maingo_solvermodel will trigger the import of +# maingopy, so we defer that import using attempt_import (which will +# always succeed, even if maingopy is not available) +maingo_solvermodel = attempt_import("pyomo.contrib.appsi.solvers.maingo_solvermodel")[0] class MAiNGOConfig(MIPSolverConfig): @@ -185,9 +165,11 @@ def __init__(self, only_child_vars=False): self._last_results_object: Optional[MAiNGOResults] = None def available(self): - if not maingopy_available: - return self.Availability.NotFound - self._available = True + if self._available is None: + if maingopy_available: + MAiNGO._available = True + else: + MAiNGO._available = MAiNGO.Availability.NotFound return self._available def version(self): diff --git a/pyomo/contrib/appsi/solvers/maingo_solvermodel.py b/pyomo/contrib/appsi/solvers/maingo_solvermodel.py index ca746c4a9b7..b12a386284c 100644 --- a/pyomo/contrib/appsi/solvers/maingo_solvermodel.py +++ b/pyomo/contrib/appsi/solvers/maingo_solvermodel.py @@ -28,15 +28,7 @@ from pyomo.repn.util import valid_expr_ctypes_minlp -def _import_maingopy(): - try: - import maingopy - except ImportError: - raise - return maingopy - - -maingopy, maingopy_available = attempt_import("maingopy", 
importer=_import_maingopy) +maingopy, maingopy_available = attempt_import("maingopy") _plusMinusOne = {1, -1} @@ -219,7 +211,7 @@ def _linear_to_maingo(self, node): return sum(values) -class SolverModel(maingopy.MAiNGOmodel): +class SolverModel(maingopy.MAiNGOmodel if maingopy_available else object): def __init__(self, var_list, objective, con_list, idmap, logger): maingopy.MAiNGOmodel.__init__(self) self._var_list = var_list diff --git a/pyomo/contrib/appsi/solvers/tests/test_highs_persistent.py b/pyomo/contrib/appsi/solvers/tests/test_highs_persistent.py index b26f45ff2cc..4d8251e0de9 100644 --- a/pyomo/contrib/appsi/solvers/tests/test_highs_persistent.py +++ b/pyomo/contrib/appsi/solvers/tests/test_highs_persistent.py @@ -80,6 +80,43 @@ def test_mutable_params_with_remove_vars(self): res = opt.solve(m) self.assertAlmostEqual(res.best_feasible_objective, -9) + def test_fix_and_unfix(self): + # Tests issue https://github.com/Pyomo/pyomo/issues/3127 + + m = pe.ConcreteModel() + m.x = pe.Var(domain=pe.Binary) + m.y = pe.Var(domain=pe.Binary) + m.fx = pe.Var(domain=pe.NonNegativeReals) + m.fy = pe.Var(domain=pe.NonNegativeReals) + m.c1 = pe.Constraint(expr=m.fx <= m.x) + m.c2 = pe.Constraint(expr=m.fy <= m.y) + m.c3 = pe.Constraint(expr=m.x + m.y <= 1) + + m.obj = pe.Objective(expr=m.fx * 0.5 + m.fy * 0.4, sense=pe.maximize) + + opt = Highs() + + # solution 1 has m.x == 1 and m.y == 0 + r = opt.solve(m) + self.assertAlmostEqual(m.fx.value, 1, places=5) + self.assertAlmostEqual(m.fy.value, 0, places=5) + self.assertAlmostEqual(r.best_feasible_objective, 0.5, places=5) + + # solution 2 has m.x == 0 and m.y == 1 + m.y.fix(1) + r = opt.solve(m) + self.assertAlmostEqual(m.fx.value, 0, places=5) + self.assertAlmostEqual(m.fy.value, 1, places=5) + self.assertAlmostEqual(r.best_feasible_objective, 0.4, places=5) + + # solution 3 should be equal solution 1 + m.y.unfix() + m.x.fix(1) + r = opt.solve(m) + self.assertAlmostEqual(m.fx.value, 1, places=5) + self.assertAlmostEqual(m.fy.value, 0, places=5) + self.assertAlmostEqual(r.best_feasible_objective, 0.5, places=5) + def test_capture_highs_output(self): # tests issue #3003 # diff --git a/pyomo/contrib/community_detection/community_graph.py b/pyomo/contrib/community_detection/community_graph.py index 889940b5996..c67a8cd6690 100644 --- a/pyomo/contrib/community_detection/community_graph.py +++ b/pyomo/contrib/community_detection/community_graph.py @@ -123,7 +123,7 @@ def generate_model_graph( # Create a list of the variable numbers that occur in the given constraint equation numbered_variables_in_constraint_equation = [ component_number_map[constraint_variable] - for constraint_variable in identify_variables(model_constraint.body) + for constraint_variable in identify_variables(model_constraint.expr) ] # Update constraint_variable_map diff --git a/pyomo/contrib/fbbt/expression_bounds_walker.py b/pyomo/contrib/fbbt/expression_bounds_walker.py index cb287d54df5..3cb32fcbf29 100644 --- a/pyomo/contrib/fbbt/expression_bounds_walker.py +++ b/pyomo/contrib/fbbt/expression_bounds_walker.py @@ -232,15 +232,15 @@ def _handle_unknowable_bounds(visitor, node, arg): def _handle_equality(visitor, node, arg1, arg2): - return eq(*arg1, *arg2) + return eq(*arg1, *arg2, feasibility_tol=visitor.feasibility_tol) def _handle_inequality(visitor, node, arg1, arg2): - return ineq(*arg1, *arg2) + return ineq(*arg1, *arg2, feasibility_tol=visitor.feasibility_tol) def _handle_ranged(visitor, node, arg1, arg2, arg3): - return ranged(*arg1, *arg2, *arg3) + return ranged(*arg1, 
*arg2, *arg3, feasibility_tol=visitor.feasibility_tol) def _handle_expr_if(visitor, node, arg1, arg2, arg3): diff --git a/pyomo/contrib/fbbt/fbbt.py b/pyomo/contrib/fbbt/fbbt.py index 1507c4a3cc5..4bd0e4552a1 100644 --- a/pyomo/contrib/fbbt/fbbt.py +++ b/pyomo/contrib/fbbt/fbbt.py @@ -12,6 +12,7 @@ from collections import defaultdict from pyomo.common.collections import ComponentMap, ComponentSet from pyomo.contrib.fbbt.expression_bounds_walker import ExpressionBoundsVisitor +import pyomo.core.expr.relational_expr as relational_expr import pyomo.core.expr.numeric_expr as numeric_expr from pyomo.core.expr.visitor import ( ExpressionValueVisitor, @@ -80,6 +81,27 @@ class FBBTException(PyomoException): pass +def _prop_bnds_leaf_to_root_equality(visitor, node, arg1, arg2): + bnds_dict = visitor.bnds_dict + bnds_dict[node] = interval.eq( + *bnds_dict[arg1], *bnds_dict[arg2], visitor.feasibility_tol + ) + + +def _prop_bnds_leaf_to_root_inequality(visitor, node, arg1, arg2): + bnds_dict = visitor.bnds_dict + bnds_dict[node] = interval.ineq( + *bnds_dict[arg1], *bnds_dict[arg2], visitor.feasibility_tol + ) + + +def _prop_bnds_leaf_to_root_ranged(visitor, node, arg1, arg2, arg3): + bnds_dict = visitor.bnds_dict + bnds_dict[node] = interval.ranged( + *bnds_dict[arg1], *bnds_dict[arg2], *bnds_dict[arg3], visitor.feasibility_tol + ) + + def _prop_bnds_leaf_to_root_ProductExpression(visitor, node, arg1, arg2): """ @@ -367,6 +389,9 @@ def _prop_bnds_leaf_to_root_NamedExpression(visitor, node, expr): numeric_expr.UnaryFunctionExpression: _prop_bnds_leaf_to_root_UnaryFunctionExpression, numeric_expr.LinearExpression: _prop_bnds_leaf_to_root_SumExpression, numeric_expr.AbsExpression: _prop_bnds_leaf_to_root_abs, + relational_expr.EqualityExpression: _prop_bnds_leaf_to_root_equality, + relational_expr.InequalityExpression: _prop_bnds_leaf_to_root_inequality, + relational_expr.RangedExpression: _prop_bnds_leaf_to_root_ranged, ExpressionData: _prop_bnds_leaf_to_root_NamedExpression, ScalarExpression: _prop_bnds_leaf_to_root_NamedExpression, ObjectiveData: _prop_bnds_leaf_to_root_NamedExpression, @@ -375,6 +400,43 @@ def _prop_bnds_leaf_to_root_NamedExpression(visitor, node, expr): ) +def _prop_bnds_root_to_leaf_equality(node, bnds_dict, feasibility_tol): + assert bnds_dict[node][1] # This expression is feasible + arg1, arg2 = node.args + lb1, ub1 = bnds_dict[arg1] + lb2, ub2 = bnds_dict[arg2] + bnds_dict[arg1] = bnds_dict[arg2] = max(lb1, lb2), min(ub1, ub2) + + +def _prop_bnds_root_to_leaf_inequality(node, bnds_dict, feasibility_tol): + assert bnds_dict[node][1] # This expression is feasible + arg1, arg2 = node.args + lb1, ub1 = bnds_dict[arg1] + lb2, ub2 = bnds_dict[arg2] + if lb1 > lb2: + bnds_dict[arg2] = lb1, ub2 + if ub1 > ub2: + bnds_dict[arg1] = lb1, ub2 + + +def _prop_bnds_root_to_leaf_ranged(node, bnds_dict, feasibility_tol): + assert bnds_dict[node][1] # This expression is feasible + arg1, arg2, arg3 = node.args + lb1, ub1 = bnds_dict[arg1] + lb2, ub2 = bnds_dict[arg2] + lb3, ub3 = bnds_dict[arg3] + if lb1 > lb2: + bnds_dict[arg2] = lb1, ub2 + lb2 = lb1 + if lb2 > lb3: + bnds_dict[arg3] = lb2, ub3 + if ub2 > ub3: + bnds_dict[arg2] = lb2, ub3 + ub2 = ub3 + if ub1 > ub2: + bnds_dict[arg1] = lb1, ub2 + + def _prop_bnds_root_to_leaf_ProductExpression(node, bnds_dict, feasibility_tol): """ @@ -953,6 +1015,16 @@ def _prop_bnds_root_to_leaf_NamedExpression(node, bnds_dict, feasibility_tol): _prop_bnds_root_to_leaf_map[ObjectiveData] = _prop_bnds_root_to_leaf_NamedExpression 
_prop_bnds_root_to_leaf_map[ScalarObjective] = _prop_bnds_root_to_leaf_NamedExpression +_prop_bnds_root_to_leaf_map[relational_expr.EqualityExpression] = ( + _prop_bnds_root_to_leaf_equality +) +_prop_bnds_root_to_leaf_map[relational_expr.InequalityExpression] = ( + _prop_bnds_root_to_leaf_inequality +) +_prop_bnds_root_to_leaf_map[relational_expr.RangedExpression] = ( + _prop_bnds_root_to_leaf_ranged +) + def _check_and_reset_bounds(var, lb, ub): """ @@ -1250,36 +1322,19 @@ def _fbbt_con(con, config): # a walker to propagate bounds from the variables to the root visitorA = _FBBTVisitorLeafToRoot(bnds_dict, feasibility_tol=config.feasibility_tol) - visitorA.walk_expression(con.body) + visitorA.walk_expression(con.expr) - # Now we need to replace the bounds in bnds_dict for the root - # node with the bounds on the constraint (if those bounds are - # better). - _lb = value(con.lower) - _ub = value(con.upper) - if _lb is None: - _lb = -interval.inf - if _ub is None: - _ub = interval.inf - - lb, ub = bnds_dict[con.body] + always_feasible, possibly_feasible = bnds_dict[con.expr] # check if the constraint is infeasible - if lb > _ub + config.feasibility_tol or ub < _lb - config.feasibility_tol: + if not possibly_feasible: raise InfeasibleConstraintException( 'Detected an infeasible constraint during FBBT: {0}'.format(str(con)) ) # check if the constraint is always satisfied - if config.deactivate_satisfied_constraints: - if lb >= _lb - config.feasibility_tol and ub <= _ub + config.feasibility_tol: - con.deactivate() - - if _lb > lb: - lb = _lb - if _ub < ub: - ub = _ub - bnds_dict[con.body] = (lb, ub) + if config.deactivate_satisfied_constraints and always_feasible: + con.deactivate() # Now, propagate bounds back from the root to the variables visitorB = _FBBTVisitorRootToLeaf( @@ -1287,7 +1342,7 @@ def _fbbt_con(con, config): integer_tol=config.integer_tol, feasibility_tol=config.feasibility_tol, ) - visitorB.dfs_postorder_stack(con.body) + visitorB.dfs_postorder_stack(con.expr) new_var_bounds = ComponentMap() for _node, _bnds in bnds_dict.items(): @@ -1334,7 +1389,7 @@ def _fbbt_block(m, config): for c in m.component_data_objects( ctype=Constraint, active=True, descend_into=config.descend_into, sort=True ): - for v in identify_variables(c.body): + for v in identify_variables(c.expr): if v not in var_to_con_map: var_to_con_map[v] = list() if v.lb is None: @@ -1521,14 +1576,14 @@ def __init__(self, comp): if comp.ctype == Constraint: if comp.is_indexed(): for c in comp.values(): - self._vars.update(identify_variables(c.body)) + self._vars.update(identify_variables(c.expr)) else: - self._vars.update(identify_variables(comp.body)) + self._vars.update(identify_variables(comp.expr)) else: for c in comp.component_data_objects( Constraint, descend_into=True, active=True, sort=True ): - self._vars.update(identify_variables(c.body)) + self._vars.update(identify_variables(c.expr)) def save_bounds(self): bnds = ComponentMap() diff --git a/pyomo/contrib/fbbt/interval.py b/pyomo/contrib/fbbt/interval.py index a12d1a4529f..4b93d6e3f31 100644 --- a/pyomo/contrib/fbbt/interval.py +++ b/pyomo/contrib/fbbt/interval.py @@ -57,7 +57,7 @@ def BoolFlag(val): return _true if val else _false -def ineq(xl, xu, yl, yu): +def ineq(xl, xu, yl, yu, feasibility_tol): """Compute the "bounds" on an InequalityExpression Note this is *not* performing interval arithmetic: we are @@ -67,9 +67,9 @@ def ineq(xl, xu, yl, yu): """ ans = [] - if yl < xu: + if yl < xu - feasibility_tol: ans.append(_false) - if xl <= yu: + if xl <= yu + 
feasibility_tol: ans.append(_true) assert ans if len(ans) == 1: @@ -77,7 +77,7 @@ def ineq(xl, xu, yl, yu): return tuple(ans) -def eq(xl, xu, yl, yu): +def eq(xl, xu, yl, yu, feasibility_tol): """Compute the "bounds" on an EqualityExpression Note this is *not* performing interval arithmetic: we are @@ -87,9 +87,13 @@ def eq(xl, xu, yl, yu): """ ans = [] - if xl != xu or yl != yu or xl != yl: + if ( + abs(xl - xu) > feasibility_tol + or abs(yl - yu) > feasibility_tol + or abs(xl - yl) > feasibility_tol + ): ans.append(_false) - if xl <= yu and yl <= xu: + if xl <= yu + feasibility_tol and yl <= xu + feasibility_tol: ans.append(_true) assert ans if len(ans) == 1: @@ -97,7 +101,7 @@ def eq(xl, xu, yl, yu): return tuple(ans) -def ranged(xl, xu, yl, yu, zl, zu): +def ranged(xl, xu, yl, yu, zl, zu, feasibility_tol): """Compute the "bounds" on a RangedExpression Note this is *not* performing interval arithmetic: we are @@ -106,8 +110,8 @@ def ranged(xl, xu, yl, yu, zl, zu): `z` and `z`, `y` can be outside the range `x` and `z`, or both. """ - lb = ineq(xl, xu, yl, yu) - ub = ineq(yl, yu, zl, zu) + lb = ineq(xl, xu, yl, yu, feasibility_tol) + ub = ineq(yl, yu, zl, zu, feasibility_tol) ans = [] if not lb[0] or not ub[0]: ans.append(_false) diff --git a/pyomo/contrib/fbbt/tests/test_fbbt.py b/pyomo/contrib/fbbt/tests/test_fbbt.py index f7d08d11215..83e69233bb5 100644 --- a/pyomo/contrib/fbbt/tests/test_fbbt.py +++ b/pyomo/contrib/fbbt/tests/test_fbbt.py @@ -1335,3 +1335,31 @@ def test_named_expr(self): class TestFBBT(FbbtTestBase, unittest.TestCase): def setUp(self) -> None: self.tightener = fbbt + + def test_ranged_expression(self): + # The python version of FBBT is slightly more flexible than + # APPSI's cmodel (it allows - and correctly handles - + # RangedExpressions with variable lower / upper bounds). If we + # ever port that functionality into APPSI, then this test can be + # moved into the base class. + m = pyo.ConcreteModel() + m.l = pyo.Var(bounds=(2, None)) + m.x = pyo.Var() + m.u = pyo.Var(bounds=(None, 8)) + m.c = pyo.Constraint(expr=pyo.inequality(m.l, m.x, m.u)) + self.tightener(m) + self.tightener(m) + self.assertEqual(m.l.bounds, (2, 8)) + self.assertEqual(m.x.bounds, (2, 8)) + self.assertEqual(m.u.bounds, (2, 8)) + + m = pyo.ConcreteModel() + m.l = pyo.Var(bounds=(2, None)) + m.x = pyo.Var(bounds=(3, 7)) + m.u = pyo.Var(bounds=(None, 8)) + m.c = pyo.Constraint(expr=pyo.inequality(m.l, m.x, m.u)) + self.tightener(m) + self.tightener(m) + self.assertEqual(m.l.bounds, (2, 7)) + self.assertEqual(m.x.bounds, (3, 7)) + self.assertEqual(m.u.bounds, (3, 8)) diff --git a/pyomo/contrib/incidence_analysis/scc_solver.py b/pyomo/contrib/incidence_analysis/scc_solver.py index 378647c190c..db201dccb0a 100644 --- a/pyomo/contrib/incidence_analysis/scc_solver.py +++ b/pyomo/contrib/incidence_analysis/scc_solver.py @@ -66,7 +66,14 @@ def generate_strongly_connected_components( ) ) - assert len(variables) == len(constraints) + if len(variables) != len(constraints): + nvar = len(variables) + ncon = len(constraints) + raise RuntimeError( + "generate_strongly_connected_components only supports systems with the" + f" same numbers of variables and equality constraints. Got {nvar}" + f" variables and {ncon} constraints." + ) if igraph is None: igraph = IncidenceGraphInterface() @@ -78,6 +85,8 @@ def generate_strongly_connected_components( subsets, include_fixed=include_fixed ): # TODO: How does len scale for reference-to-list? 
+ # If this assert fails, it may be due to a bug in block_triangularize + # or generate_subsystem_block. assert len(block.vars) == len(block.cons) yield (block, inputs) diff --git a/pyomo/contrib/incidence_analysis/tests/test_scc_solver.py b/pyomo/contrib/incidence_analysis/tests/test_scc_solver.py index b75f93e4a12..ef4853d7e9a 100644 --- a/pyomo/contrib/incidence_analysis/tests/test_scc_solver.py +++ b/pyomo/contrib/incidence_analysis/tests/test_scc_solver.py @@ -501,5 +501,22 @@ def test_with_inequalities(self): self.assertEqual(m.x[3].value, 1.0) +@unittest.skipUnless(scipy_available, "SciPy is not available") +@unittest.skipUnless(networkx_available, "NetworkX is not available") +class TestExceptions(unittest.TestCase): + def test_nonsquare_system(self): + m = pyo.ConcreteModel() + m.x = pyo.Var([1, 2], initialize=1) + m.eq = pyo.Constraint(expr=m.x[1] + m.x[2] == 1) + + msg = "Got 2 variables and 1 constraints" + with self.assertRaisesRegex(RuntimeError, msg): + list( + generate_strongly_connected_components( + constraints=[m.eq], variables=[m.x[1], m.x[2]] + ) + ) + + if __name__ == "__main__": unittest.main() diff --git a/pyomo/contrib/pyros/tests/test_grcs.py b/pyomo/contrib/pyros/tests/test_grcs.py index f7efec4d6e7..f2954750a16 100644 --- a/pyomo/contrib/pyros/tests/test_grcs.py +++ b/pyomo/contrib/pyros/tests/test_grcs.py @@ -42,6 +42,7 @@ from pyomo.contrib.pyros.util import get_vars_from_component from pyomo.contrib.pyros.util import identify_objective_functions from pyomo.common.collections import Bunch +from pyomo.repn.plugins import nl_writer as pyomo_nl_writer import time import math from pyomo.contrib.pyros.util import time_code @@ -68,7 +69,7 @@ from pyomo.common.dependencies import numpy as np, numpy_available from pyomo.common.dependencies import scipy as sp, scipy_available from pyomo.environ import maximize as pyo_max -from pyomo.common.errors import ApplicationError +from pyomo.common.errors import ApplicationError, InfeasibleConstraintException from pyomo.opt import ( SolverResults, SolverStatus, @@ -4616,6 +4617,76 @@ def test_discrete_separation_subsolver_error(self): ), ) + @unittest.skipUnless(ipopt_available, "IPOPT is not available.") + def test_pyros_nl_writer_tol(self): + """ + Test PyROS subsolver call routine behavior + with respect to the NL writer tolerance is as + expected. + """ + m = ConcreteModel() + m.q = Param(initialize=1, mutable=True) + m.x1 = Var(initialize=1, bounds=(0, 1)) + m.x2 = Var(initialize=2, bounds=(0, m.q)) + m.obj = Objective(expr=m.x1 + m.x2) + + # fixed just inside the PyROS-specified NL writer tolerance. + m.x1.fix(m.x1.upper + 9.9e-5) + + current_nl_writer_tol = pyomo_nl_writer.TOL + ipopt_solver = SolverFactory("ipopt") + pyros_solver = SolverFactory("pyros") + + pyros_solver.solve( + model=m, + first_stage_variables=[m.x1], + second_stage_variables=[m.x2], + uncertain_params=[m.q], + uncertainty_set=BoxSet([[0, 1]]), + local_solver=ipopt_solver, + global_solver=ipopt_solver, + decision_rule_order=0, + solve_master_globally=False, + bypass_global_separation=True, + ) + + self.assertEqual( + pyomo_nl_writer.TOL, + current_nl_writer_tol, + msg="Pyomo NL writer tolerance not restored as expected.", + ) + + # fixed just outside the PyROS-specified NL writer tolerance. + # this should be exceptional. 
+ m.x1.fix(m.x1.upper + 1.01e-4) + + err_msg = ( + "model contains a trivially infeasible variable.*x1" + ".*fixed.*outside bounds" + ) + with self.assertRaisesRegex(InfeasibleConstraintException, err_msg): + pyros_solver.solve( + model=m, + first_stage_variables=[m.x1], + second_stage_variables=[m.x2], + uncertain_params=[m.q], + uncertainty_set=BoxSet([[0, 1]]), + local_solver=ipopt_solver, + global_solver=ipopt_solver, + decision_rule_order=0, + solve_master_globally=False, + bypass_global_separation=True, + ) + + self.assertEqual( + pyomo_nl_writer.TOL, + current_nl_writer_tol, + msg=( + "Pyomo NL writer tolerance not restored as expected " + "after exceptional test." + ), + ) + @unittest.skipUnless( baron_license_is_valid, "Global NLP solver is not available and licensed." ) diff --git a/pyomo/contrib/pyros/util.py b/pyomo/contrib/pyros/util.py index 3b0187af7dd..ecabca8f115 100644 --- a/pyomo/contrib/pyros/util.py +++ b/pyomo/contrib/pyros/util.py @@ -38,6 +38,7 @@ from pyomo.core.expr import value from pyomo.core.expr.numeric_expr import NPV_MaxExpression, NPV_MinExpression from pyomo.repn.standard_repn import generate_standard_repn +from pyomo.repn.plugins import nl_writer as pyomo_nl_writer from pyomo.core.expr.visitor import ( identify_variables, identify_mutable_parameters, @@ -377,7 +378,14 @@ def revert_solver_max_time_adjustment( elif isinstance(solver, SolverFactory.get_class("baron")): options_key = "MaxTime" elif isinstance(solver, SolverFactory.get_class("ipopt")): - options_key = "max_cpu_time" + options_key = ( + # IPOPT 3.14.0+ added support for specifying + # wall time limit explicitly; this is preferred + # over CPU time limit + "max_wall_time" + if solver.version() >= (3, 14, 0, 0) + else "max_cpu_time" + ) elif isinstance(solver, SolverFactory.get_class("scip")): options_key = "limits/time" else: @@ -1809,6 +1817,16 @@ def call_solver(model, solver, config, timing_obj, timer_name, err_msg): timing_obj.start_timer(timer_name) tt_timer.tic(msg=None) + # tentative: reduce risk of InfeasibleConstraintException + # occurring due to discrepancies between Pyomo NL writer + # tolerance and (default) subordinate solver (e.g. IPOPT) + # feasibility tolerances. + # e.g., a Var fixed outside bounds beyond the Pyomo NL writer + # tolerance, but still within the default IPOPT feasibility + # tolerance + current_nl_writer_tol = pyomo_nl_writer.TOL + pyomo_nl_writer.TOL = 1e-4 + try: results = solver.solve( model, @@ -1827,6 +1845,8 @@ def call_solver(model, solver, config, timing_obj, timer_name, err_msg): results.solver, TIC_TOC_SOLVE_TIME_ATTR, tt_timer.toc(msg=None, delta=True) ) finally: + pyomo_nl_writer.TOL = current_nl_writer_tol + timing_obj.stop_timer(timer_name) revert_solver_max_time_adjustment( solver, orig_setting, custom_setting_present, config diff --git a/pyomo/contrib/sensitivity_toolbox/sens.py b/pyomo/contrib/sensitivity_toolbox/sens.py index a3d69b2c7b1..34fbb92327a 100644 --- a/pyomo/contrib/sensitivity_toolbox/sens.py +++ b/pyomo/contrib/sensitivity_toolbox/sens.py @@ -9,16 +9,6 @@ # This software is distributed under the 3-clause BSD License. 
# ___________________________________________________________________________ -# ______________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2024 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License -# ______________________________________________________________________________ from pyomo.environ import ( Param, Var, @@ -34,8 +24,10 @@ from pyomo.common.sorting import sorted_robust from pyomo.core.expr import ExpressionReplacementVisitor +from pyomo.core.expr.numvalue import is_potentially_variable from pyomo.common.modeling import unique_component_name +from pyomo.common.dependencies import numpy as np, scipy from pyomo.common.deprecation import deprecated from pyomo.common.tempfiles import TempfileManager from pyomo.opt import SolverFactory, SolverStatus @@ -44,8 +36,6 @@ import os import io import shutil -from pyomo.common.dependencies import numpy as np, numpy_available -from pyomo.common.dependencies import scipy, scipy_available logger = logging.getLogger('pyomo.contrib.sensitivity_toolbox') @@ -684,25 +674,29 @@ def _replace_parameters_in_constraints(self, variableSubMap): ) last_idx = 0 for con in old_con_list: - if con.equality or con.lower is None or con.upper is None: - new_expr = param_replacer.walk_expression(con.expr) - block.constList.add(expr=new_expr) + new_expr = param_replacer.walk_expression(con.expr) + # TODO: We could only create new constraints for expressions + # where substitution actually happened, but that breaks some + # current tests: + # + # if new_expr is con.expr: + # # No params were substituted. We can ignore this constraint + # continue + if new_expr.nargs() == 3 and ( + is_potentially_variable(new_expr.arg(0)) + or is_potentially_variable(new_expr.arg(2)) + ): + # This is a potentially "invalid" range constraint: it + # may now have variables in the bounds. For safety, we + # will split it into two simple inequalities. 
+ block.constList.add(expr=(new_expr.arg(0) <= new_expr.arg(1))) last_idx += 1 new_old_comp_map[block.constList[last_idx]] = con - else: - # Constraint must be a ranged inequality, break into - # separate constraints - new_body = param_replacer.walk_expression(con.body) - new_lower = param_replacer.walk_expression(con.lower) - new_upper = param_replacer.walk_expression(con.upper) - - # Add constraint for lower bound - block.constList.add(expr=(new_lower <= new_body)) + block.constList.add(expr=(new_expr.arg(1) <= new_expr.arg(2))) last_idx += 1 new_old_comp_map[block.constList[last_idx]] = con - - # Add constraint for upper bound - block.constList.add(expr=(new_body <= new_upper)) + else: + block.constList.add(expr=new_expr) last_idx += 1 new_old_comp_map[block.constList[last_idx]] = con con.deactivate() diff --git a/pyomo/contrib/solver/persistent.py b/pyomo/contrib/solver/persistent.py index 71322b7043e..65da81a0c08 100644 --- a/pyomo/contrib/solver/persistent.py +++ b/pyomo/contrib/solver/persistent.py @@ -111,8 +111,8 @@ def add_constraints(self, cons: List[ConstraintData]): raise ValueError( 'constraint {name} has already been added'.format(name=con.name) ) - self._active_constraints[con] = (con.lower, con.body, con.upper) - tmp = collect_vars_and_named_exprs(con.body) + self._active_constraints[con] = con.expr + tmp = collect_vars_and_named_exprs(con.expr) named_exprs, variables, fixed_vars, external_functions = tmp self._check_for_new_vars(variables) self._named_expressions[con] = [(e, e.expr) for e in named_exprs] @@ -417,40 +417,13 @@ def update(self, timer: HierarchicalTimer = None): cons_to_remove_and_add = {} need_to_set_objective = False if config.update_constraints: - cons_to_update = [] - sos_to_update = [] for c in current_cons_dict.keys(): - if c not in new_cons_set: - cons_to_update.append(c) + if c not in new_cons_set and c.expr is not self._active_constraints[c]: + cons_to_remove_and_add[c] = None + sos_to_update = [] for c in current_sos_dict.keys(): if c not in new_sos_set: sos_to_update.append(c) - for c in cons_to_update: - lower, body, upper = self._active_constraints[c] - new_lower, new_body, new_upper = c.lower, c.body, c.upper - if new_body is not body: - cons_to_remove_and_add[c] = None - continue - if new_lower is not lower: - if ( - type(new_lower) is NumericConstant - and type(lower) is NumericConstant - and new_lower.value == lower.value - ): - pass - else: - cons_to_remove_and_add[c] = None - continue - if new_upper is not upper: - if ( - type(new_upper) is NumericConstant - and type(upper) is NumericConstant - and new_upper.value == upper.value - ): - pass - else: - cons_to_remove_and_add[c] = None - continue self.remove_sos_constraints(sos_to_update) self.add_sos_constraints(sos_to_update) timer.stop('cons') diff --git a/pyomo/contrib/viewer/README.md b/pyomo/contrib/viewer/README.md index cfc50b54ce2..93d773e3829 100644 --- a/pyomo/contrib/viewer/README.md +++ b/pyomo/contrib/viewer/README.md @@ -42,6 +42,24 @@ ui = get_mainwindow(model=model) # Do model things, the viewer will stay in sync with the Pyomo model ``` +If you are working in Jupyter notebook, Jupyter qtconsole, or other Jupyter- +based IDEs, and your model is in the __main__ namespace (this is the usual case), +you can specify the model by its variable name as below. The advantage of this +is that if you replace the model with a new model having the same variable name, +the UI will automatically update without having to manually reset the model pointer. 
+ +```python +%gui qt #Enables IPython's GUI event loop integration. +# Execute the above in its own cell and wait for it to finish before moving on. +from pyomo.contrib.viewer.ui import get_mainwindow +import pyomo.environ as pyo + +model = pyo.ConcreteModel() # could import an existing model here +ui = get_mainwindow(model_var_name_in_main="model") + +# Do model things, the viewer will stay in sync with the Pyomo model +``` + **Note:** the ```%gui qt``` cell must be executed in its own cell and execution must complete before running any other cells (you can't use "run all"). diff --git a/pyomo/contrib/viewer/model_select.py b/pyomo/contrib/viewer/model_select.py index e9c82740708..1e65e91a089 100644 --- a/pyomo/contrib/viewer/model_select.py +++ b/pyomo/contrib/viewer/model_select.py @@ -60,31 +60,33 @@ def select_model(self): items = self.tableWidget.selectedItems() if len(items) == 0: return - self.ui_data.model = self.models[items[0].row()] + self.ui_data.model_var_name_in_main = self.models[items[0].row()][1] + self.ui_data.model = self.models[items[0].row()][0] self.close() def update_models(self): import __main__ - s = __main__.__dict__ + s = dir(__main__) keys = [] for k in s: - if isinstance(s[k], pyo.Block): + if isinstance(getattr(__main__, k), pyo.Block): keys.append(k) self.tableWidget.clearContents() self.tableWidget.setRowCount(len(keys)) self.models = [] for row, k in enumerate(sorted(keys)): + model = getattr(__main__, k) item = myqt.QTableWidgetItem() item.setText(k) self.tableWidget.setItem(row, 0, item) item = myqt.QTableWidgetItem() try: - item.setText(s[k].name) + item.setText(model.name) except: item.setText("None") self.tableWidget.setItem(row, 1, item) item = myqt.QTableWidgetItem() - item.setText(str(type(s[k]))) + item.setText(str(type(model))) self.tableWidget.setItem(row, 2, item) - self.models.append(s[k]) + self.models.append((model, k)) diff --git a/pyomo/contrib/viewer/pyomo_viewer.py b/pyomo/contrib/viewer/pyomo_viewer.py index 6a24e12aa61..e4f75c86840 100644 --- a/pyomo/contrib/viewer/pyomo_viewer.py +++ b/pyomo/contrib/viewer/pyomo_viewer.py @@ -41,7 +41,7 @@ class QtApp( model except NameError: model=None - ui, model = get_mainwindow(model=model, ask_close=False) + ui = get_mainwindow(model=model, ask_close=False) ui.setWindowTitle('Pyomo Model Viewer -- {}')""" _kernel_cmd_hide_ui = """try: diff --git a/pyomo/contrib/viewer/tests/test_qt.py b/pyomo/contrib/viewer/tests/test_qt.py index e71921500f9..b7250729cd9 100644 --- a/pyomo/contrib/viewer/tests/test_qt.py +++ b/pyomo/contrib/viewer/tests/test_qt.py @@ -103,7 +103,7 @@ def blackbox(a, b): @unittest.skipIf(not available, "Qt packages are not available.") def test_get_mainwindow(qtbot): m = get_model() - mw, m = get_mainwindow(model=m, testing=True) + mw = get_mainwindow(model=m, testing=True) assert hasattr(mw, "menuBar") assert isinstance(mw.variables, ModelBrowser) assert isinstance(mw.constraints, ModelBrowser) @@ -113,13 +113,13 @@ def test_get_mainwindow(qtbot): @unittest.skipIf(not available, "Qt packages are not available.") def test_close_mainwindow(qtbot): - mw, m = get_mainwindow(model=None, testing=True) + mw = get_mainwindow(model=None, testing=True) mw.exit_action() @unittest.skipIf(not available, "Qt packages are not available.") def test_show_model_select_no_models(qtbot): - mw, m = get_mainwindow(model=None, testing=True) + mw = get_mainwindow(model=None, testing=True) ms = mw.show_model_select() ms.update_models() ms.select_model() @@ -128,7 +128,7 @@ def 
test_show_model_select_no_models(qtbot): @unittest.skipIf(not available, "Qt packages are not available.") def test_model_information(qtbot): m = get_model() - mw, m = get_mainwindow(model=m, testing=True) + mw = get_mainwindow(model=m, testing=True) mw.model_information() assert isinstance(mw._dialog, QMessageBox) text = mw._dialog.text() @@ -149,7 +149,7 @@ def test_model_information(qtbot): @unittest.skipIf(not available, "Qt packages are not available.") def test_tree_expand_collapse(qtbot): m = get_model() - mw, m = get_mainwindow(model=m, testing=True) + mw = get_mainwindow(model=m, testing=True) mw.variables.treeView.expandAll() mw.variables.treeView.collapseAll() @@ -157,7 +157,7 @@ def test_tree_expand_collapse(qtbot): @unittest.skipIf(not available, "Qt packages are not available.") def test_residual_table(qtbot): m = get_model() - mw, m = get_mainwindow(model=m, testing=True) + mw = get_mainwindow(model=m, testing=True) mw.residuals_restart() mw.ui_data.calculate_expressions() mw.residuals.calculate() @@ -184,7 +184,7 @@ def test_residual_table(qtbot): @unittest.skipIf(not available, "Qt packages are not available.") def test_var_tree(qtbot): m = get_model() - mw, m = get_mainwindow(model=m, testing=True) + mw = get_mainwindow(model=m, testing=True) qtbot.addWidget(mw) mw.variables.treeView.expandAll() root_index = mw.variables.datmodel.index(0, 0) @@ -218,7 +218,7 @@ def test_var_tree(qtbot): @unittest.skipIf(not available, "Qt packages are not available.") def test_bad_view(qtbot): m = get_model() - mw, m = get_mainwindow(model=m, testing=True) + mw = get_mainwindow(model=m, testing=True) err = None try: mw.badTree = mw._tree_restart( diff --git a/pyomo/contrib/viewer/ui.py b/pyomo/contrib/viewer/ui.py index 374af8a26f0..ac96e58eea9 100644 --- a/pyomo/contrib/viewer/ui.py +++ b/pyomo/contrib/viewer/ui.py @@ -66,7 +66,9 @@ class _MainWindow(object): _log.error(_err) -def get_mainwindow(model=None, show=True, ask_close=True, testing=False): +def get_mainwindow( + model=None, show=True, ask_close=True, model_var_name_in_main=None, testing=False +): """ Create a UI MainWindow. @@ -79,16 +81,32 @@ def get_mainwindow(model=None, show=True, ask_close=True, testing=False): (ui, model): ui is the MainWindow widget, and model is the linked Pyomo model. 
If no model is provided a new ConcreteModel is created """ + model_name = model_var_name_in_main if model is None: - model = pyo.ConcreteModel(name="Default") - ui = MainWindow(model=model, ask_close=ask_close, testing=testing) + import __main__ + + if model_name in dir(__main__): + if isinstance(getattr(__main__, model_name), pyo.Block): + model = getattr(__main__, model_name) + else: + for s in dir(__main__): + if isinstance(getattr(__main__, s), pyo.Block): + model = getattr(__main__, s) + model_name = s + break + ui = MainWindow( + model=model, + model_var_name_in_main=model_name, + ask_close=ask_close, + testing=testing, + ) try: get_ipython().events.register("post_execute", ui.refresh_on_execute) except AttributeError: pass # not in ipy kernel, so is fine to not register callback if show: ui.show() - return ui, model + return ui class MainWindow(_MainWindow, _MainWindowUI): @@ -97,6 +115,7 @@ def __init__(self, *args, **kwargs): main = self.main = kwargs.pop("main", None) ask_close = self.ask_close = kwargs.pop("ask_close", True) self.testing = kwargs.pop("testing", False) + model_var_name_in_main = kwargs.pop("model_var_name_in_main", None) flags = kwargs.pop("flags", 0) self.ui_data = UIData(model=model) super().__init__(*args, **kwargs) @@ -128,6 +147,7 @@ def __init__(self, *args, **kwargs): self.actionCalculateExpressions.triggered.connect( self.ui_data.calculate_expressions ) + self.ui_data.model_var_name_in_main = model_var_name_in_main self.actionTile.triggered.connect(self.mdiArea.tileSubWindows) self.actionCascade.triggered.connect(self.mdiArea.cascadeSubWindows) self.actionTabs.triggered.connect(self.toggle_tabs) @@ -256,6 +276,18 @@ def refresh_on_execute(self): ipython kernel. The main purpose of this right now it to refresh the UI display so that it matches the current state of the model. """ + if self.ui_data.model_var_name_in_main is not None: + import __main__ + + try: + mname = self.ui_data.model_var_name_in_main + mid = id(getattr(__main__, mname)) + if id(self.ui_data.model) != mid: + self.ui_data.model = getattr(__main__, mname) + self.update_model + return + except AttributeError: + pass for w in self._refresh_list: try: w.refresh() diff --git a/pyomo/contrib/viewer/ui_data.py b/pyomo/contrib/viewer/ui_data.py index c716cfeedf6..8d83be91e5f 100644 --- a/pyomo/contrib/viewer/ui_data.py +++ b/pyomo/contrib/viewer/ui_data.py @@ -39,16 +39,27 @@ class UIDataNoUi(object): UIData. The class is split this way for testing when PyQt is not available. """ - def __init__(self, model=None): + def __init__(self, model=None, model_var_name_in_main=None): """ This class holds the basic UI setup, but doesn't depend on Qt. It shouldn't really be used except for testing when Qt is not available. Args: model: The Pyomo model to view + model_var_name_in_main: if this is set, check whether the model variable + of this name in __main__ still points to the same model object + each time the UI is refreshed after a command is executed in a + Jupyter notebook or QtConsole; if the id has changed, switch the + viewer to the new model. Since the model viewer is not necessarily + pointed at a model in the __main__ namespace, only set this if you + want the model to auto-update. The model selector dialog chooses + models from the __main__ namespace, so it sets this when you select + a model. This is useful if you repeatedly run a script that replaces + a model, so you are not left looking at a previous version of the model.
""" super().__init__() self._model = None + self.model_var_name_in_main = model_var_name_in_main self._begin_update = False self.value_cache = ComponentMap() self.value_cache_units = ComponentMap() diff --git a/pyomo/core/base/constraint.py b/pyomo/core/base/constraint.py index e12860991c2..bc9a32f5404 100644 --- a/pyomo/core/base/constraint.py +++ b/pyomo/core/base/constraint.py @@ -64,6 +64,7 @@ InequalityExpression, RangedExpression, } +_strict_relational_exprs = {True, (False, True), (True, False), (True, True)} _rule_returned_none_error = """Constraint '%s': rule returned None. Constraint rules must return either a valid expression, a 2- or 3-member @@ -151,7 +152,7 @@ class ConstraintData(ActiveComponentData): _active A boolean that indicates whether this data is active """ - __slots__ = ('_body', '_lower', '_upper', '_expr') + __slots__ = ('_expr',) # Set to true when a constraint class stores its expression # in linear canonical form @@ -167,126 +168,172 @@ def __init__(self, expr=None, component=None): self._component = weakref_ref(component) if (component is not None) else None self._active = True - self._body = None - self._lower = None - self._upper = None self._expr = None if expr is not None: self.set_value(expr) def __call__(self, exception=True): """Compute the value of the body of this constraint.""" - return value(self.body, exception=exception) + body = self.to_bounded_expression()[1] + if body.__class__ not in native_numeric_types: + body = value(self.body, exception=exception) + return body + + def to_bounded_expression(self): + """Convert this constraint to a tuple of 3 expressions (lb, body, ub) + + This method "standardizes" the expression into a 3-tuple of + expressions: (`lower_bound`, `body`, `upper_bound`). Upon + conversion, `lower_bound` and `upper_bound` are guaranteed to be + `None`, numeric constants, or fixed (not necessarily constant) + expressions. + + Note + ---- + As this method operates on the *current state* of the + expression, any required expression manipulations (and by + extension, the result) can change after fixing / unfixing + :py:class:`Var` objects. + + Raises + ------ + + ValueError: Raised if the expression cannot be mapped to this + form (i.e., :py:class:`RangedExpression` constraints with + variable lower or upper bounds. + + """ + expr = self._expr + if expr.__class__ is RangedExpression: + lb, body, ub = ans = expr.args + if ( + lb.__class__ not in native_types + and lb.is_potentially_variable() + and not lb.is_fixed() + ): + raise ValueError( + f"Constraint '{self.name}' is a Ranged Inequality with a " + "variable lower bound. Cannot normalize the " + "constraint or send it to a solver." + ) + if ( + ub.__class__ not in native_types + and ub.is_potentially_variable() + and not ub.is_fixed() + ): + raise ValueError( + f"Constraint '{self.name}' is a Ranged Inequality with a " + "variable upper bound. Cannot normalize the " + "constraint or send it to a solver." 
+ ) + return ans + elif expr is not None: + lhs, rhs = expr.args + if rhs.__class__ in native_types or not rhs.is_potentially_variable(): + return rhs if expr.__class__ is EqualityExpression else None, lhs, rhs + if lhs.__class__ in native_types or not lhs.is_potentially_variable(): + return lhs, rhs, lhs if expr.__class__ is EqualityExpression else None + return 0 if expr.__class__ is EqualityExpression else None, lhs - rhs, 0 + return None, None, None @property def body(self): """Access the body of a constraint expression.""" - if self._body is not None: - return self._body - # The incoming RangedInequality had a potentially variable - # bound. The "body" is fine, but the bounds may not be - # (although the responsibility for those checks lies with the - # lower/upper properties) - body = self._expr.arg(1) - if body.__class__ in native_types and body is not None: - return as_numeric(body) - return body - - def _get_range_bound(self, range_arg): - # Equalities and simple inequalities can always be (directly) - # reformulated at construction time to force constant bounds. - # The only time we need to defer the determination of bounds is - # for ranged inequalities that contain non-constant bounds (so - # we *know* that the expr will have 3 args) - # - # It is possible that there is no expression at all (so catch that) - if self._expr is None: - return None - bound = self._expr.arg(range_arg) - if not is_fixed(bound): - raise ValueError( - "Constraint '%s' is a Ranged Inequality with a " - "variable %s bound. Cannot normalize the " - "constraint or send it to a solver." - % (self.name, {0: 'lower', 2: 'upper'}[range_arg]) - ) - return bound + try: + ans = self.to_bounded_expression()[1] + except ValueError: + # It is possible that the expression is not currently valid + # (i.e., a ranged expression with a non-fixed bound). We + # will catch that exception here and - if this actually *is* + # a RangedExpression - return the body. + if self._expr.__class__ is RangedExpression: + _, ans, _ = self._expr.args + else: + raise + if ans.__class__ in native_types and ans is not None: + # Historically, constraint.lower was guaranteed to return a type + # derived from Pyomo NumericValue (or None). Replicate that. + # + # [JDS 6/2024: it would be nice to remove this behavior, + # although possibly unnecessary, as people should use + # to_bounded_expression() instead] + return as_numeric(ans) + return ans @property def lower(self): """Access the lower bound of a constraint expression.""" - bound = self._lower if self._body is not None else self._get_range_bound(0) - # Historically, constraint.lower was guaranteed to return a type - # derived from Pyomo NumericValue (or None). Replicate that - # functionality, although clients should in almost all cases - # move to using ConstraintData.lb instead of accessing - # lower/body/upper to avoid the unnecessary creation (and - # inevitable destruction) of the NumericConstant wrappers. - if bound is None: - return None - return as_numeric(bound) + ans = self.to_bounded_expression()[0] + if ans.__class__ in native_types and ans is not None: + # Historically, constraint.lower was guaranteed to return a type + # derived from Pyomo NumericValue (or None). Replicate that + # functionality, although clients should in almost all cases + # move to using ConstraintData.lb instead of accessing + # lower/body/upper to avoid the unnecessary creation (and + # inevitable destruction) of the NumericConstant wrappers. 
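# Editor's note: a minimal usage sketch of the normalized access pattern
# introduced above (standalone; the model and names below are illustrative
# and not part of this patch).  to_bounded_expression() yields the
# (lower, body, upper) triple, lb/ub return plain numeric values (or None),
# and lower/upper keep the historical NumericValue-or-None behavior.
import pyomo.environ as pyo

m = pyo.ConcreteModel()
m.x = pyo.Var()
m.c = pyo.Constraint(expr=(0, m.x + 1, 10))

lb, body, ub = m.c.to_bounded_expression()   # (0, m.x + 1, 10)
assert m.c.lb == 0 and m.c.ub == 10          # plain numbers via lb/ub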
+ return as_numeric(ans) + return ans @property def upper(self): """Access the upper bound of a constraint expression.""" - bound = self._upper if self._body is not None else self._get_range_bound(2) - # Historically, constraint.upper was guaranteed to return a type - # derived from Pyomo NumericValue (or None). Replicate that - # functionality, although clients should in almost all cases - # move to using ConstraintData.ub instead of accessing - # lower/body/upper to avoid the unnecessary creation (and - # inevitable destruction) of the NumericConstant wrappers. - if bound is None: - return None - return as_numeric(bound) + ans = self.to_bounded_expression()[2] + if ans.__class__ in native_types and ans is not None: + # Historically, constraint.upper was guaranteed to return a type + # derived from Pyomo NumericValue (or None). Replicate that + # functionality, although clients should in almost all cases + # move to using ConstraintData.lb instead of accessing + # lower/body/upper to avoid the unnecessary creation (and + # inevitable destruction) of the NumericConstant wrappers. + return as_numeric(ans) + return ans @property def lb(self): """Access the value of the lower bound of a constraint expression.""" - bound = self._lower if self._body is not None else self._get_range_bound(0) + bound = self.to_bounded_expression()[0] + if bound is None: + return None if bound.__class__ not in native_numeric_types: - if bound is None: - return None bound = float(value(bound)) + # Note that "bound != bound" catches float('nan') if bound in _nonfinite_values or bound != bound: - # Note that "bound != bound" catches float('nan') if bound == -_inf: return None - else: - raise ValueError( - "Constraint '%s' created with an invalid non-finite " - "lower bound (%s)." % (self.name, bound) - ) + raise ValueError( + f"Constraint '{self.name}' created with an invalid non-finite " + f"lower bound ({bound})." + ) return bound @property def ub(self): """Access the value of the upper bound of a constraint expression.""" - bound = self._upper if self._body is not None else self._get_range_bound(2) + bound = self.to_bounded_expression()[2] + if bound is None: + return None if bound.__class__ not in native_numeric_types: - if bound is None: - return None bound = float(value(bound)) + # Note that "bound != bound" catches float('nan') if bound in _nonfinite_values or bound != bound: - # Note that "bound != bound" catches float('nan') if bound == _inf: return None - else: - raise ValueError( - "Constraint '%s' created with an invalid non-finite " - "upper bound (%s)." % (self.name, bound) - ) + raise ValueError( + f"Constraint '{self.name}' created with an invalid non-finite " + f"upper bound ({bound})." + ) return bound @property def equality(self): """A boolean indicating whether this is an equality constraint.""" - if self._expr.__class__ is EqualityExpression: + expr = self.expr + if expr.__class__ is EqualityExpression: return True - elif self._expr.__class__ is RangedExpression: + elif expr.__class__ is RangedExpression: # TODO: this is a very restrictive form of structural equality. 
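# Editor's note: a sketch of what this structural test accepts: the ranged
# expression must reuse the *same* bound object on both sides.  The model
# below is illustrative only, not part of this patch.
import pyomo.environ as pyo

m = pyo.ConcreteModel()
m.x = pyo.Var()
m.p = pyo.Param(initialize=2.0, mutable=True)
m.c = pyo.Constraint(expr=pyo.inequality(m.p, m.x, m.p))
assert m.c.equality   # identical Param object as lower and upper bound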
- lb = self._expr.arg(0) - if lb is not None and lb is self._expr.arg(2): + lb = expr.arg(0) + if lb is not None and lb is expr.arg(2): return True return False @@ -317,15 +364,22 @@ def expr(self): def get_value(self): """Get the expression on this constraint.""" - return self._expr + return self.expr def set_value(self, expr): """Set the expression on this constraint.""" # Clear any previously-cached normalized constraint - self._lower = self._upper = self._body = self._expr = None - + self._expr = None if expr.__class__ in _known_relational_expressions: + if getattr(expr, 'strict', False) in _strict_relational_exprs: + raise ValueError( + "Constraint '%s' encountered a strict " + "inequality expression ('>' or '<'). All " + "constraints must be formulated using " + "using '<=', '>=', or '=='." % (self.name,) + ) self._expr = expr + elif expr.__class__ is tuple: # or expr_type is list: for arg in expr: if ( @@ -422,120 +476,6 @@ def set_value(self, expr): "\n (0, model.price[item], 50)" % (self.name, str(expr)) ) raise ValueError(msg) - # - # Normalize the incoming expressions, if we can - # - args = self._expr.args - if self._expr.__class__ is InequalityExpression: - if self._expr.strict: - raise ValueError( - "Constraint '%s' encountered a strict " - "inequality expression ('>' or '< '). All" - " constraints must be formulated using " - "using '<=', '>=', or '=='." % (self.name,) - ) - if ( - args[1] is None - or args[1].__class__ in native_numeric_types - or not args[1].is_potentially_variable() - ): - self._body = args[0] - self._upper = args[1] - elif ( - args[0] is None - or args[0].__class__ in native_numeric_types - or not args[0].is_potentially_variable() - ): - self._lower = args[0] - self._body = args[1] - else: - self._body = args[0] - args[1] - self._upper = 0 - elif self._expr.__class__ is EqualityExpression: - if args[0] is None or args[1] is None: - # Error check: ensure equality does not have infinite RHS - raise ValueError( - "Equality constraint '%s' defined with " - "non-finite term (%sHS == None)." - % (self.name, 'L' if args[0] is None else 'R') - ) - if ( - args[0].__class__ in native_numeric_types - or not args[0].is_potentially_variable() - ): - self._lower = self._upper = args[0] - self._body = args[1] - elif ( - args[1].__class__ in native_numeric_types - or not args[1].is_potentially_variable() - ): - self._lower = self._upper = args[1] - self._body = args[0] - else: - self._lower = self._upper = 0 - self._body = args[0] - args[1] - # The following logic is caught below when checking for - # invalid non-finite bounds: - # - # if self._lower.__class__ in native_numeric_types and \ - # not math.isfinite(self._lower): - # raise ValueError( - # "Equality constraint '%s' defined with " - # "non-finite term." % (self.name)) - elif self._expr.__class__ is RangedExpression: - if any(self._expr.strict): - raise ValueError( - "Constraint '%s' encountered a strict " - "inequality expression ('>' or '< '). All" - " constraints must be formulated using " - "using '<=', '>=', or '=='." % (self.name,) - ) - if all( - ( - arg is None - or arg.__class__ in native_numeric_types - or not arg.is_potentially_variable() - ) - for arg in (args[0], args[2]) - ): - self._lower, self._body, self._upper = args - else: - # Defensive programming: we currently only support three - # relational expression types. This will only be hit if - # someone defines a fourth... 
- raise DeveloperError( - "Unrecognized relational expression type: %s" - % (self._expr.__class__.__name__,) - ) - - # We have historically forced the body to be a numeric expression. - # TODO: remove this requirement - if self._body.__class__ in native_types and self._body is not None: - self._body = as_numeric(self._body) - - # We have historically mapped incoming inf to None - if self._lower.__class__ in native_numeric_types: - bound = self._lower - if bound in _nonfinite_values or bound != bound: - # Note that "bound != bound" catches float('nan') - if bound == -_inf: - self._lower = None - else: - raise ValueError( - "Constraint '%s' created with an invalid non-finite " - "lower bound (%s)." % (self.name, self._lower) - ) - if self._upper.__class__ in native_numeric_types: - bound = self._upper - if bound in _nonfinite_values or bound != bound: - # Note that "bound != bound" catches float('nan') - if bound == _inf: - self._upper = None - else: - raise ValueError( - "Constraint '%s' created with an invalid non-finite " - "upper bound (%s)." % (self.name, self._upper) - ) def lslack(self): """ @@ -911,6 +851,7 @@ class SimpleConstraint(metaclass=RenamedClass): { 'add', 'set_value', + 'to_bounded_expression', 'body', 'lower', 'upper', diff --git a/pyomo/core/base/set.py b/pyomo/core/base/set.py index 8b7c2a246d6..e4a6d13e96e 100644 --- a/pyomo/core/base/set.py +++ b/pyomo/core/base/set.py @@ -16,15 +16,18 @@ import math import sys import weakref -from pyomo.common.pyomo_typing import overload -from typing import Union, Type, Any as typingAny + from collections.abc import Iterator +from functools import partial +from typing import Union, Type, Any as typingAny +from pyomo.common.autoslots import AutoSlots from pyomo.common.collections import ComponentSet from pyomo.common.deprecation import deprecated, deprecation_warning, RenamedClass from pyomo.common.errors import DeveloperError, PyomoException from pyomo.common.log import is_debug_set from pyomo.common.modeling import NOTSET +from pyomo.common.pyomo_typing import overload from pyomo.common.sorting import sorted_robust from pyomo.common.timing import ConstructionTimer @@ -478,9 +481,7 @@ def __call__(self, parent, index): if not isinstance(_val, Sequence): _val = tuple(_val) - if len(_val) == 0: - return _val - if isinstance(_val[0], tuple): + if not _val or isinstance(_val[0], tuple): return _val return self._tuplize(_val, parent, index) @@ -501,7 +502,7 @@ def _tuplize(self, _val, parent, index): "length %s is not a multiple of dimen=%s" % (len(_val), d) ) - return list(tuple(_val[d * i : d * (i + 1)]) for i in range(len(_val) // d)) + return (tuple(_val[i : i + d]) for i in range(0, len(_val), d)) class _NotFound(object): @@ -1364,87 +1365,12 @@ def filter(self): return self._filter def add(self, *values): - count = 0 - _block = self.parent_block() - for value in values: - if normalize_index.flatten: - _value = normalize_index(value) - if _value.__class__ is tuple: - _d = len(_value) - else: - _d = 1 - else: - # If we are not normalizing indices, then we cannot reliably - # infer the set dimen - _d = 1 - if isinstance(value, Sequence) and self.dimen != 1: - _d = len(value) - _value = value - if _value not in self._domain: - raise ValueError( - "Cannot add value %s to Set %s.\n" - "\tThe value is not in the domain %s" - % (value, self.name, self._domain) - ) + N = len(self) + self.update(values) + return len(self) - N - # We wrap this check in a try-except because some values - # (like lists) are not hashable and can raise 
exceptions. - try: - if _value in self: - logger.warning( - "Element %s already exists in Set %s; no action taken" - % (value, self.name) - ) - continue - except: - exc = sys.exc_info() - raise TypeError( - "Unable to insert '%s' into Set %s:\n\t%s: %s" - % (value, self.name, exc[0].__name__, exc[1]) - ) - - if self._filter is not None: - if not self._filter(_block, _value): - continue - - if self._validate is not None: - try: - flag = self._validate(_block, _value) - except: - logger.error( - "Exception raised while validating element '%s' " - "for Set %s" % (value, self.name) - ) - raise - if not flag: - raise ValueError( - "The value=%s violates the validation rule of Set %s" - % (value, self.name) - ) - - # If the Set has a fixed dimension, check that this element is - # compatible. - if self._dimen is not None: - if _d != self._dimen: - if self._dimen is UnknownSetDimen: - # The first thing added to a Set with unknown - # dimension sets its dimension - self._dimen = _d - else: - raise ValueError( - "The value=%s has dimension %s and is not " - "valid for Set %s which has dimen=%s" - % (value, _d, self.name, self._dimen) - ) - - # Add the value to this object (this last redirection allows - # derived classes to implement a different storage mechanism) - self._add_impl(_value) - count += 1 - return count - - def _add_impl(self, value): - self._values.add(value) + def _update_impl(self, values): + self._values.update(values) def remove(self, val): self._values.remove(val) @@ -1457,17 +1383,147 @@ def clear(self): def set_value(self, val): self.clear() - for x in val: - self.add(x) + self.update(val) + + def _initialize(self, val): + try: + # We want to explicitly call the update() on *this class* to + # bypass potential double logging of the use of unordered + # data with ordered Sets + FiniteSetData.update(self, val) + except TypeError as e: + if 'not iterable' in str(e): + logger.error( + "Initializer for Set %s returned non-iterable object " + "of type %s." + % ( + self.name, + (val if val.__class__ is type else type(val).__name__), + ) + ) + raise def update(self, values): - for v in values: - if v not in self: - self.add(v) + # Special case: set operations that are not first attached + # to the model must be constructed. + if isinstance(values, SetOperator): + values.construct() + # It is important that val_iter is an actual iterator + val_iter = iter(values) + if self._dimen is not None: + if normalize_index.flatten: + val_iter = self._cb_normalized_dimen_verifier(self._dimen, val_iter) + else: + val_iter = self._cb_raw_dimen_verifier(self._dimen, val_iter) + elif normalize_index.flatten: + val_iter = map(normalize_index, val_iter) + else: + val_iter = self._cb_check_set_end(val_iter) + + if self._domain is not Any: + val_iter = self._cb_domain_verifier(self._domain, val_iter) + + if self._filter is not None: + val_iter = filter(partial(self._filter, self.parent_block()), val_iter) + + if self._validate is not None: + val_iter = self._cb_validate(self._validate, self.parent_block(), val_iter) + + # We wrap this check in a try-except because some values + # (like lists) are not hashable and can raise exceptions. 
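# Editor's note: update() now streams values through a chain of small
# generator stages (dimen normalization, domain check, filter, validate)
# in a single pass.  A generic sketch of the same composition pattern in
# plain Python (the helper names below are illustrative, not Pyomo API):
def _require_ints(val_iter):
    for v in val_iter:
        if not isinstance(v, int):
            raise ValueError(f"{v!r} is not an int")
        yield v

def _keep_nonnegative(val_iter):
    return (v for v in val_iter if v >= 0)

data = set()
data.update(_keep_nonnegative(_require_ints(iter([3, -1, 4]))))
assert data == {3, 4}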
+ try: + self._update_impl(val_iter) + except Set._SetEndException: + pass def pop(self): return self._values.pop() + def _cb_domain_verifier(self, domain, val_iter): + for value in val_iter: + if value not in domain: + raise ValueError( + "Cannot add value %s to Set %s.\n" + "\tThe value is not in the domain %s" + % (value, self.name, self._domain) + ) + yield value + + def _cb_check_set_end(self, val_iter): + for value in val_iter: + if value is Set.End: + return + yield value + + def _cb_validate(self, validate, block, val_iter): + for value in val_iter: + try: + flag = validate(block, value) + except: + logger.error( + "Exception raised while validating element '%s' " + "for Set %s" % (value, self.name) + ) + raise + if not flag: + raise ValueError( + "The value=%s violates the validation rule of Set %s" + % (value, self.name) + ) + yield value + + def _cb_normalized_dimen_verifier(self, dimen, val_iter): + for value in val_iter: + if value.__class__ in native_types: + if dimen == 1: + yield value + continue + normalized_value = value + else: + normalized_value = normalize_index(value) + # Note: normalize_index() will never return a 1-tuple + if normalized_value.__class__ is tuple: + if dimen == len(normalized_value): + yield normalized_value[0] if dimen == 1 else normalized_value + continue + + _d = len(normalized_value) if normalized_value.__class__ is tuple else 1 + if _d == dimen: + yield normalized_value + elif dimen is UnknownSetDimen: + # The first thing added to a Set with unknown dimension + # sets its dimension + self._dimen = dimen = _d + yield normalized_value + else: + raise ValueError( + "The value=%s has dimension %s and is not " + "valid for Set %s which has dimen=%s" + % (value, _d, self.name, self._dimen) + ) + + def _cb_raw_dimen_verifier(self, dimen, val_iter): + for value in val_iter: + if isinstance(value, Sequence): + if dimen == len(value): + yield value + continue + elif dimen == 1: + yield value + continue + _d = len(value) if isinstance(value, Sequence) else 1 + if dimen is UnknownSetDimen: + # The first thing added to a Set with unknown dimension + # sets its dimension + self._dimen = dimen = _d + yield value + else: + raise ValueError( + "The value=%s has dimension %s and is not " + "valid for Set %s which has dimen=%s" + % (value, _d, self.name, self._dimen) + ) + class _FiniteSetData(metaclass=RenamedClass): __renamed__new_class__ = FiniteSetData @@ -1545,10 +1601,16 @@ def ordered_iter(self): return iter(self) def first(self): - return self.at(1) + try: + return next(iter(self)) + except StopIteration: + raise IndexError(f"{self.name} index out of range") from None def last(self): - return self.at(len(self)) + try: + return next(reversed(self)) + except StopIteration: + raise IndexError(f"{self.name} index out of range") from None def next(self, item, step=1): """ @@ -1655,27 +1717,30 @@ class OrderedSetData(_OrderedSetMixin, FiniteSetData): def __init__(self, component): self._values = {} - self._ordered_values = [] + self._ordered_values = None FiniteSetData.__init__(self, component=component) def _iter_impl(self): """ Return an iterator for the set. 
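        Iteration now comes straight from the ``_values`` dict; this relies on
        Python dicts preserving insertion order (guaranteed since CPython 3.7),
        for example::

            >>> d = {}
            >>> d[3] = None; d[1] = None; d[2] = None
            >>> list(d)
            [3, 1, 2]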
""" - return iter(self._ordered_values) + return iter(self._values) def __reversed__(self): - return reversed(self._ordered_values) + return reversed(self._values) - def _add_impl(self, value): - self._values[value] = len(self._values) - self._ordered_values.append(value) + def _update_impl(self, values): + for val in values: + # Note that we reset _ordered_values within the loop because + # of an old example where the initializer rule makes + # reference to values previously inserted into the Set + # (which triggered the creation of the _ordered_values) + self._ordered_values = None + self._values[val] = None def remove(self, val): - idx = self._values.pop(val) - self._ordered_values.pop(idx) - for i in range(idx, len(self._ordered_values)): - self._values[self._ordered_values[i]] -= 1 + self._values.pop(val) + self._ordered_values = None def discard(self, val): try: @@ -1685,15 +1750,15 @@ def discard(self, val): def clear(self): self._values.clear() - self._ordered_values = [] + self._ordered_values = None def pop(self): try: ans = self.last() except IndexError: - # Map the index error to a KeyError for consistency with - # set().pop() - raise KeyError('pop from an empty set') + # Map the exception for iterating over an empty dict to a + # KeyError for consistency with set().pop() + raise KeyError('pop from an empty set') from None self.discard(ans) return ans @@ -1704,6 +1769,8 @@ def at(self, index): The public Set API is 1-based, even though the internal _lookup and _values are (pythonically) 0-based. """ + if self._ordered_values is None: + self._rebuild_ordered_values() i = self._to_0_based_index(index) try: return self._ordered_values[i] @@ -1723,6 +1790,8 @@ def ord(self, item): # when they are actually put as Set members. So, we will look # for the exact thing that the user sent us and then fall back # on the scalar. + if self._ordered_values is None: + self._rebuild_ordered_values() try: return self._values[item] + 1 except KeyError: @@ -1733,6 +1802,12 @@ def ord(self, item): except KeyError: raise ValueError("%s.ord(x): x not in %s" % (self.name, self.name)) + def _rebuild_ordered_values(self): + _set = self._values + self._ordered_values = list(_set) + for i, v in enumerate(self._ordered_values): + _set[v] = i + class _OrderedSetData(metaclass=RenamedClass): __renamed__new_class__ = OrderedSetData @@ -1752,6 +1827,16 @@ class InsertionOrderSetData(OrderedSetData): __slots__ = () + def _initialize(self, val): + if type(val) in Set._UnorderedInitializers: + logger.warning( + "Initializing ordered Set %s with " + "a fundamentally unordered data source (type: %s). 
" + "This WILL potentially lead to nondeterministic behavior " + "in Pyomo" % (self.name, type(val).__name__) + ) + super()._initialize(val) + def set_value(self, val): if type(val) in Set._UnorderedInitializers: logger.warning( @@ -1760,7 +1845,8 @@ def set_value(self, val): "This WILL potentially lead to nondeterministic behavior " "in Pyomo" % (type(val).__name__,) ) - super(InsertionOrderSetData, self).set_value(val) + self.clear() + super().update(val) def update(self, values): if type(values) in Set._UnorderedInitializers: @@ -1770,7 +1856,7 @@ def update(self, values): "This WILL potentially lead to nondeterministic behavior " "in Pyomo" % (type(values).__name__,) ) - super(InsertionOrderSetData, self).update(values) + super().update(values) class _InsertionOrderSetData(metaclass=RenamedClass): @@ -1800,73 +1886,42 @@ class SortedSetData(_SortedSetMixin, OrderedSetData): Public Class Attributes: """ - __slots__ = ('_is_sorted',) - - def __init__(self, component): - # An empty set is sorted... - self._is_sorted = True - OrderedSetData.__init__(self, component=component) + __slots__ = () def _iter_impl(self): """ Return an iterator for the set. """ - if not self._is_sorted: - self._sort() - return super(SortedSetData, self)._iter_impl() + if self._ordered_values is None: + self._rebuild_ordered_values() + return iter(self._ordered_values) def __reversed__(self): - if not self._is_sorted: - self._sort() - return super(SortedSetData, self).__reversed__() + if self._ordered_values is None: + self._rebuild_ordered_values() + return reversed(self._ordered_values) - def _add_impl(self, value): - # Note that the sorted status has no bearing on insertion, - # so there is no reason to check if the data is correctly sorted - self._values[value] = len(self._values) - self._ordered_values.append(value) - self._is_sorted = False + def _update_impl(self, values): + for val in values: + # Note that we reset _ordered_values within the loop because + # of an old example where the initializer rule makes + # reference to values previously inserted into the Set + # (which triggered the creation of the _ordered_values) + self._ordered_values = None + self._values[val] = None # Note: removing data does not affect the sorted flag # def remove(self, val): # def discard(self, val): - def clear(self): - super(SortedSetData, self).clear() - self._is_sorted = True - - def at(self, index): - """ - Return the specified member of the set. - - The public Set API is 1-based, even though the - internal _lookup and _values are (pythonically) 0-based. - """ - if not self._is_sorted: - self._sort() - return super(SortedSetData, self).at(index) - - def ord(self, item): - """ - Return the position index of the input value. - - Note that Pyomo Set objects have positions starting at 1 (not 0). - - If the search item is not in the Set, then an IndexError is raised. 
- """ - if not self._is_sorted: - self._sort() - return super(SortedSetData, self).ord(item) - def sorted_data(self): return self.data() - def _sort(self): - self._ordered_values = list( - self.parent_component()._sort_fcn(self._ordered_values) - ) - self._values = {j: i for i, j in enumerate(self._ordered_values)} - self._is_sorted = True + def _rebuild_ordered_values(self): + _set = self._values + self._ordered_values = list(self.parent_component()._sort_fcn(_set)) + for i, v in enumerate(self._ordered_values): + _set[v] = i class _SortedSetData(metaclass=RenamedClass): @@ -1932,7 +1987,8 @@ class Set(IndexedComponent): within : initialiser(set), optional A set that defines the valid values that can be contained - in this set + in this set. If the latter is indexed, the former can be indexed or + non-indexed, in which case it applies to all indices. domain : initializer(set), optional A set that defines the valid values that can be contained in this set @@ -1974,10 +2030,14 @@ class Set(IndexedComponent): """ - class End(object): + class _SetEndException(Exception): pass - class Skip(object): + class _SetEndType(type): + def __hash__(self): + raise Set._SetEndException() + + class End(metaclass=_SetEndType): pass class InsertionOrder(object): @@ -1986,7 +2046,7 @@ class InsertionOrder(object): class SortedOrder(object): pass - _ValidOrderedAuguments = {True, False, InsertionOrder, SortedOrder} + _ValidOrderedArguments = {True, False, InsertionOrder, SortedOrder} _UnorderedInitializers = {set} @overload @@ -2015,7 +2075,7 @@ def __new__(cls, *args, **kwds): ordered = kwds.get('ordered', Set.InsertionOrder) if ordered is True: ordered = Set.InsertionOrder - if ordered not in Set._ValidOrderedAuguments: + if ordered not in Set._ValidOrderedArguments: if inspect.isfunction(ordered): ordered = Set.SortedOrder else: @@ -2032,7 +2092,7 @@ def __new__(cls, *args, **kwds): str(_) for _ in sorted_robust( 'Set.' + x.__name__ if isinstance(x, type) else x - for x in Set._ValidOrderedAuguments.union( + for x in Set._ValidOrderedArguments.union( {''} ) ) @@ -2218,25 +2278,10 @@ def _getitem_when_not_present(self, index): domain = self._init_domain(_block, index, self) if domain is not None: - domain.construct() + domain.parent_component().construct() if _d is UnknownSetDimen and domain is not None and domain.dimen is not None: _d = domain.dimen - if self._init_values is not None: - self._init_values._dimen = _d - try: - _values = self._init_values(_block, index) - except TuplizeError as e: - raise ValueError( - str(e) % (self._name, "[%s]" % index if self.is_indexed() else "") - ) - - if _values is Set.Skip: - return - elif _values is None: - raise ValueError( - "Set rule or initializer returned None instead of Set.Skip" - ) if index is None and not self.is_indexed(): obj = self._data[index] = self else: @@ -2258,55 +2303,35 @@ def _getitem_when_not_present(self, index): obj._validate = self._init_validate if self._init_filter is not None: try: - _filter = Initializer(self._init_filter(_block, index)) - if _filter.constant(): + obj._filter = Initializer(self._init_filter(_block, index)) + if obj._filter.constant(): # _init_filter was the actual filter function; use it. - _filter = self._init_filter + obj._filter = self._init_filter except: # We will assume any exceptions raised when getting the # filter for this index indicate that the function # should have been passed directly to the underlying sets. 
- _filter = self._init_filter + obj._filter = self._init_filter else: - _filter = None + obj._filter = None if self._init_values is not None: - # _values was initialized above... - if obj.isordered() and type(_values) in Set._UnorderedInitializers: - logger.warning( - "Initializing ordered Set %s with a fundamentally " - "unordered data source (type: %s). This WILL potentially " - "lead to nondeterministic behavior in Pyomo" - % (self.name, type(_values).__name__) - ) - # Special case: set operations that are not first attached - # to the model must be constructed. - if isinstance(_values, SetOperator): - _values.construct() + # record the user-provided dimen in the initializer + self._init_values._dimen = _d try: - val_iter = iter(_values) - except TypeError: - logger.error( - "Initializer for Set %s%s returned non-iterable object " - "of type %s." - % ( - self.name, - ("[%s]" % (index,) if self.is_indexed() else ""), - ( - _values - if _values.__class__ is type - else type(_values).__name__ - ), - ) + _values = self._init_values(_block, index) + except TuplizeError as e: + raise ValueError( + str(e) % (self._name, "[%s]" % index if self.is_indexed() else "") ) - raise - for val in val_iter: - if val is Set.End: - break - if _filter is None or _filter(_block, val): - obj.add(val) - # We defer adding the filter until now so that add() doesn't - # call it a second time. - obj._filter = _filter + if _values is Set.Skip: + del self._data[index] + return + elif _values is None: + raise ValueError( + "Set rule or initializer returned None instead of Set.Skip" + ) + + obj._initialize(_values) return obj @staticmethod diff --git a/pyomo/core/expr/compare.py b/pyomo/core/expr/compare.py index 44d9c4205d7..4a777a9b977 100644 --- a/pyomo/core/expr/compare.py +++ b/pyomo/core/expr/compare.py @@ -230,10 +230,14 @@ def assertExpressionsEqual(test, a, b, include_named_exprs=True, places=None): test.assertEqual(len(prefix_a), len(prefix_b)) for _a, _b in zip(prefix_a, prefix_b): test.assertIs(_a.__class__, _b.__class__) - if places is None: - test.assertEqual(_a, _b) + # If _a is nan, check _b is nan + if _a != _a: + test.assertTrue(_b != _b) else: - test.assertAlmostEqual(_a, _b, places=places) + if places is None: + test.assertEqual(_a, _b) + else: + test.assertAlmostEqual(_a, _b, places=places) except (PyomoException, AssertionError): test.fail( f"Expressions not equal:\n\t" @@ -292,10 +296,13 @@ def assertExpressionsStructurallyEqual( for _a, _b in zip(prefix_a, prefix_b): if _a.__class__ not in native_types and _b.__class__ not in native_types: test.assertIs(_a.__class__, _b.__class__) - if places is None: - test.assertEqual(_a, _b) + if _a != _a: + test.assertTrue(_b != _b) else: - test.assertAlmostEqual(_a, _b, places=places) + if places is None: + test.assertEqual(_a, _b) + else: + test.assertAlmostEqual(_a, _b, places=places) except (PyomoException, AssertionError): test.fail( f"Expressions not structurally equal:\n\t" diff --git a/pyomo/core/kernel/conic.py b/pyomo/core/kernel/conic.py index 1bb5f1b6ce8..bd78ba310f4 100644 --- a/pyomo/core/kernel/conic.py +++ b/pyomo/core/kernel/conic.py @@ -632,7 +632,7 @@ def as_domain(cls, r, x): b = block() b.r = variable_tuple([variable(lb=0) for i in range(len(r))]) b.x = variable() - b.c = _build_linking_constraints(list(r) + [x], list(b.r) + [x]) + b.c = _build_linking_constraints(list(r) + [x], list(b.r) + [b.x]) b.q = cls(r=b.r, x=b.x) return b @@ -934,7 +934,7 @@ def as_domain(cls, r, x): b = block() b.r = variable_tuple([variable(lb=0) for i in 
range(len(r))]) b.x = variable() - b.c = _build_linking_constraints(list(r) + [x], list(b.r) + [x]) + b.c = _build_linking_constraints(list(r) + [x], list(b.r) + [b.x]) b.q = cls(r=b.r, x=b.x) return b diff --git a/pyomo/core/kernel/constraint.py b/pyomo/core/kernel/constraint.py index 6aa4abc4bfe..fe8eb8b2c1f 100644 --- a/pyomo/core/kernel/constraint.py +++ b/pyomo/core/kernel/constraint.py @@ -177,6 +177,9 @@ class _MutableBoundsConstraintMixin(object): # Define some of the IConstraint abstract methods # + def to_bounded_expression(self): + return self.lower, self.body, self.upper + @property def lower(self): """The expression for the lower bound of the constraint""" diff --git a/pyomo/core/kernel/register_numpy_types.py b/pyomo/core/kernel/register_numpy_types.py index 86877be2230..b3405645d97 100644 --- a/pyomo/core/kernel/register_numpy_types.py +++ b/pyomo/core/kernel/register_numpy_types.py @@ -45,10 +45,12 @@ # Historically, the lists included several numpy aliases numpy_int_names.extend(('int_', 'intc', 'intp')) numpy_int.extend((numpy.int_, numpy.intc, numpy.intp)) - numpy_float_names.append('float_') - numpy_float.append(numpy.float_) - numpy_complex_names.append('complex_') - numpy_complex.append(numpy.complex_) + if hasattr(numpy, 'float_'): + numpy_float_names.append('float_') + numpy_float.append(numpy.float_) + if hasattr(numpy, 'complex_'): + numpy_complex_names.append('complex_') + numpy_complex.append(numpy.complex_) # Re-build the old numpy_* lists for t in native_boolean_types: diff --git a/pyomo/core/plugins/transform/add_slack_vars.py b/pyomo/core/plugins/transform/add_slack_vars.py index 39903384729..31c1107d692 100644 --- a/pyomo/core/plugins/transform/add_slack_vars.py +++ b/pyomo/core/plugins/transform/add_slack_vars.py @@ -150,26 +150,29 @@ def _apply_to_impl(self, instance, **kwds): if not cons.active: continue cons_name = cons.getname(fully_qualified=True) - if cons.lower is not None: + lower = cons.lower + body = cons.body + upper = cons.upper + if lower is not None: # we add positive slack variable to body: # declare positive slack varName = "_slack_plus_" + cons_name posSlack = Var(within=NonNegativeReals) xblock.add_component(varName, posSlack) # add positive slack to body expression - cons._body += posSlack + body += posSlack # penalize slack in objective obj_expr += posSlack - if cons.upper is not None: + if upper is not None: # we subtract a positive slack variable from the body: # declare slack varName = "_slack_minus_" + cons_name negSlack = Var(within=NonNegativeReals) xblock.add_component(varName, negSlack) # add negative slack to body expression - cons._body -= negSlack + body -= negSlack # add slack to objective obj_expr += negSlack - + cons.set_value((lower, body, upper)) # make a new objective that minimizes sum of slack variables xblock._slack_objective = Objective(expr=obj_expr) diff --git a/pyomo/core/plugins/transform/radix_linearization.py b/pyomo/core/plugins/transform/radix_linearization.py index 92270655f31..3cfde28db3c 100644 --- a/pyomo/core/plugins/transform/radix_linearization.py +++ b/pyomo/core/plugins/transform/radix_linearization.py @@ -280,7 +280,7 @@ def _collect_bilinear(self, expr, bilin, quad): if type(expr) is PowExpression and value(expr._args[1]) == 2: # Note: directly testing the value of the exponent above is # safe: we have already verified that this expression is - # polynominal, so the exponent must be constant. + # polynomial, so the exponent must be constant. 
tmp = ProductExpression() tmp._numerator = [expr._args[0], expr._args[0]] tmp._denominator = [] diff --git a/pyomo/core/tests/unit/kernel/test_conic.py b/pyomo/core/tests/unit/kernel/test_conic.py index ccfbcca7e1f..bd97c13fc2e 100644 --- a/pyomo/core/tests/unit/kernel/test_conic.py +++ b/pyomo/core/tests/unit/kernel/test_conic.py @@ -35,6 +35,8 @@ primal_power, dual_exponential, dual_power, + primal_geomean, + dual_geomean, ) @@ -784,6 +786,40 @@ def test_as_domain(self): x[1].value = None +# These mosek 10 constraints can't be evaluated, pprinted, checked for convexity, +# pickled, etc., so I won't use the _conic_tester_base for them +class Test_primal_geomean(unittest.TestCase): + def test_as_domain(self): + b = primal_geomean.as_domain(r=[2, 3], x=6) + self.assertIs(type(b), block) + self.assertIs(type(b.q), primal_geomean) + self.assertIs(type(b.r), variable_tuple) + self.assertIs(type(b.x), variable) + self.assertIs(type(b.c), constraint_tuple) + self.assertExpressionsEqual(b.c[0].body, b.r[0]) + self.assertExpressionsEqual(b.c[0].rhs, 2) + self.assertExpressionsEqual(b.c[1].body, b.r[1]) + self.assertExpressionsEqual(b.c[1].rhs, 3) + self.assertExpressionsEqual(b.c[2].body, b.x) + self.assertExpressionsEqual(b.c[2].rhs, 6) + + +class Test_dual_geomean(unittest.TestCase): + def test_as_domain(self): + b = dual_geomean.as_domain(r=[2, 3], x=6) + self.assertIs(type(b), block) + self.assertIs(type(b.q), dual_geomean) + self.assertIs(type(b.r), variable_tuple) + self.assertIs(type(b.x), variable) + self.assertIs(type(b.c), constraint_tuple) + self.assertExpressionsEqual(b.c[0].body, b.r[0]) + self.assertExpressionsEqual(b.c[0].rhs, 2) + self.assertExpressionsEqual(b.c[1].body, b.r[1]) + self.assertExpressionsEqual(b.c[1].rhs, 3) + self.assertExpressionsEqual(b.c[2].body, b.x) + self.assertExpressionsEqual(b.c[2].rhs, 6) + + class TestMisc(unittest.TestCase): def test_build_linking_constraints(self): c = _build_linking_constraints([], []) diff --git a/pyomo/core/tests/unit/test_con.py b/pyomo/core/tests/unit/test_con.py index 15f190e281e..07c7eb3af8e 100644 --- a/pyomo/core/tests/unit/test_con.py +++ b/pyomo/core/tests/unit/test_con.py @@ -84,21 +84,55 @@ def rule(model): self.assertEqual(model.c.upper, 0) def test_tuple_construct_inf_equality(self): - model = self.create_model(abstract=True) - - def rule(model): - return (model.x, float('inf')) - - model.c = Constraint(rule=rule) - self.assertRaises(ValueError, model.create_instance) - - model = self.create_model(abstract=True) - - def rule(model): - return (float('inf'), model.x) + model = self.create_model(abstract=True).create_instance() - model.c = Constraint(rule=rule) - self.assertRaises(ValueError, model.create_instance) + model.c = Constraint(expr=(model.x, float('inf'))) + self.assertEqual(model.c.equality, True) + self.assertEqual(model.c.lower, float('inf')) + self.assertIs(model.c.body, model.x) + self.assertEqual(model.c.upper, float('inf')) + with self.assertRaisesRegex( + ValueError, + r"Constraint 'c' created with an invalid non-finite lower bound \(inf\).", + ): + model.c.lb + self.assertEqual(model.c.ub, None) + + model.d = Constraint(expr=(float('inf'), model.x)) + self.assertEqual(model.d.equality, True) + self.assertEqual(model.d.lower, float('inf')) + self.assertIs(model.d.body, model.x) + self.assertEqual(model.d.upper, float('inf')) + with self.assertRaisesRegex( + ValueError, + r"Constraint 'd' created with an invalid non-finite lower bound \(inf\).", + ): + model.d.lb + self.assertEqual(model.d.ub, None) + + 
model.e = Constraint(expr=(model.x, float('-inf'))) + self.assertEqual(model.e.equality, True) + self.assertEqual(model.e.lower, float('-inf')) + self.assertIs(model.e.body, model.x) + self.assertEqual(model.e.upper, float('-inf')) + self.assertEqual(model.e.lb, None) + with self.assertRaisesRegex( + ValueError, + r"Constraint 'e' created with an invalid non-finite upper bound \(-inf\).", + ): + model.e.ub + + model.f = Constraint(expr=(float('-inf'), model.x)) + self.assertEqual(model.f.equality, True) + self.assertEqual(model.f.lower, float('-inf')) + self.assertIs(model.f.body, model.x) + self.assertEqual(model.f.upper, float('-inf')) + self.assertEqual(model.f.lb, None) + with self.assertRaisesRegex( + ValueError, + r"Constraint 'f' created with an invalid non-finite upper bound \(-inf\).", + ): + model.f.ub def test_tuple_construct_1sided_inequality(self): model = self.create_model() @@ -134,9 +168,11 @@ def rule(model): model.c = Constraint(rule=rule) self.assertEqual(model.c.equality, False) - self.assertEqual(model.c.lower, None) + self.assertEqual(model.c.lower, float('-inf')) self.assertIs(model.c.body, model.y) self.assertEqual(model.c.upper, 1) + self.assertEqual(model.c.lb, None) + self.assertEqual(model.c.ub, 1) model = self.create_model() @@ -148,7 +184,9 @@ def rule(model): self.assertEqual(model.c.equality, False) self.assertEqual(model.c.lower, 0) self.assertIs(model.c.body, model.y) - self.assertEqual(model.c.upper, None) + self.assertEqual(model.c.upper, float('inf')) + self.assertEqual(model.c.lb, 0) + self.assertEqual(model.c.ub, None) def test_tuple_construct_unbounded_inequality(self): model = self.create_model() @@ -171,9 +209,11 @@ def rule(model): model.c = Constraint(rule=rule) self.assertEqual(model.c.equality, False) - self.assertEqual(model.c.lower, None) + self.assertEqual(model.c.lower, float('-inf')) self.assertIs(model.c.body, model.y) - self.assertEqual(model.c.upper, None) + self.assertEqual(model.c.upper, float('inf')) + self.assertEqual(model.c.lb, None) + self.assertEqual(model.c.ub, None) def test_tuple_construct_invalid_1sided_inequality(self): model = self.create_model(abstract=True) @@ -229,7 +269,11 @@ def rule(model): ): instance.c.lower self.assertIs(instance.c.body, instance.y) - self.assertEqual(instance.c.upper, 1) + with self.assertRaisesRegex( + ValueError, + "Constraint 'c' is a Ranged Inequality with a variable lower bound", + ): + instance.c.upper instance.x.fix(3) self.assertEqual(value(instance.c.lower), 3) @@ -240,7 +284,11 @@ def rule(model): model.c = Constraint(rule=rule) instance = model.create_instance() - self.assertEqual(instance.c.lower, 0) + with self.assertRaisesRegex( + ValueError, + "Constraint 'c' is a Ranged Inequality with a variable upper bound", + ): + instance.c.lower self.assertIs(instance.c.body, instance.y) with self.assertRaisesRegex( ValueError, @@ -276,21 +324,23 @@ def rule(model): self.assertEqual(model.c.upper, 0) def test_expr_construct_inf_equality(self): - model = self.create_model(abstract=True) - - def rule(model): - return model.x == float('inf') - - model.c = Constraint(rule=rule) - self.assertRaises(ValueError, model.create_instance) + model = self.create_model(abstract=True).create_instance() - model = self.create_model(abstract=True) - - def rule(model): - return float('inf') == model.x + model.c = Constraint(expr=model.x == float('inf')) + self.assertEqual(model.c.ub, None) + with self.assertRaisesRegex( + ValueError, + r"Constraint 'c' created with an invalid non-finite lower bound \(inf\).", + 
): + model.c.lb - model.c = Constraint(rule=rule) - self.assertRaises(ValueError, model.create_instance) + model.d = Constraint(expr=model.x == float('-inf')) + self.assertEqual(model.d.lb, None) + with self.assertRaisesRegex( + ValueError, + r"Constraint 'd' created with an invalid non-finite upper bound \(-inf\).", + ): + model.d.ub def test_expr_construct_1sided_inequality(self): model = self.create_model() @@ -350,9 +400,11 @@ def rule(model): model.c = Constraint(rule=rule) self.assertEqual(model.c.equality, False) - self.assertEqual(model.c.lower, None) + self.assertIs(model.c.lower, None) self.assertIs(model.c.body, model.y) - self.assertEqual(model.c.upper, None) + self.assertEqual(model.c.upper, float('inf')) + self.assertIs(model.c.ub, None) + self.assertIs(model.c.lb, None) model = self.create_model() @@ -362,9 +414,11 @@ def rule(model): model.c = Constraint(rule=rule) self.assertEqual(model.c.equality, False) - self.assertEqual(model.c.lower, None) + self.assertEqual(model.c.lower, float('-inf')) self.assertIs(model.c.body, model.y) self.assertEqual(model.c.upper, None) + self.assertIs(model.c.ub, None) + self.assertIs(model.c.lb, None) model = self.create_model() @@ -374,9 +428,11 @@ def rule(model): model.c = Constraint(rule=rule) self.assertEqual(model.c.equality, False) - self.assertEqual(model.c.lower, None) + self.assertEqual(model.c.lower, float('-inf')) self.assertIs(model.c.body, model.y) self.assertEqual(model.c.upper, None) + self.assertIs(model.c.ub, None) + self.assertIs(model.c.lb, None) model = self.create_model() @@ -388,40 +444,40 @@ def rule(model): self.assertEqual(model.c.equality, False) self.assertEqual(model.c.lower, None) self.assertIs(model.c.body, model.y) - self.assertEqual(model.c.upper, None) + self.assertEqual(model.c.upper, float('inf')) + self.assertIs(model.c.ub, None) + self.assertIs(model.c.lb, None) def test_expr_construct_invalid_unbounded_inequality(self): - model = self.create_model(abstract=True) - - def rule(model): - return model.y <= float('-inf') - - model.c = Constraint(rule=rule) - self.assertRaises(ValueError, model.create_instance) - - model = self.create_model(abstract=True) - - def rule(model): - return float('inf') <= model.y - - model.c = Constraint(rule=rule) - self.assertRaises(ValueError, model.create_instance) - - model = self.create_model(abstract=True) - - def rule(model): - return model.y >= float('inf') + model = self.create_model(abstract=True).create_instance() - model.c = Constraint(rule=rule) - self.assertRaises(ValueError, model.create_instance) + model.c = Constraint(expr=model.y <= float('-inf')) + with self.assertRaisesRegex( + ValueError, + r"Constraint 'c' created with an invalid non-finite upper bound \(-inf\).", + ): + model.c.ub - model = self.create_model(abstract=True) + model.d = Constraint(expr=float('inf') <= model.y) + with self.assertRaisesRegex( + ValueError, + r"Constraint 'd' created with an invalid non-finite lower bound \(inf\).", + ): + model.d.lb - def rule(model): - return float('-inf') >= model.y + model.e = Constraint(expr=model.y >= float('inf')) + with self.assertRaisesRegex( + ValueError, + r"Constraint 'e' created with an invalid non-finite lower bound \(inf\).", + ): + model.e.lb - model.c = Constraint(rule=rule) - self.assertRaises(ValueError, model.create_instance) + model.f = Constraint(expr=float('-inf') >= model.y) + with self.assertRaisesRegex( + ValueError, + r"Constraint 'f' created with an invalid non-finite upper bound \(-inf\).", + ): + model.f.ub def 
test_expr_construct_invalid(self): m = ConcreteModel() @@ -484,9 +540,6 @@ def test_nondata_bounds(self): model.e2 = Expression() model.e3 = Expression() model.c.set_value((model.e1, model.e2, model.e3)) - self.assertIsNone(model.c._lower) - self.assertIsNone(model.c._body) - self.assertIsNone(model.c._upper) self.assertIs(model.c.lower, model.e1) self.assertIs(model.c.body, model.e2) self.assertIs(model.c.upper, model.e3) @@ -507,7 +560,7 @@ def test_nondata_bounds(self): self.assertIs(model.c.body.expr, model.v[2]) with self.assertRaisesRegex( ValueError, - "Constraint 'c' is a Ranged Inequality with a variable upper bound", + "Constraint 'c' is a Ranged Inequality with a variable lower bound", ): model.c.upper @@ -1574,10 +1627,30 @@ def rule1(model): self.assertIs(instance.c.body, instance.x) with self.assertRaisesRegex( ValueError, - "Constraint 'c' is a Ranged Inequality with a variable upper bound", + "Constraint 'c' is a Ranged Inequality with a variable lower bound", ): instance.c.upper + # + def rule1(model): + return (0, model.x, model.z) + + model = AbstractModel() + model.x = Var() + model.z = Var() + model.c = Constraint(rule=rule1) + instance = model.create_instance() + with self.assertRaisesRegex( + ValueError, + "Constraint 'c' is a Ranged Inequality with a variable upper bound", + ): + instance.c.lower + self.assertIs(instance.c.body, instance.x) + with self.assertRaisesRegex( + ValueError, + "Constraint 'c' is a Ranged Inequality with a variable upper bound", + ): + instance.c.upper def test_expression_constructor_coverage(self): def rule1(model): @@ -1807,23 +1880,39 @@ def test_potentially_variable_bounds(self): r"Constraint 'c' is a Ranged Inequality with a variable lower bound", ): m.c.lower - self.assertIs(m.c.upper, m.u) + with self.assertRaisesRegex( + ValueError, + r"Constraint 'c' is a Ranged Inequality with a variable lower bound", + ): + self.assertIs(m.c.upper, m.u) with self.assertRaisesRegex( ValueError, r"Constraint 'c' is a Ranged Inequality with a variable lower bound", ): m.c.lb - self.assertEqual(m.c.ub, 10) + with self.assertRaisesRegex( + ValueError, + r"Constraint 'c' is a Ranged Inequality with a variable lower bound", + ): + self.assertEqual(m.c.ub, 10) m.l = 15 m.u.expr = m.x - self.assertIs(m.c.lower, m.l) + with self.assertRaisesRegex( + ValueError, + r"Constraint 'c' is a Ranged Inequality with a variable upper bound", + ): + self.assertIs(m.c.lower, m.l) with self.assertRaisesRegex( ValueError, r"Constraint 'c' is a Ranged Inequality with a variable upper bound", ): m.c.upper - self.assertEqual(m.c.lb, 15) + with self.assertRaisesRegex( + ValueError, + r"Constraint 'c' is a Ranged Inequality with a variable upper bound", + ): + self.assertEqual(m.c.lb, 15) with self.assertRaisesRegex( ValueError, r"Constraint 'c' is a Ranged Inequality with a variable upper bound", @@ -1890,17 +1979,16 @@ def test_tuple_expression(self): ): m.c = (m.x, None) + # You can create it with an infinite value, but then one of the + # bounds will fail: + m.c = (m.x, float('inf')) + self.assertIsNone(m.c.ub) with self.assertRaisesRegex( ValueError, r"Constraint 'c' created with an invalid " r"non-finite lower bound \(inf\)", ): - m.c = (m.x, float('inf')) - - with self.assertRaisesRegex( - ValueError, r"Equality constraint 'c' defined with non-finite term" - ): - m.c = EqualityExpression((m.x, None)) + m.c.lb if __name__ == "__main__": diff --git a/pyomo/core/tests/unit/test_kernel_register_numpy_types.py b/pyomo/core/tests/unit/test_kernel_register_numpy_types.py 
index 91a0f571881..8186c3d6028 100644 --- a/pyomo/core/tests/unit/test_kernel_register_numpy_types.py +++ b/pyomo/core/tests/unit/test_kernel_register_numpy_types.py @@ -16,7 +16,10 @@ # Boolean numpy_bool_names = [] if numpy_available: - numpy_bool_names.append('bool_') + if numpy.__version__[0] == '2': + numpy_bool_names.append('bool') + else: + numpy_bool_names.append('bool_') # Integers numpy_int_names = [] if numpy_available: @@ -34,7 +37,8 @@ # Reals numpy_float_names = [] if numpy_available: - numpy_float_names.append('float_') + if hasattr(numpy, 'float_'): + numpy_float_names.append('float_') numpy_float_names.append('float16') numpy_float_names.append('float32') numpy_float_names.append('float64') @@ -46,7 +50,8 @@ # Complex numpy_complex_names = [] if numpy_available: - numpy_complex_names.append('complex_') + if hasattr(numpy, 'complex_'): + numpy_complex_names.append('complex_') numpy_complex_names.append('complex64') numpy_complex_names.append('complex128') if hasattr(numpy, 'complex192'): diff --git a/pyomo/core/tests/unit/test_numvalue.py b/pyomo/core/tests/unit/test_numvalue.py index 1cccd3863ea..4d39a42ed70 100644 --- a/pyomo/core/tests/unit/test_numvalue.py +++ b/pyomo/core/tests/unit/test_numvalue.py @@ -552,10 +552,10 @@ def test_unknownNumericType(self): @unittest.skipUnless(numpy_available, "This test requires NumPy") def test_numpy_basic_float_registration(self): - self.assertIn(numpy.float_, native_numeric_types) - self.assertNotIn(numpy.float_, native_integer_types) - self.assertIn(numpy.float_, _native_boolean_types) - self.assertIn(numpy.float_, native_types) + self.assertIn(numpy.float64, native_numeric_types) + self.assertNotIn(numpy.float64, native_integer_types) + self.assertIn(numpy.float64, _native_boolean_types) + self.assertIn(numpy.float64, native_types) @unittest.skipUnless(numpy_available, "This test requires NumPy") def test_numpy_basic_int_registration(self): diff --git a/pyomo/core/tests/unit/test_set.py b/pyomo/core/tests/unit/test_set.py index f62589a6873..0361f41c835 100644 --- a/pyomo/core/tests/unit/test_set.py +++ b/pyomo/core/tests/unit/test_set.py @@ -3763,8 +3763,8 @@ def I_init(m): m = ConcreteModel() m.I = Set(initialize={1, 3, 2, 4}) ref = ( - "Initializing ordered Set I with a " - "fundamentally unordered data source (type: set)." + 'Initializing ordered Set I with a fundamentally ' + 'unordered data source (type: set).' ) self.assertIn(ref, output.getvalue()) self.assertEqual(m.I.sorted_data(), (1, 2, 3, 4)) @@ -3811,6 +3811,7 @@ def I_init(m): self.assertEqual(m.I.data(), (4, 3, 2, 1)) self.assertEqual(m.I.dimen, 1) + def test_initialize_with_noniterable(self): output = StringIO() with LoggingIntercept(output, 'pyomo.core'): with self.assertRaisesRegex(TypeError, "'int' object is not iterable"): @@ -3819,6 +3820,14 @@ def I_init(m): ref = "Initializer for Set I returned non-iterable object of type int." self.assertIn(ref, output.getvalue()) + output = StringIO() + with LoggingIntercept(output, 'pyomo.core'): + with self.assertRaisesRegex(TypeError, "'int' object is not iterable"): + m = ConcreteModel() + m.I = Set([1, 2], initialize=5) + ref = "Initializer for Set I[1] returned non-iterable object of type int." 
+ self.assertIn(ref, output.getvalue()) + def test_scalar_indexed_api(self): m = ConcreteModel() m.I = Set(initialize=range(3)) @@ -3877,12 +3886,13 @@ def _verify(_s, _l): m.I.add(4) _verify(m.I, [1, 3, 2, 4]) + N = len(m.I) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): m.I.add(3) - self.assertEqual( - output.getvalue(), "Element 3 already exists in Set I; no action taken\n" - ) + # In Pyomo <= 6.7.3 duplicate values logged a warning. + self.assertEqual(output.getvalue(), "") + self.assertEqual(N, len(m.I)) _verify(m.I, [1, 3, 2, 4]) m.I.remove(3) @@ -3959,12 +3969,13 @@ def _verify(_s, _l): m.I.add(4) _verify(m.I, [1, 2, 3, 4]) + N = len(m.I) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): m.I.add(3) - self.assertEqual( - output.getvalue(), "Element 3 already exists in Set I; no action taken\n" - ) + # In Pyomo <= 6.7.3 duplicate values logged a warning. + self.assertEqual(output.getvalue(), "") + self.assertEqual(N, len(m.I)) _verify(m.I, [1, 2, 3, 4]) m.I.remove(3) @@ -4052,12 +4063,13 @@ def _verify(_s, _l): m.I.add(4) _verify(m.I, [1, 2, 3, 4]) + N = len(m.I) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): m.I.add(3) - self.assertEqual( - output.getvalue(), "Element 3 already exists in Set I; no action taken\n" - ) + # In Pyomo <= 6.7.3 duplicate values logged a warning. + self.assertEqual(output.getvalue(), "") + self.assertEqual(N, len(m.I)) _verify(m.I, [1, 2, 3, 4]) m.I.remove(3) @@ -4248,26 +4260,23 @@ def test_add_filter_validate(self): self.assertIn(1, m.I) self.assertIn(1.0, m.I) + N = len(m.I) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): self.assertFalse(m.I.add(1)) - self.assertEqual( - output.getvalue(), "Element 1 already exists in Set I; no action taken\n" - ) + # In Pyomo <= 6.7.3 duplicate values logged a warning. + self.assertEqual(output.getvalue(), "") + self.assertEqual(N, len(m.I)) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): self.assertFalse(m.I.add((1,))) - self.assertEqual( - output.getvalue(), "Element (1,) already exists in Set I; no action taken\n" - ) + # In Pyomo <= 6.7.3 duplicate values logged a warning. + self.assertEqual(output.getvalue(), "") m.J = Set() # Note that pypy raises a different exception from cpython - err = ( - r"Unable to insert '{}' into Set J:\n\tTypeError: " - r"((unhashable type: 'dict')|('dict' objects are unhashable))" - ) + err = r"((unhashable type: 'dict')|('dict' objects are unhashable))" with self.assertRaisesRegex(TypeError, err): m.J.add({}) @@ -4275,9 +4284,9 @@ def test_add_filter_validate(self): output = StringIO() with LoggingIntercept(output, 'pyomo.core'): self.assertFalse(m.J.add(1)) - self.assertEqual( - output.getvalue(), "Element 1 already exists in Set J; no action taken\n" - ) + # In Pyomo <= 6.7.3 duplicate values logged a warning. 
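# Editor's note: a compact sketch of the add() semantics asserted here
# (standalone; the model and set names are illustrative, not part of this
# patch).  add() is now a thin wrapper around update() that returns the
# number of elements actually inserted, and duplicates no longer log a warning.
import pyomo.environ as pyo

m = pyo.ConcreteModel()
m.I = pyo.Set(initialize=[1, 3, 2])
assert m.I.add(4) == 1   # one new element inserted
assert m.I.add(3) == 0   # duplicate: silently ignored, nothing logged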
+ self.assertEqual(output.getvalue(), "") + self.assertEqual(N, len(m.I)) def _l_tri(model, i, j): self.assertIs(model, m) @@ -4543,9 +4552,11 @@ def test_construction(self): m.I = Set(initialize=[1, 2, 3]) m.J = Set(initialize=[4, 5, 6]) m.K = Set(initialize=[(1, 4), (2, 6), (3, 5)], within=m.I * m.J) + m.L = Set(initialize=[1, 3], within=m.I) m.II = Set([1, 2, 3], initialize={1: [0], 2: [1, 2], 3: range(3)}) m.JJ = Set([1, 2, 3], initialize={1: [0], 2: [1, 2], 3: range(3)}) m.KK = Set([1, 2], initialize=[], dimen=lambda m, i: i) + m.LL = Set([2, 3], within=m.II, initialize={2: [1, 2], 3: [1]}) output = StringIO() m.I.pprint(ostream=output) @@ -4569,6 +4580,8 @@ def test_construction(self): 'I': [-1, 0], 'II': {1: [10, 11], 3: [30]}, 'K': [-1, 4, -1, 6, 0, 5], + 'L': [-1], + 'LL': {3: [30]}, } } ) @@ -4576,6 +4589,7 @@ def test_construction(self): self.assertEqual(list(i.I), [-1, 0]) self.assertEqual(list(i.J), [4, 5, 6]) self.assertEqual(list(i.K), [(-1, 4), (-1, 6), (0, 5)]) + self.assertEqual(list(i.L), [-1]) self.assertEqual(list(i.II[1]), [10, 11]) self.assertEqual(list(i.II[3]), [30]) self.assertEqual(list(i.JJ[1]), [0]) @@ -4583,9 +4597,11 @@ def test_construction(self): self.assertEqual(list(i.JJ[3]), [0, 1, 2]) self.assertEqual(list(i.KK[1]), []) self.assertEqual(list(i.KK[2]), []) + self.assertEqual(list(i.LL[3]), [30]) # Implicitly-constructed set should fall back on initialize! self.assertEqual(list(i.II[2]), [1, 2]) + self.assertEqual(list(i.LL[2]), [1, 2]) # Additional tests for tuplize: i = m.create_instance(data={None: {'K': [(1, 4), (2, 6)], 'KK': [1, 4, 2, 6]}}) @@ -5247,6 +5263,21 @@ def Bindex(m): self.assertIs(m.K.index_set()._domain, Integers) self.assertEqual(m.K.index_set(), [0, 1, 2, 3, 4]) + def test_normalize_index(self): + try: + _oldFlatten = normalize_index.flatten + normalize_index.flatten = True + + m = ConcreteModel() + with self.assertRaisesRegex( + ValueError, + r"The value=\(\(2, 3\),\) has dimension 2 and is not " + "valid for Set I which has dimen=1", + ): + m.I = Set(initialize=[1, ((2, 3),)]) + finally: + normalize_index.flatten = _oldFlatten + def test_no_normalize_index(self): try: _oldFlatten = normalize_index.flatten @@ -6388,3 +6419,209 @@ def test_issue_1112(self): self.assertEqual(len(vals), 1) self.assertIsInstance(vals[0], SetProduct_OrderedSet) self.assertIsNot(vals[0], cross) + + def test_issue_3284(self): + # test creating (indexed and non-indexed) sets using the within argument + # using concrete model and initialization + problem = ConcreteModel() + # non-indexed sets not using the within argument + problem.A = Set(initialize=[1, 2, 3]) + problem.B = Set(dimen=2, initialize=[(1, 2), (3, 4), (5, 6)]) + # non-indexed sets using within argument + problem.subset_A = Set(within=problem.A, initialize=[2, 3]) + problem.subset_B = Set(within=problem.B, dimen=2, initialize=[(1, 2), (5, 6)]) + # indexed sets not using the within argument + problem.C = Set(problem.A, initialize={1: [-1, 3], 2: [4, 7], 3: [3, 8]}) + problem.D = Set( + problem.B, initialize={(1, 2): [1, 5], (3, 4): [3], (5, 6): [6, 8, 9]} + ) + # indexed sets using an indexed set for the within argument + problem.subset_C = Set( + problem.A, within=problem.C, initialize={1: [-1], 2: [4], 3: [3, 8]} + ) + problem.subset_D = Set( + problem.B, + within=problem.D, + initialize={(1, 2): [1, 5], (3, 4): [], (5, 6): [6]}, + ) + # indexed sets using a non-indexed set for the within argument + problem.E = Set([0, 1], within=problem.A, initialize={0: [1, 2], 1: [3]}) + problem.F = Set( + 
[(1, 2, 3), (4, 5, 6)], + within=problem.B, + initialize={(1, 2, 3): [(1, 2)], (4, 5, 6): [(3, 4)]}, + ) + # check them + self.assertEqual(list(problem.A), [1, 2, 3]) + self.assertEqual(list(problem.B), [(1, 2), (3, 4), (5, 6)]) + self.assertEqual(list(problem.subset_A), [2, 3]) + self.assertEqual(list(problem.subset_B), [(1, 2), (5, 6)]) + self.assertEqual(list(problem.C[1]), [-1, 3]) + self.assertEqual(list(problem.C[2]), [4, 7]) + self.assertEqual(list(problem.C[3]), [3, 8]) + self.assertEqual(list(problem.D[(1, 2)]), [1, 5]) + self.assertEqual(list(problem.D[(3, 4)]), [3]) + self.assertEqual(list(problem.D[(5, 6)]), [6, 8, 9]) + self.assertEqual(list(problem.subset_C[1]), [-1]) + self.assertEqual(list(problem.subset_C[2]), [4]) + self.assertEqual(list(problem.subset_C[3]), [3, 8]) + self.assertEqual(list(problem.subset_D[(1, 2)]), [1, 5]) + self.assertEqual(list(problem.subset_D[(3, 4)]), []) + self.assertEqual(list(problem.subset_D[(5, 6)]), [6]) + self.assertEqual(list(problem.E[0]), [1, 2]) + self.assertEqual(list(problem.E[1]), [3]) + self.assertEqual(list(problem.F[(1, 2, 3)]), [(1, 2)]) + self.assertEqual(list(problem.F[(4, 5, 6)]), [(3, 4)]) + + # try adding elements to test the domains (1 compatible, 1 incompatible) + # set subset_A + problem.subset_A.add(1) + error_message = ( + "Cannot add value 4 to Set subset_A.\n\tThe value is not in the domain A" + ) + with self.assertRaisesRegex(ValueError, error_message): + problem.subset_A.add(4) + # set subset_B + problem.subset_B.add((3, 4)) + with self.assertRaisesRegex(ValueError, r".*Cannot add value \(7, 8\)"): + problem.subset_B.add((7, 8)) + # set subset_C + problem.subset_C[2].add(7) + with self.assertRaisesRegex(ValueError, ".*Cannot add value 8 to Set"): + problem.subset_C[2].add(8) + # set subset_D + problem.subset_D[(5, 6)].add(9) + with self.assertRaisesRegex(ValueError, ".*Cannot add value 2 to Set"): + problem.subset_D[(3, 4)].add(2) + # set E + problem.E[1].add(2) + with self.assertRaisesRegex(ValueError, ".*Cannot add value 4 to Set"): + problem.E[1].add(4) + # set F + problem.F[(1, 2, 3)].add((3, 4)) + with self.assertRaisesRegex(ValueError, r".*Cannot add value \(4, 3\)"): + problem.F[(4, 5, 6)].add((4, 3)) + # check them + self.assertEqual(list(problem.A), [1, 2, 3]) + self.assertEqual(list(problem.B), [(1, 2), (3, 4), (5, 6)]) + self.assertEqual(list(problem.subset_A), [2, 3, 1]) + self.assertEqual(list(problem.subset_B), [(1, 2), (5, 6), (3, 4)]) + self.assertEqual(list(problem.C[1]), [-1, 3]) + self.assertEqual(list(problem.C[2]), [4, 7]) + self.assertEqual(list(problem.C[3]), [3, 8]) + self.assertEqual(list(problem.D[(1, 2)]), [1, 5]) + self.assertEqual(list(problem.D[(3, 4)]), [3]) + self.assertEqual(list(problem.D[(5, 6)]), [6, 8, 9]) + self.assertEqual(list(problem.subset_C[1]), [-1]) + self.assertEqual(list(problem.subset_C[2]), [4, 7]) + self.assertEqual(list(problem.subset_C[3]), [3, 8]) + self.assertEqual(list(problem.subset_D[(1, 2)]), [1, 5]) + self.assertEqual(list(problem.subset_D[(3, 4)]), []) + self.assertEqual(list(problem.subset_D[(5, 6)]), [6, 9]) + self.assertEqual(list(problem.E[0]), [1, 2]) + self.assertEqual(list(problem.E[1]), [3, 2]) + self.assertEqual(list(problem.F[(1, 2, 3)]), [(1, 2), (3, 4)]) + self.assertEqual(list(problem.F[(4, 5, 6)]), [(3, 4)]) + + # using abstract model and no initialization + model = AbstractModel() + # non-indexed sets not using the within argument + model.A = Set() + model.B = Set(dimen=2) + # non-indexed sets using within argument + model.subset_A = 
Set(within=model.A) + model.subset_B = Set(within=model.B, dimen=2) + # indexed sets not using the within argument + model.C = Set(model.A) + model.D = Set(model.B) + # indexed sets using an indexed set for the within argument + model.subset_C = Set(model.A, within=model.C) + model.subset_D = Set(model.B, within=model.D) + # indexed sets using a non-indexed set for the within argument + model.E_index = Set() + model.F_index = Set() + model.E = Set(model.E_index, within=model.A) + model.F = Set(model.F_index, within=model.B) + problem = model.create_instance( + data={ + None: { + 'A': [3, 4, 5], + 'B': [(1, 2), (7, 8)], + 'subset_A': [3, 4], + 'subset_B': [(1, 2)], + 'C': {3: [3], 4: [4, 8], 5: [5, 6]}, + 'D': {(1, 2): [2], (7, 8): [0, 1]}, + 'subset_C': {3: [3], 4: [8], 5: []}, + 'subset_D': {(1, 2): [], (7, 8): [0, 1]}, + 'E_index': [0, 1], + 'F_index': [(1, 2, 3), (4, 5, 6)], + 'E': {0: [3, 4], 1: [5]}, + 'F': {(1, 2, 3): [(1, 2)], (4, 5, 6): [(7, 8)]}, + } + } + ) + + # check them + self.assertEqual(list(problem.A), [3, 4, 5]) + self.assertEqual(list(problem.B), [(1, 2), (7, 8)]) + self.assertEqual(list(problem.subset_A), [3, 4]) + self.assertEqual(list(problem.subset_B), [(1, 2)]) + self.assertEqual(list(problem.C[3]), [3]) + self.assertEqual(list(problem.C[4]), [4, 8]) + self.assertEqual(list(problem.C[5]), [5, 6]) + self.assertEqual(list(problem.D[(1, 2)]), [2]) + self.assertEqual(list(problem.D[(7, 8)]), [0, 1]) + self.assertEqual(list(problem.subset_C[3]), [3]) + self.assertEqual(list(problem.subset_C[4]), [8]) + self.assertEqual(list(problem.subset_C[5]), []) + self.assertEqual(list(problem.subset_D[(1, 2)]), []) + self.assertEqual(list(problem.subset_D[(7, 8)]), [0, 1]) + self.assertEqual(list(problem.E[0]), [3, 4]) + self.assertEqual(list(problem.E[1]), [5]) + self.assertEqual(list(problem.F[(1, 2, 3)]), [(1, 2)]) + self.assertEqual(list(problem.F[(4, 5, 6)]), [(7, 8)]) + + # try adding elements to test the domains (1 compatible, 1 incompatible) + # set subset_A + problem.subset_A.add(5) + with self.assertRaisesRegex(ValueError, ".*Cannot add value "): + problem.subset_A.add(6) + # set subset_B + problem.subset_B.add((7, 8)) + with self.assertRaisesRegex(ValueError, ".*Cannot add value "): + problem.subset_B.add((3, 4)) + # set subset_C + problem.subset_C[4].add(4) + with self.assertRaisesRegex(ValueError, ".*Cannot add value "): + problem.subset_C[4].add(9) + # set subset_D + problem.subset_D[(1, 2)].add(2) + with self.assertRaisesRegex(ValueError, ".*Cannot add value "): + problem.subset_D[(1, 2)].add(3) + # set E + problem.E[1].add(4) + with self.assertRaisesRegex(ValueError, ".*Cannot add value "): + problem.E[1].add(1) + # set F + problem.F[(1, 2, 3)].add((7, 8)) + with self.assertRaisesRegex(ValueError, ".*Cannot add value "): + problem.F[(4, 5, 6)].add((4, 3)) + # check them + self.assertEqual(list(problem.A), [3, 4, 5]) + self.assertEqual(list(problem.B), [(1, 2), (7, 8)]) + self.assertEqual(list(problem.subset_A), [3, 4, 5]) + self.assertEqual(list(problem.subset_B), [(1, 2), (7, 8)]) + self.assertEqual(list(problem.C[3]), [3]) + self.assertEqual(list(problem.C[4]), [4, 8]) + self.assertEqual(list(problem.C[5]), [5, 6]) + self.assertEqual(list(problem.D[(1, 2)]), [2]) + self.assertEqual(list(problem.D[(7, 8)]), [0, 1]) + self.assertEqual(list(problem.subset_C[3]), [3]) + self.assertEqual(list(problem.subset_C[4]), [8, 4]) + self.assertEqual(list(problem.subset_C[5]), []) + self.assertEqual(list(problem.subset_D[(1, 2)]), [2]) + self.assertEqual(list(problem.subset_D[(7, 
8)]), [0, 1]) + self.assertEqual(list(problem.E[0]), [3, 4]) + self.assertEqual(list(problem.E[1]), [5, 4]) + self.assertEqual(list(problem.F[(1, 2, 3)]), [(1, 2), (7, 8)]) + self.assertEqual(list(problem.F[(4, 5, 6)]), [(7, 8)]) diff --git a/pyomo/core/tests/unit/test_sets.py b/pyomo/core/tests/unit/test_sets.py index 48869397aae..4d305ebab86 100644 --- a/pyomo/core/tests/unit/test_sets.py +++ b/pyomo/core/tests/unit/test_sets.py @@ -1051,7 +1051,7 @@ def setUp(self): self.instance = self.model.create_instance(currdir + "setA.dat") self.e1 = numpy.bool_(1) self.e2 = numpy.int_(2) - self.e3 = numpy.float_(3.0) + self.e3 = numpy.float64(3.0) self.e4 = numpy.int_(4) self.e5 = numpy.int_(5) self.e6 = numpy.int_(6) @@ -1068,7 +1068,7 @@ def test_numpy_int(self): def test_numpy_float(self): model = ConcreteModel() - model.A = Set(initialize=[numpy.float_(1.0), numpy.float_(0.0)]) + model.A = Set(initialize=[numpy.float64(1.0), numpy.float64(0.0)]) self.assertEqual(model.A.bounds(), (0, 1)) @@ -3213,7 +3213,7 @@ def test_numpy_membership(self): self.assertEqual(numpy.int_(1) in Boolean, True) self.assertEqual(numpy.bool_(True) in Boolean, True) self.assertEqual(numpy.bool_(False) in Boolean, True) - self.assertEqual(numpy.float_(1.1) in Boolean, False) + self.assertEqual(numpy.float64(1.1) in Boolean, False) self.assertEqual(numpy.int_(2) in Boolean, False) self.assertEqual(numpy.int_(0) in Integers, True) @@ -3222,7 +3222,7 @@ def test_numpy_membership(self): # identically to 1 self.assertEqual(numpy.bool_(True) in Integers, True) self.assertEqual(numpy.bool_(False) in Integers, True) - self.assertEqual(numpy.float_(1.1) in Integers, False) + self.assertEqual(numpy.float64(1.1) in Integers, False) self.assertEqual(numpy.int_(2) in Integers, True) self.assertEqual(numpy.int_(0) in Reals, True) @@ -3231,14 +3231,14 @@ def test_numpy_membership(self): # identically to 1 self.assertEqual(numpy.bool_(True) in Reals, True) self.assertEqual(numpy.bool_(False) in Reals, True) - self.assertEqual(numpy.float_(1.1) in Reals, True) + self.assertEqual(numpy.float64(1.1) in Reals, True) self.assertEqual(numpy.int_(2) in Reals, True) self.assertEqual(numpy.int_(0) in Any, True) self.assertEqual(numpy.int_(1) in Any, True) self.assertEqual(numpy.bool_(True) in Any, True) self.assertEqual(numpy.bool_(False) in Any, True) - self.assertEqual(numpy.float_(1.1) in Any, True) + self.assertEqual(numpy.float64(1.1) in Any, True) self.assertEqual(numpy.int_(2) in Any, True) def test_setargs1(self): diff --git a/pyomo/gdp/plugins/bilinear.py b/pyomo/gdp/plugins/bilinear.py index 67390801348..bc91836ea9c 100644 --- a/pyomo/gdp/plugins/bilinear.py +++ b/pyomo/gdp/plugins/bilinear.py @@ -77,9 +77,10 @@ def _transformBlock(self, block, instance): for component in block.component_data_objects( Constraint, active=True, descend_into=False ): - expr = self._transformExpression(component.body, instance) - instance.bilinear_data_.c_body[id(component)] = component.body - component._body = expr + lb, body, ub = component.to_bounded_expression() + expr = self._transformExpression(body, instance) + instance.bilinear_data_.c_body[id(component)] = body + component.set_value((lb, expr, ub)) def _transformExpression(self, expr, instance): if expr.polynomial_degree() > 2: diff --git a/pyomo/gdp/plugins/cuttingplane.py b/pyomo/gdp/plugins/cuttingplane.py index 6c77a582987..4cef098eba9 100644 --- a/pyomo/gdp/plugins/cuttingplane.py +++ b/pyomo/gdp/plugins/cuttingplane.py @@ -400,7 +400,8 @@ def 
back_off_constraint_with_calculated_cut_violation( val = value(transBlock_rHull.infeasibility_objective) - TOL if val <= 0: logger.info("\tBacking off cut by %s" % val) - cut._body += abs(val) + lb, body, ub = cut.to_bounded_expression() + cut.set_value((lb, body + abs(val), ub)) # else there is nothing to do: restore the objective transBlock_rHull.del_component(transBlock_rHull.infeasibility_objective) transBlock_rHull.separation_objective.activate() @@ -424,7 +425,8 @@ def back_off_constraint_by_fixed_tolerance( this callback TOL: An absolute tolerance to be added to make cut more conservative. """ - cut._body += TOL + lb, body, ub = cut.to_bounded_expression() + cut.set_value((lb, body + TOL, ub)) @TransformationFactory.register( diff --git a/pyomo/gdp/plugins/multiple_bigm.py b/pyomo/gdp/plugins/multiple_bigm.py index 4dffd4e9f9a..3362276246b 100644 --- a/pyomo/gdp/plugins/multiple_bigm.py +++ b/pyomo/gdp/plugins/multiple_bigm.py @@ -12,7 +12,7 @@ import itertools import logging -from pyomo.common.collections import ComponentMap +from pyomo.common.collections import ComponentMap, ComponentSet from pyomo.common.config import ConfigDict, ConfigValue from pyomo.common.gc_manager import PauseGC from pyomo.common.modeling import unique_component_name @@ -310,9 +310,12 @@ def _transform_disjunctionData(self, obj, index, parent_disjunct, root_disjunct) arg_Ms = self._config.bigM if self._config.bigM is not None else {} + # ESJ: I am relying on the fact that the ComponentSet is going to be + # ordered here, but using a set because I will remove infeasible + # Disjuncts from it if I encounter them calculating M's. + active_disjuncts = ComponentSet(disj for disj in obj.disjuncts if disj.active) # First handle the bound constraints if we are dealing with them # separately - active_disjuncts = [disj for disj in obj.disjuncts if disj.active] transformed_constraints = set() if self._config.reduce_bound_constraints: transformed_constraints = self._transform_bound_constraints( @@ -585,7 +588,7 @@ def _calculate_missing_M_values( ): if disjunct is other_disjunct: continue - if id(other_disjunct) in scratch_blocks: + elif id(other_disjunct) in scratch_blocks: scratch = scratch_blocks[id(other_disjunct)] else: scratch = scratch_blocks[id(other_disjunct)] = Block() @@ -631,7 +634,10 @@ def _calculate_missing_M_values( scratch.obj.expr = constraint.body - constraint.lower scratch.obj.sense = minimize lower_M = self._solve_disjunct_for_M( - other_disjunct, scratch, unsuccessful_solve_msg + other_disjunct, + scratch, + unsuccessful_solve_msg, + active_disjuncts, ) if constraint.upper is not None and upper_M is None: # last resort: calculate @@ -639,7 +645,10 @@ def _calculate_missing_M_values( scratch.obj.expr = constraint.body - constraint.upper scratch.obj.sense = maximize upper_M = self._solve_disjunct_for_M( - other_disjunct, scratch, unsuccessful_solve_msg + other_disjunct, + scratch, + unsuccessful_solve_msg, + active_disjuncts, ) arg_Ms[constraint, other_disjunct] = (lower_M, upper_M) transBlock._mbm_values[constraint, other_disjunct] = (lower_M, upper_M) @@ -651,9 +660,18 @@ def _calculate_missing_M_values( return arg_Ms def _solve_disjunct_for_M( - self, other_disjunct, scratch_block, unsuccessful_solve_msg + self, other_disjunct, scratch_block, unsuccessful_solve_msg, active_disjuncts ): + if not other_disjunct.active: + # If a Disjunct is infeasible, we will discover that and deactivate + # it when we are calculating the M values. 
We remove that disjunct + # from active_disjuncts inside of the loop in + # _calculate_missing_M_values. So that means that we might have + # deactivated Disjuncts here that we should skip over. + return 0 + solver = self._config.solver + results = solver.solve(other_disjunct, load_solutions=False) if results.solver.termination_condition is TerminationCondition.infeasible: # [2/18/24]: TODO: After the solver rewrite is complete, we will not @@ -669,6 +687,7 @@ def _solve_disjunct_for_M( "Disjunct '%s' is infeasible, deactivating." % other_disjunct.name ) other_disjunct.deactivate() + active_disjuncts.remove(other_disjunct) M = 0 else: # This is a solver that might report diff --git a/pyomo/gdp/tests/test_mbigm.py b/pyomo/gdp/tests/test_mbigm.py index 9e82b1010f9..14a23160574 100644 --- a/pyomo/gdp/tests/test_mbigm.py +++ b/pyomo/gdp/tests/test_mbigm.py @@ -1019,39 +1019,34 @@ def test_calculate_Ms_infeasible_Disjunct(self): out.getvalue().strip(), ) - # We just fixed the infeasible by to False + # We just fixed the infeasible disjunct to False self.assertFalse(m.disjunction.disjuncts[0].active) self.assertTrue(m.disjunction.disjuncts[0].indicator_var.fixed) self.assertFalse(value(m.disjunction.disjuncts[0].indicator_var)) + # We didn't actually transform the infeasible disjunct + self.assertIsNone(m.disjunction.disjuncts[0].transformation_block) + # the remaining constraints are transformed correctly. cons = mbm.get_transformed_constraints(m.disjunction.disjuncts[1].constraint[1]) self.assertEqual(len(cons), 1) assertExpressionsEqual( self, cons[0].expr, - 21 + m.x - m.y - <= 0 * m.disjunction.disjuncts[0].binary_indicator_var - + 12.0 * m.disjunction.disjuncts[2].binary_indicator_var, + 21 + m.x - m.y <= 12.0 * m.disjunction.disjuncts[2].binary_indicator_var, ) cons = mbm.get_transformed_constraints(m.disjunction.disjuncts[2].constraint[1]) self.assertEqual(len(cons), 2) - print(cons[0].expr) - print(cons[1].expr) assertExpressionsEqual( self, cons[0].expr, - 0.0 * m.disjunction_disjuncts[0].binary_indicator_var - - 12.0 * m.disjunction_disjuncts[1].binary_indicator_var - <= m.x - (m.y - 9), + -12.0 * m.disjunction_disjuncts[1].binary_indicator_var <= m.x - (m.y - 9), ) assertExpressionsEqual( self, cons[1].expr, - m.x - (m.y - 9) - <= 0.0 * m.disjunction_disjuncts[0].binary_indicator_var - - 12.0 * m.disjunction_disjuncts[1].binary_indicator_var, + m.x - (m.y - 9) <= -12.0 * m.disjunction_disjuncts[1].binary_indicator_var, ) @unittest.skipUnless( diff --git a/pyomo/neos/__init__.py b/pyomo/neos/__init__.py index 7d18535e753..9f910f4a302 100644 --- a/pyomo/neos/__init__.py +++ b/pyomo/neos/__init__.py @@ -30,7 +30,7 @@ 'minos': 'SLC NLP solver', 'minto': 'MILP solver', 'mosek': 'Interior point NLP solver', - 'octeract': 'Deterministic global MINLP solver', + #'octeract': 'Deterministic global MINLP solver', 'ooqp': 'Convex QP solver', 'path': 'Nonlinear MCP solver', 'snopt': 'SQP NLP solver', diff --git a/pyomo/neos/tests/test_neos.py b/pyomo/neos/tests/test_neos.py index a4c4e9e6367..681856781be 100644 --- a/pyomo/neos/tests/test_neos.py +++ b/pyomo/neos/tests/test_neos.py @@ -79,6 +79,9 @@ def test_doc(self): doc = pyomo.neos.doc dockeys = set(doc.keys()) + # Octeract interface is disabled, see #3321 + amplsolvers.remove('octeract') + self.assertEqual(amplsolvers, dockeys) # gamssolvers = set(v[0].lower() for v in tmp if v[1]=='GAMS') @@ -149,8 +152,11 @@ def test_minto(self): def test_mosek(self): self._run('mosek') - def test_octeract(self): - self._run('octeract') + # [16 Jul 24] 
Octeract is erroring. We will disable the interface + # (and testing) until we have time to resolve #3321 + # + # def test_octeract(self): + # self._run('octeract') def test_ooqp(self): if self.sense == pyo.maximize: diff --git a/pyomo/repn/linear.py b/pyomo/repn/linear.py index 6d084067511..029fe892b62 100644 --- a/pyomo/repn/linear.py +++ b/pyomo/repn/linear.py @@ -183,8 +183,6 @@ def to_expression(visitor, arg): return arg[1].to_expression(visitor) -_exit_node_handlers = {} - # # NEGATION handlers # @@ -199,11 +197,6 @@ def _handle_negation_ANY(visitor, node, arg): return arg -_exit_node_handlers[NegationExpression] = { - None: _handle_negation_ANY, - (_CONSTANT,): _handle_negation_constant, -} - # # PRODUCT handlers # @@ -272,16 +265,6 @@ def _handle_product_nonlinear(visitor, node, arg1, arg2): return _GENERAL, ans -_exit_node_handlers[ProductExpression] = { - None: _handle_product_nonlinear, - (_CONSTANT, _CONSTANT): _handle_product_constant_constant, - (_CONSTANT, _LINEAR): _handle_product_constant_ANY, - (_CONSTANT, _GENERAL): _handle_product_constant_ANY, - (_LINEAR, _CONSTANT): _handle_product_ANY_constant, - (_GENERAL, _CONSTANT): _handle_product_ANY_constant, -} -_exit_node_handlers[MonomialTermExpression] = _exit_node_handlers[ProductExpression] - # # DIVISION handlers # @@ -302,13 +285,6 @@ def _handle_division_nonlinear(visitor, node, arg1, arg2): return _GENERAL, ans -_exit_node_handlers[DivisionExpression] = { - None: _handle_division_nonlinear, - (_CONSTANT, _CONSTANT): _handle_division_constant_constant, - (_LINEAR, _CONSTANT): _handle_division_ANY_constant, - (_GENERAL, _CONSTANT): _handle_division_ANY_constant, -} - # # EXPONENTIATION handlers # @@ -345,13 +321,6 @@ def _handle_pow_nonlinear(visitor, node, arg1, arg2): return _GENERAL, ans -_exit_node_handlers[PowExpression] = { - None: _handle_pow_nonlinear, - (_CONSTANT, _CONSTANT): _handle_pow_constant_constant, - (_LINEAR, _CONSTANT): _handle_pow_ANY_constant, - (_GENERAL, _CONSTANT): _handle_pow_ANY_constant, -} - # # ABS and UNARY handlers # @@ -371,12 +340,6 @@ def _handle_unary_nonlinear(visitor, node, arg): return _GENERAL, ans -_exit_node_handlers[UnaryFunctionExpression] = { - None: _handle_unary_nonlinear, - (_CONSTANT,): _handle_unary_constant, -} -_exit_node_handlers[AbsExpression] = _exit_node_handlers[UnaryFunctionExpression] - # # NAMED EXPRESSION handlers # @@ -395,11 +358,6 @@ def _handle_named_ANY(visitor, node, arg1): return _type, arg1.duplicate() -_exit_node_handlers[Expression] = { - None: _handle_named_ANY, - (_CONSTANT,): _handle_named_constant, -} - # # EXPR_IF handlers # @@ -430,11 +388,6 @@ def _handle_expr_if_nonlinear(visitor, node, arg1, arg2, arg3): return _GENERAL, ans -_exit_node_handlers[Expr_ifExpression] = {None: _handle_expr_if_nonlinear} -for j in (_CONSTANT, _LINEAR, _GENERAL): - for k in (_CONSTANT, _LINEAR, _GENERAL): - _exit_node_handlers[Expr_ifExpression][_CONSTANT, j, k] = _handle_expr_if_const - # # Relational expression handlers # @@ -462,12 +415,6 @@ def _handle_equality_general(visitor, node, arg1, arg2): return _GENERAL, ans -_exit_node_handlers[EqualityExpression] = { - None: _handle_equality_general, - (_CONSTANT, _CONSTANT): _handle_equality_const, -} - - def _handle_inequality_const(visitor, node, arg1, arg2): # It is exceptionally likely that if we get here, one of the # arguments is an InvalidNumber @@ -490,12 +437,6 @@ def _handle_inequality_general(visitor, node, arg1, arg2): return _GENERAL, ans -_exit_node_handlers[InequalityExpression] = { - None: 
_handle_inequality_general, - (_CONSTANT, _CONSTANT): _handle_inequality_const, -} - - def _handle_ranged_const(visitor, node, arg1, arg2, arg3): # It is exceptionally likely that if we get here, one of the # arguments is an InvalidNumber @@ -523,10 +464,62 @@ def _handle_ranged_general(visitor, node, arg1, arg2, arg3): return _GENERAL, ans -_exit_node_handlers[RangedExpression] = { - None: _handle_ranged_general, - (_CONSTANT, _CONSTANT, _CONSTANT): _handle_ranged_const, -} +def define_exit_node_handlers(_exit_node_handlers=None): + if _exit_node_handlers is None: + _exit_node_handlers = {} + _exit_node_handlers[NegationExpression] = { + None: _handle_negation_ANY, + (_CONSTANT,): _handle_negation_constant, + } + _exit_node_handlers[ProductExpression] = { + None: _handle_product_nonlinear, + (_CONSTANT, _CONSTANT): _handle_product_constant_constant, + (_CONSTANT, _LINEAR): _handle_product_constant_ANY, + (_CONSTANT, _GENERAL): _handle_product_constant_ANY, + (_LINEAR, _CONSTANT): _handle_product_ANY_constant, + (_GENERAL, _CONSTANT): _handle_product_ANY_constant, + } + _exit_node_handlers[MonomialTermExpression] = _exit_node_handlers[ProductExpression] + _exit_node_handlers[DivisionExpression] = { + None: _handle_division_nonlinear, + (_CONSTANT, _CONSTANT): _handle_division_constant_constant, + (_LINEAR, _CONSTANT): _handle_division_ANY_constant, + (_GENERAL, _CONSTANT): _handle_division_ANY_constant, + } + _exit_node_handlers[PowExpression] = { + None: _handle_pow_nonlinear, + (_CONSTANT, _CONSTANT): _handle_pow_constant_constant, + (_LINEAR, _CONSTANT): _handle_pow_ANY_constant, + (_GENERAL, _CONSTANT): _handle_pow_ANY_constant, + } + _exit_node_handlers[UnaryFunctionExpression] = { + None: _handle_unary_nonlinear, + (_CONSTANT,): _handle_unary_constant, + } + _exit_node_handlers[AbsExpression] = _exit_node_handlers[UnaryFunctionExpression] + _exit_node_handlers[Expression] = { + None: _handle_named_ANY, + (_CONSTANT,): _handle_named_constant, + } + _exit_node_handlers[Expr_ifExpression] = {None: _handle_expr_if_nonlinear} + for j in (_CONSTANT, _LINEAR, _GENERAL): + for k in (_CONSTANT, _LINEAR, _GENERAL): + _exit_node_handlers[Expr_ifExpression][ + _CONSTANT, j, k + ] = _handle_expr_if_const + _exit_node_handlers[EqualityExpression] = { + None: _handle_equality_general, + (_CONSTANT, _CONSTANT): _handle_equality_const, + } + _exit_node_handlers[InequalityExpression] = { + None: _handle_inequality_general, + (_CONSTANT, _CONSTANT): _handle_inequality_const, + } + _exit_node_handlers[RangedExpression] = { + None: _handle_ranged_general, + (_CONSTANT, _CONSTANT, _CONSTANT): _handle_ranged_const, + } + return _exit_node_handlers class LinearBeforeChildDispatcher(BeforeChildDispatcher): @@ -728,9 +721,8 @@ def _initialize_exit_node_dispatcher(exit_handlers): class LinearRepnVisitor(StreamBasedExpressionVisitor): Result = LinearRepn - exit_node_handlers = _exit_node_handlers exit_node_dispatcher = ExitNodeDispatcher( - _initialize_exit_node_dispatcher(_exit_node_handlers) + _initialize_exit_node_dispatcher(define_exit_node_handlers()) ) expand_nonlinear_products = False max_exponential_expansion = 1 diff --git a/pyomo/repn/parameterized_linear.py b/pyomo/repn/parameterized_linear.py new file mode 100644 index 00000000000..d1295b73e14 --- /dev/null +++ b/pyomo/repn/parameterized_linear.py @@ -0,0 +1,395 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2024 +# National Technology and 
Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import copy + +from pyomo.common.collections import ComponentSet +from pyomo.common.numeric_types import native_numeric_types +from pyomo.core import Var +from pyomo.core.expr.logical_expr import _flattened +from pyomo.core.expr.numeric_expr import ( + AbsExpression, + DivisionExpression, + LinearExpression, + MonomialTermExpression, + NegationExpression, + mutable_expression, + PowExpression, + ProductExpression, + SumExpression, + UnaryFunctionExpression, +) +from pyomo.repn.linear import ( + ExitNodeDispatcher, + _initialize_exit_node_dispatcher, + LinearBeforeChildDispatcher, + LinearRepn, + LinearRepnVisitor, +) +from pyomo.repn.util import ExprType +import pyomo.repn.linear as linear + + +_FIXED = ExprType.FIXED +_CONSTANT = ExprType.CONSTANT +_LINEAR = ExprType.LINEAR +_GENERAL = ExprType.GENERAL + + +def _merge_dict(dest_dict, mult, src_dict): + if mult.__class__ not in native_numeric_types or mult != 1: + for vid, coef in src_dict.items(): + if vid in dest_dict: + dest_dict[vid] += mult * coef + else: + dest_dict[vid] = mult * coef + else: + for vid, coef in src_dict.items(): + if vid in dest_dict: + dest_dict[vid] += coef + else: + dest_dict[vid] = coef + + +def to_expression(visitor, arg): + if arg[0] in (_CONSTANT, _FIXED): + return arg[1] + else: + return arg[1].to_expression(visitor) + + +class ParameterizedLinearRepn(LinearRepn): + def __str__(self): + return ( + f"ParameterizedLinearRepn(mult={self.multiplier}, const={self.constant}, " + f"linear={self.linear}, nonlinear={self.nonlinear})" + ) + + def walker_exitNode(self): + if self.nonlinear is not None: + return _GENERAL, self + elif self.linear: + return _LINEAR, self + elif self.constant.__class__ in native_numeric_types: + return _CONSTANT, self.multiplier * self.constant + else: + return _FIXED, self.multiplier * self.constant + + def to_expression(self, visitor): + if self.nonlinear is not None: + # We want to start with the nonlinear term (and use + # assignment) in case the term is a non-numeric node (like a + # relational expression) + ans = self.nonlinear + else: + ans = 0 + if self.linear: + var_map = visitor.var_map + with mutable_expression() as e: + for vid, coef in self.linear.items(): + if coef.__class__ not in native_numeric_types or coef: + e += coef * var_map[vid] + if e.nargs() > 1: + ans += e + elif e.nargs() == 1: + ans += e.arg(0) + if self.constant.__class__ not in native_numeric_types or self.constant: + ans += self.constant + if ( + self.multiplier.__class__ not in native_numeric_types + or self.multiplier != 1 + ): + ans *= self.multiplier + return ans + + def append(self, other): + """Append a child result from acceptChildResult + + Notes + ----- + This method assumes that the operator was "+". It is implemented + so that we can directly use a ParameterizedLinearRepn() as a `data` object in + the expression walker (thereby allowing us to use the default + implementation of acceptChildResult [which calls + `data.append()`] and avoid the function call for a custom + callback). 
+ + """ + _type, other = other + if _type is _CONSTANT or _type is _FIXED: + self.constant += other + return + + mult = other.multiplier + try: + _mult = bool(mult) + if not _mult: + return + if mult == 1: + _mult = False + except: + _mult = True + + const = other.constant + try: + _const = bool(const) + except: + _const = True + + if _mult: + if _const: + self.constant += mult * const + if other.linear: + _merge_dict(self.linear, mult, other.linear) + if other.nonlinear is not None: + nl = mult * other.nonlinear + if self.nonlinear is None: + self.nonlinear = nl + else: + self.nonlinear += nl + else: + if _const: + self.constant += const + if other.linear: + _merge_dict(self.linear, 1, other.linear) + if other.nonlinear is not None: + nl = other.nonlinear + if self.nonlinear is None: + self.nonlinear = nl + else: + self.nonlinear += nl + + +class ParameterizedLinearBeforeChildDispatcher(LinearBeforeChildDispatcher): + def __init__(self): + super().__init__() + self[Var] = self._before_var + self[MonomialTermExpression] = self._before_monomial + self[LinearExpression] = self._before_linear + self[SumExpression] = self._before_general_expression + + @staticmethod + def _before_linear(visitor, child): + return True, None + + @staticmethod + def _before_monomial(visitor, child): + return True, None + + @staticmethod + def _before_general_expression(visitor, child): + return True, None + + @staticmethod + def _before_var(visitor, child): + _id = id(child) + if _id not in visitor.var_map: + if child.fixed: + return False, (_CONSTANT, visitor.check_constant(child.value, child)) + if child in visitor.wrt: + # pseudo-constant + # We aren't treating this Var as a Var for the purposes of this walker + return False, (_FIXED, child) + # This is a normal situation + ParameterizedLinearBeforeChildDispatcher._record_var(visitor, child) + ans = visitor.Result() + ans.linear[_id] = 1 + return False, (ExprType.LINEAR, ans) + + +_before_child_dispatcher = ParameterizedLinearBeforeChildDispatcher() + +# +# NEGATION handlers +# + + +def _handle_negation_pseudo_constant(visitor, node, arg): + return (_FIXED, -1 * arg[1]) + + +# +# PRODUCT handlers +# + + +def _handle_product_constant_constant(visitor, node, arg1, arg2): + # [ESJ 5/22/24]: Overriding this handler to exclude the deprecation path for + # 0 * nan. It doesn't need overridden when that deprecation path goes away. + return _CONSTANT, arg1[1] * arg2[1] + + +def _handle_product_pseudo_constant_constant(visitor, node, arg1, arg2): + return _FIXED, arg1[1] * arg2[1] + + +# +# DIVISION handlers +# + + +def _handle_division_pseudo_constant_constant(visitor, node, arg1, arg2): + return _FIXED, arg1[1] / arg2[1] + + +def _handle_division_ANY_pseudo_constant(visitor, node, arg1, arg2): + arg1[1].multiplier = arg1[1].multiplier / arg2[1] + return arg1 + + +# +# EXPONENTIATION handlers +# + + +def _handle_pow_pseudo_constant_constant(visitor, node, arg1, arg2): + return _FIXED, to_expression(visitor, arg1) ** to_expression(visitor, arg2) + + +def _handle_pow_nonlinear(visitor, node, arg1, arg2): + # ESJ: We override this because we need our own to_expression implementation + # if pseudo constants are involved. 
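+    # For example (mirroring the tests added below), (m.x + 3*m.z) ** m.y with wrt=[m.y] reaches this handler as (_LINEAR, _FIXED) and is kept as a nonlinear term.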
+ ans = visitor.Result() + ans.nonlinear = to_expression(visitor, arg1) ** to_expression(visitor, arg2) + return _GENERAL, ans + + +# +# ABS and UNARY handlers +# + + +def _handle_unary_pseudo_constant(visitor, node, arg): + # We override this because we can't blindly use apply_node_operation in this case + return _FIXED, node.create_node_with_local_data((to_expression(visitor, arg),)) + + +def define_exit_node_handlers(exit_node_handlers=None): + if exit_node_handlers is None: + exit_node_handlers = {} + linear.define_exit_node_handlers(exit_node_handlers) + + exit_node_handlers[NegationExpression].update( + {(_FIXED,): _handle_negation_pseudo_constant} + ) + + exit_node_handlers[ProductExpression].update( + { + (_CONSTANT, _CONSTANT): _handle_product_constant_constant, + (_FIXED, _FIXED): _handle_product_pseudo_constant_constant, + (_FIXED, _CONSTANT): _handle_product_pseudo_constant_constant, + (_CONSTANT, _FIXED): _handle_product_pseudo_constant_constant, + (_FIXED, _LINEAR): linear._handle_product_constant_ANY, + (_LINEAR, _FIXED): linear._handle_product_ANY_constant, + (_FIXED, _GENERAL): linear._handle_product_constant_ANY, + (_GENERAL, _FIXED): linear._handle_product_ANY_constant, + } + ) + + exit_node_handlers[MonomialTermExpression].update( + exit_node_handlers[ProductExpression] + ) + + exit_node_handlers[DivisionExpression].update( + { + (_FIXED, _FIXED): _handle_division_pseudo_constant_constant, + (_FIXED, _CONSTANT): _handle_division_pseudo_constant_constant, + (_CONSTANT, _FIXED): _handle_division_pseudo_constant_constant, + (_LINEAR, _FIXED): _handle_division_ANY_pseudo_constant, + (_GENERAL, _FIXED): _handle_division_ANY_pseudo_constant, + } + ) + + exit_node_handlers[PowExpression].update( + { + (_FIXED, _FIXED): _handle_pow_pseudo_constant_constant, + (_FIXED, _CONSTANT): _handle_pow_pseudo_constant_constant, + (_CONSTANT, _FIXED): _handle_pow_pseudo_constant_constant, + (_LINEAR, _FIXED): _handle_pow_nonlinear, + (_FIXED, _LINEAR): _handle_pow_nonlinear, + (_GENERAL, _FIXED): _handle_pow_nonlinear, + (_FIXED, _GENERAL): _handle_pow_nonlinear, + } + ) + exit_node_handlers[UnaryFunctionExpression].update( + {(_FIXED,): _handle_unary_pseudo_constant} + ) + exit_node_handlers[AbsExpression] = exit_node_handlers[UnaryFunctionExpression] + + return exit_node_handlers + + +class ParameterizedLinearRepnVisitor(LinearRepnVisitor): + Result = ParameterizedLinearRepn + exit_node_dispatcher = ExitNodeDispatcher( + _initialize_exit_node_dispatcher(define_exit_node_handlers()) + ) + + def __init__(self, subexpression_cache, var_map, var_order, sorter, wrt): + super().__init__(subexpression_cache, var_map, var_order, sorter) + self.wrt = ComponentSet(_flattened(wrt)) + + def beforeChild(self, node, child, child_idx): + return _before_child_dispatcher[child.__class__](self, child) + + def _factor_multiplier_into_linear_terms(self, ans, mult): + linear = ans.linear + zeros = [] + for vid, coef in linear.items(): + if coef.__class__ not in native_numeric_types or coef: + linear[vid] = mult * coef + else: + zeros.append(vid) + for vid in zeros: + del linear[vid] + if ans.nonlinear is not None: + ans.nonlinear *= mult + if ans.constant.__class__ not in native_numeric_types or ans.constant: + ans.constant *= mult + ans.multiplier = 1 + + def finalizeResult(self, result): + ans = result[1] + if ans.__class__ is self.Result: + mult = ans.multiplier + if mult.__class__ not in native_numeric_types: + # mult is an expression--we should push it back into the other terms + 
self._factor_multiplier_into_linear_terms(ans, mult) + return ans + if mult == 1: + zeros = [ + (vid, coef) + for vid, coef in ans.linear.items() + if coef.__class__ in native_numeric_types and not coef + ] + for vid, coef in zeros: + del ans.linear[vid] + elif not mult: + # the multiplier has cleared out the entire expression. Check + # if this is suppressing a NaN because we can't clear everything + # out if it is + if ans.constant != ans.constant or any( + c != c for c in ans.linear.values() + ): + # There's a nan in here, so we distribute the 0 + self._factor_multiplier_into_linear_terms(ans, mult) + return ans + return self.Result() + else: + # mult not in {0, 1}: factor it into the constant, + # linear coefficients, and nonlinear term + self._factor_multiplier_into_linear_terms(ans, mult) + return ans + + ans = self.Result() + assert result[0] in (_CONSTANT, _FIXED) + ans.constant = result[1] + return ans diff --git a/pyomo/repn/plugins/lp_writer.py b/pyomo/repn/plugins/lp_writer.py index 627a54e3f68..814f79a4eb9 100644 --- a/pyomo/repn/plugins/lp_writer.py +++ b/pyomo/repn/plugins/lp_writer.py @@ -458,13 +458,13 @@ def write(self, model): addSymbol(con, label) ostream.write(f'\n{label}:\n') self.write_expression(ostream, repn, False) - ostream.write(f'>= {(lb - offset)!r}\n') + ostream.write(f'>= {(lb - offset)!s}\n') elif lb == ub: label = f'c_e_{symbol}_' addSymbol(con, label) ostream.write(f'\n{label}:\n') self.write_expression(ostream, repn, False) - ostream.write(f'= {(lb - offset)!r}\n') + ostream.write(f'= {(lb - offset)!s}\n') else: # We will need the constraint body twice. Generate # in a buffer so we only have to do that once. @@ -476,18 +476,18 @@ def write(self, model): addSymbol(con, label) ostream.write(f'\n{label}:\n') ostream.write(buf) - ostream.write(f'>= {(lb - offset)!r}\n') + ostream.write(f'>= {(lb - offset)!s}\n') label = f'r_u_{symbol}_' aliasSymbol(con, label) ostream.write(f'\n{label}:\n') ostream.write(buf) - ostream.write(f'<= {(ub - offset)!r}\n') + ostream.write(f'<= {(ub - offset)!s}\n') elif ub is not None: label = f'c_u_{symbol}_' addSymbol(con, label) ostream.write(f'\n{label}:\n') self.write_expression(ostream, repn, False) - ostream.write(f'<= {(ub - offset)!r}\n') + ostream.write(f'<= {(ub - offset)!s}\n') if with_debug_timing: # report the last constraint @@ -527,8 +527,8 @@ def write(self, model): # Note: Var.bounds guarantees the values are either (finite) # native_numeric_types or None lb, ub = v.bounds - lb = '-inf' if lb is None else repr(lb) - ub = '+inf' if ub is None else repr(ub) + lb = '-inf' if lb is None else str(lb) + ub = '+inf' if ub is None else str(ub) ostream.write(f"\n {lb} <= {v_symbol} <= {ub}") if integer_vars: @@ -565,7 +565,7 @@ def write(self, model): for v, w in getattr(soscon, 'get_items', soscon.items)(): if w.__class__ not in int_float: w = float(f) - ostream.write(f" {getSymbol(v)}:{w!r}\n") + ostream.write(f" {getSymbol(v)}:{w!s}\n") ostream.write("\nend\n") @@ -584,9 +584,9 @@ def write_expression(self, ostream, expr, is_objective): expr.linear.items(), key=lambda x: getVarOrder(x[0]) ): if coef < 0: - ostream.write(f'{coef!r} {getSymbol(getVar(vid))}\n') + ostream.write(f'{coef!s} {getSymbol(getVar(vid))}\n') else: - ostream.write(f'+{coef!r} {getSymbol(getVar(vid))}\n') + ostream.write(f'+{coef!s} {getSymbol(getVar(vid))}\n') quadratic = getattr(expr, 'quadratic', None) if quadratic: @@ -605,9 +605,9 @@ def _normalize_constraint(data): col = c1, c2 sym = f' {getSymbol(getVar(vid1))} * 
{getSymbol(getVar(vid2))}\n' if coef < 0: - return col, repr(coef) + sym + return col, str(coef) + sym else: - return col, '+' + repr(coef) + sym + return col, f'+{coef!s}{sym}' if is_objective: # diff --git a/pyomo/repn/plugins/nl_writer.py b/pyomo/repn/plugins/nl_writer.py index 43fd2fade68..8fc82d21d30 100644 --- a/pyomo/repn/plugins/nl_writer.py +++ b/pyomo/repn/plugins/nl_writer.py @@ -1150,17 +1150,17 @@ def write(self, model): r_lines[idx] = "3" else: # _type = 4 # L == c == U - r_lines[idx] = f"4 {lb - expr_info.const!r}" + r_lines[idx] = f"4 {lb - expr_info.const!s}" n_equality += 1 elif lb is None: # _type = 1 # c <= U - r_lines[idx] = f"1 {ub - expr_info.const!r}" + r_lines[idx] = f"1 {ub - expr_info.const!s}" elif ub is None: # _type = 2 # L <= c - r_lines[idx] = f"2 {lb - expr_info.const!r}" + r_lines[idx] = f"2 {lb - expr_info.const!s}" else: # _type = 0 # L <= c <= U - r_lines[idx] = f"0 {lb - expr_info.const!r} {ub - expr_info.const!r}" + r_lines[idx] = f"0 {lb - expr_info.const!s} {ub - expr_info.const!s}" n_ranges += 1 expr_info.const = 0 # FIXME: this is a HACK to be compatible with the NLv1 @@ -1375,7 +1375,7 @@ def write(self, model): ostream.write(f"S{_field|_float} {len(_vals)} {name}\n") # Note: _SuffixData.compile() guarantees the value is int/float ostream.write( - ''.join(f"{_id} {_vals[_id]!r}\n" for _id in sorted(_vals)) + ''.join(f"{_id} {_vals[_id]!s}\n" for _id in sorted(_vals)) ) # @@ -1485,7 +1485,7 @@ def write(self, model): ostream.write(f"d{len(data.con)}\n") # Note: _SuffixData.compile() guarantees the value is int/float ostream.write( - ''.join(f"{_id} {data.con[_id]!r}\n" for _id in sorted(data.con)) + ''.join(f"{_id} {data.con[_id]!s}\n" for _id in sorted(data.con)) ) # @@ -1507,7 +1507,7 @@ def write(self, model): ) ostream.write( ''.join( - f'{var_idx} {val!r}{col_comments[var_idx]}\n' + f'{var_idx} {val!s}{col_comments[var_idx]}\n' for var_idx, val in _init_lines ) ) @@ -1548,13 +1548,13 @@ def write(self, model): if lb is None: # unbounded ostream.write(f"3{col_comments[var_idx]}\n") else: # == - ostream.write(f"4 {lb!r}{col_comments[var_idx]}\n") + ostream.write(f"4 {lb!s}{col_comments[var_idx]}\n") elif lb is None: # var <= ub - ostream.write(f"1 {ub!r}{col_comments[var_idx]}\n") + ostream.write(f"1 {ub!s}{col_comments[var_idx]}\n") elif ub is None: # lb <= body - ostream.write(f"2 {lb!r}{col_comments[var_idx]}\n") + ostream.write(f"2 {lb!s}{col_comments[var_idx]}\n") else: # lb <= body <= ub - ostream.write(f"0 {lb!r} {ub!r}{col_comments[var_idx]}\n") + ostream.write(f"0 {lb!s} {ub!s}{col_comments[var_idx]}\n") # # "k" lines (column offsets in Jacobian NNZ) @@ -1589,7 +1589,7 @@ def write(self, model): linear[_id] /= scaling_cache[_id] ostream.write(f'J{row_idx} {len(linear)}{row_comments[row_idx]}\n') for _id in sorted(linear, key=column_order.__getitem__): - ostream.write(f'{column_order[_id]} {linear[_id]!r}\n') + ostream.write(f'{column_order[_id]} {linear[_id]!s}\n') # # "G" lines (non-empty terms in the Objective) @@ -1605,7 +1605,7 @@ def write(self, model): linear[_id] /= scaling_cache[_id] ostream.write(f'G{obj_idx} {len(linear)}{row_comments[obj_idx + n_cons]}\n') for _id in sorted(linear, key=column_order.__getitem__): - ostream.write(f'{column_order[_id]} {linear[_id]!r}\n') + ostream.write(f'{column_order[_id]} {linear[_id]!s}\n') # Generate the return information eliminated_vars = [ @@ -1988,6 +1988,18 @@ def _record_named_expression_usage(self, named_exprs, src, comp_type): elif info[comp_type] != src: info[comp_type] = 0 + def 
_resolve_subexpression_args(self, nl, args): + final_args = [] + for arg in args: + if arg in self.var_id_to_nl_map: + final_args.append(self.var_id_to_nl_map[arg]) + else: + _nl, _ids, _ = self.subexpression_cache[arg][1].compile_repn( + self.visitor + ) + final_args.append(self._resolve_subexpression_args(_nl, _ids)) + return nl % tuple(final_args) + def _write_nl_expression(self, repn, include_const): # Note that repn.mult should always be 1 (the AMPLRepn was # compiled before this point). Omitting the assertion for @@ -2007,18 +2019,7 @@ def _write_nl_expression(self, repn, include_const): nl % tuple(map(self.var_id_to_nl_map.__getitem__, args)) ) except KeyError: - final_args = [] - for arg in args: - if arg in self.var_id_to_nl_map: - final_args.append(self.var_id_to_nl_map[arg]) - else: - _nl, _ids, _ = self.subexpression_cache[arg][1].compile_repn( - self.visitor - ) - final_args.append( - _nl % tuple(map(self.var_id_to_nl_map.__getitem__, _ids)) - ) - self.ostream.write(nl % tuple(final_args)) + self.ostream.write(self._resolve_subexpression_args(nl, args)) elif include_const: self.ostream.write(self.template.const % repn.const) @@ -2041,7 +2042,7 @@ def _write_v_line(self, expr_id, k): # ostream.write(f'V{self.next_V_line_id} {len(linear)} {k}{lbl}\n') for _id in sorted(linear, key=column_order.__getitem__): - ostream.write(f'{column_order[_id]} {linear[_id]!r}\n') + ostream.write(f'{column_order[_id]} {linear[_id]!s}\n') self._write_nl_expression(info[1], True) self.next_V_line_id += 1 diff --git a/pyomo/repn/quadratic.py b/pyomo/repn/quadratic.py index f6e0a43623d..a9c8b7bf2b5 100644 --- a/pyomo/repn/quadratic.py +++ b/pyomo/repn/quadratic.py @@ -157,17 +157,6 @@ def append(self, other): self.nonlinear += nl -_exit_node_handlers = copy.deepcopy(linear._exit_node_handlers) - -# -# NEGATION -# -_exit_node_handlers[NegationExpression][(_QUADRATIC,)] = linear._handle_negation_ANY - - -# -# PRODUCT -# def _mul_linear_linear(varOrder, linear1, linear2): quadratic = {} for vid1, coef1 in linear1.items(): @@ -275,69 +264,73 @@ def _handle_product_nonlinear(visitor, node, arg1, arg2): return _GENERAL, ans -_exit_node_handlers[ProductExpression].update( - { - None: _handle_product_nonlinear, - (_CONSTANT, _QUADRATIC): linear._handle_product_constant_ANY, - (_QUADRATIC, _CONSTANT): linear._handle_product_ANY_constant, - # Replace handler from the linear walker - (_LINEAR, _LINEAR): _handle_product_linear_linear, - } -) - -# -# DIVISION -# -_exit_node_handlers[DivisionExpression].update( - {(_QUADRATIC, _CONSTANT): linear._handle_division_ANY_constant} -) - - -# -# EXPONENTIATION -# -_exit_node_handlers[PowExpression].update( - {(_QUADRATIC, _CONSTANT): linear._handle_pow_ANY_constant} -) - -# -# ABS and UNARY handlers -# -# (no changes needed) - -# -# NAMED EXPRESSION handlers -# -# (no changes needed) - -# -# EXPR_IF handlers -# -# Note: it is easier to just recreate the entire data structure, rather -# than update it -_exit_node_handlers[Expr_ifExpression].update( - { - (_CONSTANT, i, _QUADRATIC): linear._handle_expr_if_const - for i in (_CONSTANT, _LINEAR, _QUADRATIC, _GENERAL) - } -) -_exit_node_handlers[Expr_ifExpression].update( - { - (_CONSTANT, _QUADRATIC, i): linear._handle_expr_if_const - for i in (_CONSTANT, _LINEAR, _GENERAL) - } -) - -# -# RELATIONAL handlers -# -# (no changes needed) +def define_exit_node_handlers(_exit_node_handlers=None): + if _exit_node_handlers is None: + _exit_node_handlers = {} + linear.define_exit_node_handlers(_exit_node_handlers) + # + # NEGATION + 
# + _exit_node_handlers[NegationExpression][(_QUADRATIC,)] = linear._handle_negation_ANY + # + # PRODUCT + # + _exit_node_handlers[ProductExpression].update( + { + None: _handle_product_nonlinear, + (_CONSTANT, _QUADRATIC): linear._handle_product_constant_ANY, + (_QUADRATIC, _CONSTANT): linear._handle_product_ANY_constant, + # Replace handler from the linear walker + (_LINEAR, _LINEAR): _handle_product_linear_linear, + } + ) + # + # DIVISION + # + _exit_node_handlers[DivisionExpression].update( + {(_QUADRATIC, _CONSTANT): linear._handle_division_ANY_constant} + ) + # + # EXPONENTIATION + # + _exit_node_handlers[PowExpression].update( + {(_QUADRATIC, _CONSTANT): linear._handle_pow_ANY_constant} + ) + # + # ABS and UNARY handlers + # + # (no changes needed) + # + # NAMED EXPRESSION handlers + # + # (no changes needed) + # + # EXPR_IF handlers + # + # Note: it is easier to just recreate the entire data structure, rather + # than update it + _exit_node_handlers[Expr_ifExpression].update( + { + (_CONSTANT, i, _QUADRATIC): linear._handle_expr_if_const + for i in (_CONSTANT, _LINEAR, _QUADRATIC, _GENERAL) + } + ) + _exit_node_handlers[Expr_ifExpression].update( + { + (_CONSTANT, _QUADRATIC, i): linear._handle_expr_if_const + for i in (_CONSTANT, _LINEAR, _GENERAL) + } + ) + # + # RELATIONAL handlers + # + # (no changes needed) + return _exit_node_handlers class QuadraticRepnVisitor(linear.LinearRepnVisitor): Result = QuadraticRepn - exit_node_handlers = _exit_node_handlers exit_node_dispatcher = linear.ExitNodeDispatcher( - linear._initialize_exit_node_dispatcher(_exit_node_handlers) + linear._initialize_exit_node_dispatcher(define_exit_node_handlers()) ) max_exponential_expansion = 2 diff --git a/pyomo/repn/tests/ampl/test_nlv2.py b/pyomo/repn/tests/ampl/test_nlv2.py index b6bb5f6c074..4d7b5d9ab6c 100644 --- a/pyomo/repn/tests/ampl/test_nlv2.py +++ b/pyomo/repn/tests/ampl/test_nlv2.py @@ -2703,3 +2703,72 @@ def test_presolve_check_invalid_monomial_constraints(self): r"\(fixed body value 5.0 outside bounds \[10, None\]\)\.", ): nl_writer.NLWriter().write(m, OUT, linear_presolve=True) + + def test_nested_external_expressions(self): + # This tests nested external functions in a single expression + DLL = find_GSL() + if not DLL: + self.skipTest("Could not find the amplgsl.dll library") + + m = ConcreteModel() + m.hypot = ExternalFunction(library=DLL, function="gsl_hypot") + m.p = Param(initialize=1, mutable=True) + m.x = Var(bounds=(None, 3)) + m.y = Var(bounds=(3, None)) + m.z = Var(initialize=1) + m.o = Objective(expr=m.z**2 * m.hypot(m.z, m.hypot(m.x, m.y)) ** 2) + m.c = Constraint(expr=m.x == m.y) + + OUT = io.StringIO() + nl_writer.NLWriter().write( + m, OUT, symbolic_solver_labels=True, linear_presolve=False + ) + self.assertEqual( + *nl_diff( + """g3 1 1 0 #problem unknown + 3 1 1 0 1 #vars, constraints, objectives, ranges, eqns + 0 1 0 0 0 0 #nonlinear constrs, objs; ccons: lin, nonlin, nd, nzlb + 0 0 #network constraints: nonlinear, linear + 0 3 0 #nonlinear vars in constraints, objectives, both + 0 1 0 1 #linear network variables; functions; arith, flags + 0 0 0 0 0 #discrete variables: binary, integer, nonlinear (b,c,o) + 2 3 #nonzeros in Jacobian, obj. 
gradient + 1 1 #max name lengths: constraints, variables + 0 0 0 0 0 #common exprs: b,c,o,c1,o1 +F0 1 -1 gsl_hypot +C0 #c +n0 +O0 0 #o +o2 #* +o5 #^ +v0 #z +n2 +o5 #^ +f0 2 #hypot +v0 #z +f0 2 #hypot +v1 #x +v2 #y +n2 +x1 #initial guess +0 1 #z +r #1 ranges (rhs's) +4 0 #c +b #3 bounds (on variables) +3 #z +1 3 #x +2 3 #y +k2 #intermediate Jacobian column lengths +0 +1 +J0 2 #c +1 1 +2 -1 +G0 3 #o +0 0 +1 0 +2 0 +""", + OUT.getvalue(), + ) + ) diff --git a/pyomo/repn/tests/test_linear.py b/pyomo/repn/tests/test_linear.py index 861fecc7888..d88ddf96baa 100644 --- a/pyomo/repn/tests/test_linear.py +++ b/pyomo/repn/tests/test_linear.py @@ -1659,7 +1659,7 @@ def test_zero_elimination(self): self.assertEqual(repn.multiplier, 1) self.assertEqual(repn.constant, 0) self.assertEqual(repn.linear, {}) - self.assertEqual(repn.nonlinear, None) + self.assertIsNone(repn.nonlinear) m.p = Param(mutable=True, within=Any, initialize=None) e = m.p * m.x[0] + m.p * m.x[1] * m.x[2] + m.p * log(m.x[3]) diff --git a/pyomo/repn/tests/test_parameterized_linear.py b/pyomo/repn/tests/test_parameterized_linear.py new file mode 100644 index 00000000000..624f8390d16 --- /dev/null +++ b/pyomo/repn/tests/test_parameterized_linear.py @@ -0,0 +1,552 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2024 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.common.log import LoggingIntercept +import pyomo.common.unittest as unittest +from pyomo.core.expr.compare import assertExpressionsEqual +from pyomo.environ import Any, Binary, ConcreteModel, log, Param, Var +from pyomo.repn.parameterized_linear import ParameterizedLinearRepnVisitor +from pyomo.repn.tests.test_linear import VisitorConfig +from pyomo.repn.util import InvalidNumber + + +class TestParameterizedLinearRepnVisitor(unittest.TestCase): + def make_model(self): + m = ConcreteModel() + m.x = Var(bounds=(0, 45)) + m.y = Var(domain=Binary) + m.z = Var() + + return m + + def test_walk_sum(self): + m = self.make_model() + e = m.x + m.y + cfg = VisitorConfig() + visitor = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.y, m.z]) + + repn = visitor.walk_expression(e) + + self.assertIsNone(repn.nonlinear) + self.assertEqual(len(repn.linear), 1) + self.assertIn(id(m.x), repn.linear) + self.assertEqual(repn.linear[id(m.x)], 1) + self.assertIs(repn.constant, m.y) + self.assertEqual(repn.multiplier, 1) + assertExpressionsEqual(self, repn.to_expression(visitor), m.x + m.y) + + def test_walk_triple_sum(self): + m = self.make_model() + e = m.x + m.z * m.y + m.z + + cfg = VisitorConfig() + visitor = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.z]) + + repn = visitor.walk_expression(e) + + self.assertIsNone(repn.nonlinear) + self.assertEqual(len(repn.linear), 2) + self.assertIn(id(m.x), repn.linear) + self.assertIn(id(m.y), repn.linear) + self.assertEqual(repn.linear[id(m.x)], 1) + self.assertIs(repn.linear[id(m.y)], m.z) + self.assertIs(repn.constant, m.z) + self.assertEqual(repn.multiplier, 1) + assertExpressionsEqual(self, repn.to_expression(visitor), m.x + m.z * m.y + m.z) + + def test_sum_two_of_the_same(self): + # This hits the mult == 1 and 
vid in dest_dict case in _merge_dict + m = self.make_model() + e = m.x + m.x + cfg = VisitorConfig() + visitor = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.z]) + + repn = visitor.walk_expression(e) + + self.assertIsNone(repn.nonlinear) + self.assertEqual(len(repn.linear), 1) + self.assertIn(id(m.x), repn.linear) + self.assertEqual(repn.linear[id(m.x)], 2) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.multiplier, 1) + assertExpressionsEqual(self, repn.to_expression(visitor), 2 * m.x) + + def test_sum_with_mult_0(self): + m = self.make_model() + e = 0 * m.x + m.x - m.y + + cfg = VisitorConfig() + visitor = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.y, m.z]) + + repn = visitor.walk_expression(e) + self.assertIsNone(repn.nonlinear) + self.assertEqual(len(repn.linear), 1) + self.assertIn(id(m.x), repn.linear) + self.assertEqual(repn.linear[id(m.x)], 1) + assertExpressionsEqual(self, repn.constant, -m.y) + self.assertEqual(repn.multiplier, 1) + assertExpressionsEqual(self, repn.to_expression(visitor), m.x - m.y) + + def test_sum_nonlinear_to_linear(self): + m = self.make_model() + e = m.y * m.x**2 + m.y * m.x - 3 + + cfg = VisitorConfig() + visitor = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.y, m.z]) + + repn = visitor.walk_expression(e) + assertExpressionsEqual(self, repn.nonlinear, m.y * m.x**2) + self.assertEqual(len(repn.linear), 1) + self.assertIn(id(m.x), repn.linear) + self.assertIs(repn.linear[id(m.x)], m.y) + self.assertEqual(repn.constant, -3) + self.assertEqual(repn.multiplier, 1) + assertExpressionsEqual( + self, repn.to_expression(visitor), m.y * m.x**2 + m.y * m.x - 3 + ) + + def test_sum_nonlinear_to_nonlinear(self): + m = self.make_model() + e = m.x**3 + 3 + m.x**2 + + cfg = VisitorConfig() + visitor = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.y, m.z]) + + repn = visitor.walk_expression(e) + assertExpressionsEqual(self, repn.nonlinear, m.x**3 + m.x**2) + self.assertEqual(repn.constant, 3) + self.assertEqual(repn.multiplier, 1) + assertExpressionsEqual(self, repn.to_expression(visitor), m.x**3 + m.x**2 + 3) + + def test_sum_to_linear_expr(self): + m = self.make_model() + e = m.x + m.y * (m.x + 5) + + cfg = VisitorConfig() + visitor = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.y, m.z]) + + repn = visitor.walk_expression(e) + self.assertEqual(len(repn.linear), 1) + self.assertIn(id(m.x), repn.linear) + assertExpressionsEqual(self, repn.linear[id(m.x)], 1 + m.y) + assertExpressionsEqual(self, repn.constant, m.y * 5) + self.assertEqual(repn.multiplier, 1) + assertExpressionsEqual( + self, repn.to_expression(visitor), (1 + m.y) * m.x + m.y * 5 + ) + + def test_bilinear_term(self): + m = self.make_model() + e = m.x * m.y + cfg = VisitorConfig() + visitor = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.y, m.z]) + + repn = visitor.walk_expression(e) + + self.assertIsNone(repn.nonlinear) + self.assertEqual(len(repn.linear), 1) + self.assertIn(id(m.x), repn.linear) + self.assertIs(repn.linear[id(m.x)], m.y) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.multiplier, 1) + assertExpressionsEqual(self, repn.to_expression(visitor), m.y * m.x) + + def test_distributed_bilinear_term(self): + m = self.make_model() + e = m.y * (m.x + 7) + cfg = VisitorConfig() + visitor = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.y, m.z]) + + repn = visitor.walk_expression(e) + + self.assertIsNone(repn.nonlinear) + self.assertEqual(len(repn.linear), 1) + self.assertIn(id(m.x), repn.linear) + self.assertIs(repn.linear[id(m.x)], m.y) + assertExpressionsEqual(self, repn.constant, m.y 
* 7) + self.assertEqual(repn.multiplier, 1) + assertExpressionsEqual(self, repn.to_expression(visitor), m.y * m.x + m.y * 7) + + def test_monomial(self): + m = self.make_model() + e = 45 * m.y + cfg = VisitorConfig() + visitor = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.x, m.z]) + + repn = visitor.walk_expression(e) + + self.assertIsNone(repn.nonlinear) + self.assertEqual(len(repn.linear), 1) + self.assertIn(id(m.y), repn.linear) + self.assertEqual(repn.linear[id(m.y)], 45) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.multiplier, 1) + assertExpressionsEqual(self, repn.to_expression(visitor), 45 * m.y) + + def test_constant(self): + m = self.make_model() + e = 45 * m.y + cfg = VisitorConfig() + visitor = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.y, m.z]) + + repn = visitor.walk_expression(e) + + self.assertIsNone(repn.nonlinear) + self.assertEqual(len(repn.linear), 0) + assertExpressionsEqual(self, repn.constant, 45 * m.y) + self.assertEqual(repn.multiplier, 1) + assertExpressionsEqual(self, repn.to_expression(visitor), 45 * m.y) + + def test_fixed_var(self): + m = self.make_model() + m.x.fix(42) + e = (m.y**2) * (m.x + m.x**2) + + cfg = VisitorConfig() + visitor = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.y, m.z]) + + repn = visitor.walk_expression(e) + + self.assertIsNone(repn.nonlinear) + self.assertEqual(len(repn.linear), 0) + assertExpressionsEqual(self, repn.constant, (m.y**2) * 1806) + self.assertEqual(repn.multiplier, 1) + assertExpressionsEqual(self, repn.to_expression(visitor), (m.y**2) * 1806) + + def test_nonlinear(self): + m = self.make_model() + e = (m.y * log(m.x)) * (m.y + 2) / m.x + + cfg = VisitorConfig() + visitor = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.y, m.z]) + + repn = visitor.walk_expression(e) + + self.assertEqual(len(repn.linear), 0) + self.assertEqual(repn.multiplier, 1) + assertExpressionsEqual(self, repn.nonlinear, log(m.x) * (m.y * (m.y + 2)) / m.x) + assertExpressionsEqual( + self, repn.to_expression(visitor), log(m.x) * (m.y * (m.y + 2)) / m.x + ) + + def test_finalize(self): + m = self.make_model() + m.w = Var() + + e = m.x + 2 * m.w**2 * m.y - m.x - m.w * m.z + + cfg = VisitorConfig() + repn = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.w]).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x, id(m.y): m.y, id(m.z): m.z}) + self.assertEqual(cfg.var_order, {id(m.x): 0, id(m.y): 1, id(m.z): 2}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(len(repn.linear), 2) + self.assertIn(id(m.y), repn.linear) + assertExpressionsEqual(self, repn.linear[id(m.y)], 2 * m.w**2) + self.assertIn(id(m.z), repn.linear) + assertExpressionsEqual(self, repn.linear[id(m.z)], -m.w) + self.assertEqual(repn.nonlinear, None) + + e *= 5 + + cfg = VisitorConfig() + repn = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.w]).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x, id(m.y): m.y, id(m.z): m.z}) + self.assertEqual(cfg.var_order, {id(m.x): 0, id(m.y): 1, id(m.z): 2}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(len(repn.linear), 2) + self.assertIn(id(m.y), repn.linear) + assertExpressionsEqual(self, repn.linear[id(m.y)], 5 * (2 * m.w**2)) + self.assertIn(id(m.z), repn.linear) + assertExpressionsEqual(self, repn.linear[id(m.z)], -5 * m.w) + self.assertEqual(repn.nonlinear, None) + + e = 5 * (m.w * m.y + m.z**2 + 3 * m.w * m.y**3) + + cfg = VisitorConfig() + repn = 
ParameterizedLinearRepnVisitor(*cfg, wrt=[m.w]).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.y): m.y, id(m.z): m.z}) + self.assertEqual(cfg.var_order, {id(m.y): 0, id(m.z): 1}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(len(repn.linear), 1) + self.assertIn(id(m.y), repn.linear) + assertExpressionsEqual(self, repn.linear[id(m.y)], 5 * m.w) + assertExpressionsEqual(self, repn.nonlinear, (m.z**2 + 3 * m.w * m.y**3) * 5) + + def test_ANY_over_constant_division(self): + m = ConcreteModel() + m.p = Param(mutable=True, initialize=2, domain=Any) + m.x = Var() + m.z = Var() + m.y = Var() + # We will use the fixed value regardless of the fact that we aren't + # treating this as a Var. + m.y.fix(1) + + expr = m.y + m.x + m.z + ((3 * m.z * m.x) / m.p) / m.y + cfg = VisitorConfig() + repn = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.y, m.z]).walk_expression( + expr + ) + + self.assertEqual(repn.multiplier, 1) + assertExpressionsEqual(self, repn.constant, 1 + m.z) + self.assertEqual(len(repn.linear), 1) + assertExpressionsEqual(self, repn.linear[id(m.x)], 1 + 1.5 * m.z) + self.assertEqual(repn.nonlinear, None) + + def test_errors_propagate_nan(self): + m = ConcreteModel() + m.p = Param(mutable=True, initialize=0, domain=Any) + m.x = Var() + m.z = Var() + m.y = Var() + m.y.fix(1) + + expr = m.y + m.x + m.z + ((3 * m.z * m.x) / m.p) / m.y + cfg = VisitorConfig() + with LoggingIntercept() as LOG: + repn = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.y, m.z]).walk_expression( + expr + ) + self.assertEqual( + LOG.getvalue(), + "Exception encountered evaluating expression 'div(3*z, 0)'\n" + "\tmessage: division by zero\n" + "\texpression: 3*z*x/p\n", + ) + self.assertEqual(repn.multiplier, 1) + assertExpressionsEqual(self, repn.constant, 1 + m.z) + self.assertEqual(len(repn.linear), 1) + self.assertIsInstance(repn.linear[id(m.x)], InvalidNumber) + assertExpressionsEqual(self, repn.linear[id(m.x)].value, 1 + float('nan')) + self.assertEqual(repn.nonlinear, None) + + m.y.fix(None) + expr = m.z * log(m.y) + 3 + repn = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.y, m.z]).walk_expression( + expr + ) + self.assertEqual(repn.multiplier, 1) + self.assertIsInstance(repn.constant, InvalidNumber) + assertExpressionsEqual(self, repn.constant.value, float('nan') * m.z + 3) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + def test_negation_constant(self): + m = self.make_model() + e = -(m.y * m.z + 17) + + cfg = VisitorConfig() + repn = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.y, m.z]).walk_expression(e) + + self.assertEqual(len(repn.linear), 0) + self.assertEqual(repn.multiplier, 1) + assertExpressionsEqual(self, repn.constant, -1 * (m.y * m.z + 17)) + self.assertIsNone(repn.nonlinear) + + def test_product_nonlinear(self): + m = self.make_model() + e = (m.x**2) * (log(m.y) * m.z**4) * m.y + cfg = VisitorConfig() + repn = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.y]).walk_expression(e) + + self.assertEqual(len(repn.linear), 0) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + assertExpressionsEqual( + self, repn.nonlinear, (m.x**2) * (m.z**4 * log(m.y)) * m.y + ) + + def test_division_pseudo_constant_constant(self): + m = self.make_model() + e = m.x / 4 + m.y + + cfg = VisitorConfig() + repn = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.x]).walk_expression(e) + + self.assertEqual(len(repn.linear), 1) + self.assertIn(id(m.y), repn.linear) + 
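+        # m.x is in wrt, so m.x / 4 is treated as a pseudo-constant and folded
+        # into repn.constant; m.y is the only remaining linear term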
self.assertEqual(repn.linear[id(m.y)], 1) + self.assertEqual(repn.multiplier, 1) + assertExpressionsEqual(self, repn.constant, m.x / 4) + self.assertIsNone(repn.nonlinear) + + e = 4 / m.x + m.y + cfg = VisitorConfig() + repn = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.x]).walk_expression(e) + + self.assertEqual(len(repn.linear), 1) + self.assertIn(id(m.y), repn.linear) + self.assertEqual(repn.linear[id(m.y)], 1) + self.assertEqual(repn.multiplier, 1) + assertExpressionsEqual(self, repn.constant, 4 / m.x) + self.assertIsNone(repn.nonlinear) + + e = m.z / m.x + m.y + cfg = VisitorConfig() + repn = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.x, m.z]).walk_expression(e) + + self.assertEqual(len(repn.linear), 1) + self.assertIn(id(m.y), repn.linear) + self.assertEqual(repn.linear[id(m.y)], 1) + self.assertEqual(repn.multiplier, 1) + assertExpressionsEqual(self, repn.constant, m.z / m.x) + self.assertIsNone(repn.nonlinear) + + def test_division_ANY_pseudo_constant(self): + m = self.make_model() + e = (m.x + 3 * m.z) / m.y + + cfg = VisitorConfig() + repn = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.y]).walk_expression(e) + + self.assertEqual(len(repn.linear), 2) + self.assertIn(id(m.x), repn.linear) + assertExpressionsEqual(self, repn.linear[id(m.x)], 1 / m.y) + self.assertIn(id(m.z), repn.linear) + assertExpressionsEqual(self, repn.linear[id(m.z)], (1 / m.y) * 3) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertIsNone(repn.nonlinear) + + def test_duplicate(self): + m = self.make_model() + e = (1 + m.x) ** 2 + m.y + + cfg = VisitorConfig() + visitor = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.y]) + visitor.max_exponential_expansion = 2 + repn = visitor.walk_expression(e) + + self.assertEqual(len(repn.linear), 0) + self.assertEqual(repn.multiplier, 1) + self.assertIs(repn.constant, m.y) + assertExpressionsEqual(self, repn.nonlinear, (m.x + 1) * (m.x + 1)) + + def test_pow_ANY_pseudo_constant(self): + m = self.make_model() + e = (m.x**2 + 3 * m.z) ** m.y + + cfg = VisitorConfig() + repn = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.y]).walk_expression(e) + + self.assertEqual(len(repn.linear), 0) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + assertExpressionsEqual(self, repn.nonlinear, (m.x**2 + 3 * m.z) ** m.y) + + def test_pow_pseudo_constant_ANY(self): + m = self.make_model() + e = m.y ** (m.x**2 + 3 * m.z) + + cfg = VisitorConfig() + repn = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.y]).walk_expression(e) + + self.assertEqual(len(repn.linear), 0) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + assertExpressionsEqual(self, repn.nonlinear, m.y ** (m.x**2 + 3 * m.z)) + + def test_pow_linear_pseudo_constant(self): + m = self.make_model() + e = (m.x + 3 * m.z) ** m.y + + cfg = VisitorConfig() + repn = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.y]).walk_expression(e) + + self.assertEqual(len(repn.linear), 0) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + assertExpressionsEqual(self, repn.nonlinear, (m.x + 3 * m.z) ** m.y) + + def test_pow_pseudo_constant_linear(self): + m = self.make_model() + e = m.y ** (m.x + 3 * m.z) + + cfg = VisitorConfig() + repn = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.y]).walk_expression(e) + + self.assertEqual(len(repn.linear), 0) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + assertExpressionsEqual(self, repn.nonlinear, m.y ** (m.x + 3 * m.z)) + + def test_0_mult(self): + m = self.make_model() + m.p = 
Var() + m.p.fix(0) + e = m.p * (m.y**2 + m.z) + + cfg = VisitorConfig() + repn = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.z]).walk_expression(e) + + self.assertEqual(len(repn.linear), 0) + self.assertEqual(repn.multiplier, 1) + self.assertIsNone(repn.nonlinear) + self.assertEqual(repn.constant, 0) + + def test_0_mult_nan(self): + m = self.make_model() + m.p = Param(initialize=0, mutable=True) + m.y.domain = Any + m.y.fix(float('nan')) + e = m.p * (m.y**2 + m.x) + + cfg = VisitorConfig() + repn = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.x]).walk_expression(e) + + self.assertEqual(len(repn.linear), 0) + self.assertEqual(repn.multiplier, 1) + self.assertIsNone(repn.nonlinear) + self.assertIsInstance(repn.constant, InvalidNumber) + assertExpressionsEqual(self, repn.constant.value, 0 * (float('nan') + m.x)) + + def test_0_mult_nan_param(self): + m = self.make_model() + m.p = Param(initialize=0, mutable=True) + m.y.fix(float('nan')) + e = m.p * (m.y**2) + + cfg = VisitorConfig() + repn = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.y]).walk_expression(e) + + self.assertEqual(len(repn.linear), 0) + self.assertEqual(repn.multiplier, 1) + self.assertIsNone(repn.nonlinear) + self.assertIsInstance(repn.constant, InvalidNumber) + assertExpressionsEqual(self, repn.constant.value, 0 * float('nan')) + + def test_0_mult_linear_with_nan(self): + m = self.make_model() + m.p = Param(initialize=0, mutable=True) + m.x.domain = Any + m.x.fix(float('nan')) + e = m.p * (3 * m.x * m.y + m.z) + + cfg = VisitorConfig() + repn = ParameterizedLinearRepnVisitor(*cfg, wrt=[m.x]).walk_expression(e) + + self.assertEqual(len(repn.linear), 2) + self.assertIn(id(m.y), repn.linear) + self.assertIsInstance(repn.linear[id(m.y)], InvalidNumber) + assertExpressionsEqual(self, repn.linear[id(m.y)].value, 0 * 3 * float('nan')) + self.assertIn(id(m.z), repn.linear) + self.assertEqual(repn.linear[id(m.z)], 0) + self.assertEqual(repn.multiplier, 1) + self.assertIsNone(repn.nonlinear) + self.assertEqual(repn.constant, 0) diff --git a/pyomo/repn/util.py b/pyomo/repn/util.py index 8d902d0f99a..32ec99dac0f 100644 --- a/pyomo/repn/util.py +++ b/pyomo/repn/util.py @@ -67,6 +67,7 @@ class ExprType(enum.IntEnum): CONSTANT = 0 + FIXED = 5 MONOMIAL = 10 LINEAR = 20 QUADRATIC = 30 @@ -378,18 +379,16 @@ class ExitNodeDispatcher(collections.defaultdict): `exitNode` callback This dispatcher implements a specialization of :py:`defaultdict` - that supports automatic type registration. Any missing types will - return the :py:meth:`register_dispatcher` method, which (when called - as a callback) will interrogate the type, identify the appropriate - callback, add the callback to the dict, and return the result of - calling the callback. As the callback is added to the dict, no type - will incur the overhead of `register_dispatcher` more than once. + that supports automatic type registration. As the identified + callback is added to the dict, no type will incur the overhead of + `register_dispatcher` more than once. Note that in this case, the client is expected to register all non-NPV expression types. 
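The registration pattern described here can be pictured with a small, generic sketch. This is an illustration of the idea only, not Pyomo's implementation; the class and registry names are made up:

    import collections

    class AutoRegisteringDispatcher(collections.defaultdict):
        """Toy 'register on first miss' dispatcher (illustrative only)."""

        def __init__(self, registry):
            super().__init__()
            self._registry = registry  # explicit node-type -> handler mapping

        def __missing__(self, node_type):
            # Resolve the handler once, walking the MRO so derived expression
            # types pick up the handler registered for their base class ...
            for base in node_type.__mro__:
                if base in self._registry:
                    handler = self._registry[base]
                    break
            else:
                raise KeyError(node_type)
            # ... then cache it, so this type never pays the lookup cost again.
            self[node_type] = handler
            return handler

Looking up a derived expression type the first time resolves and caches the base handler; every later lookup for that type is a plain dict hit.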
The auto-registration is designed to only handle two cases: - Auto-detection of user-defined Named Expression types - Automatic mappimg of NPV expressions to their equivalent non-NPV handlers + - Automatic registration of derived expression types """ diff --git a/pyomo/solvers/plugins/solvers/ASL.py b/pyomo/solvers/plugins/solvers/ASL.py index bb8174a013e..7acd59936b1 100644 --- a/pyomo/solvers/plugins/solvers/ASL.py +++ b/pyomo/solvers/plugins/solvers/ASL.py @@ -101,7 +101,8 @@ def _get_version(self): timeout=5, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - universal_newlines=True, + text=True, + errors='ignore', ) ver = _extract_version(results.stdout) if ver is None: diff --git a/pyomo/solvers/plugins/solvers/SAS.py b/pyomo/solvers/plugins/solvers/SAS.py new file mode 100644 index 00000000000..d7b09e29fde --- /dev/null +++ b/pyomo/solvers/plugins/solvers/SAS.py @@ -0,0 +1,811 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2024 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import logging +import sys +from os import stat +from abc import ABC, abstractmethod +from io import StringIO + +from pyomo.opt.base import ProblemFormat, ResultsFormat, OptSolver +from pyomo.opt.base.solvers import SolverFactory +from pyomo.common.collections import Bunch +from pyomo.common.dependencies import attempt_import +from pyomo.opt.results import ( + SolverResults, + SolverStatus, + TerminationCondition, + SolutionStatus, + ProblemSense, +) +from pyomo.common.tempfiles import TempfileManager +from pyomo.core.base import Var +from pyomo.core.base.block import BlockData +from pyomo.core.kernel.block import IBlock +from pyomo.common.log import LogStream +from pyomo.common.tee import capture_output, TeeStream + + +uuid, uuid_available = attempt_import('uuid') +logger = logging.getLogger("pyomo.solvers") + + +STATUS_TO_SOLVERSTATUS = { + "OK": SolverStatus.ok, + "SYNTAX_ERROR": SolverStatus.error, + "DATA_ERROR": SolverStatus.error, + "OUT_OF_MEMORY": SolverStatus.aborted, + "IO_ERROR": SolverStatus.error, + "ERROR": SolverStatus.error, +} + +# This combines all status codes from OPTLP/solvelp and OPTMILP/solvemilp +SOLSTATUS_TO_TERMINATIONCOND = { + "OPTIMAL": TerminationCondition.optimal, + "OPTIMAL_AGAP": TerminationCondition.optimal, + "OPTIMAL_RGAP": TerminationCondition.optimal, + "OPTIMAL_COND": TerminationCondition.optimal, + "TARGET": TerminationCondition.optimal, + "CONDITIONAL_OPTIMAL": TerminationCondition.optimal, + "FEASIBLE": TerminationCondition.feasible, + "INFEASIBLE": TerminationCondition.infeasible, + "UNBOUNDED": TerminationCondition.unbounded, + "INFEASIBLE_OR_UNBOUNDED": TerminationCondition.infeasibleOrUnbounded, + "SOLUTION_LIM": TerminationCondition.maxEvaluations, + "NODE_LIM_SOL": TerminationCondition.maxEvaluations, + "NODE_LIM_NOSOL": TerminationCondition.maxEvaluations, + "ITERATION_LIMIT_REACHED": TerminationCondition.maxIterations, + "TIME_LIM_SOL": TerminationCondition.maxTimeLimit, + "TIME_LIM_NOSOL": TerminationCondition.maxTimeLimit, + "TIME_LIMIT_REACHED": TerminationCondition.maxTimeLimit, + "ABORTED": 
TerminationCondition.userInterrupt, + "ABORT_SOL": TerminationCondition.userInterrupt, + "ABORT_NOSOL": TerminationCondition.userInterrupt, + "OUTMEM_SOL": TerminationCondition.solverFailure, + "OUTMEM_NOSOL": TerminationCondition.solverFailure, + "FAILED": TerminationCondition.solverFailure, + "FAIL_SOL": TerminationCondition.solverFailure, + "FAIL_NOSOL": TerminationCondition.solverFailure, +} + + +SOLSTATUS_TO_MESSAGE = { + "OPTIMAL": "The solution is optimal.", + "OPTIMAL_AGAP": "The solution is optimal within the absolute gap specified by the ABSOBJGAP= option.", + "OPTIMAL_RGAP": "The solution is optimal within the relative gap specified by the RELOBJGAP= option.", + "OPTIMAL_COND": "The solution is optimal, but some infeasibilities (primal, bound, or integer) exceed tolerances due to scaling or choice of a small INTTOL= value.", + "TARGET": "The solution is not worse than the target specified by the TARGET= option.", + "CONDITIONAL_OPTIMAL": "The solution is optimal, but some infeasibilities (primal, dual or bound) exceed tolerances due to scaling or preprocessing.", + "FEASIBLE": "The problem is feasible. This status is displayed when the IIS=TRUE option is specified and the problem is feasible.", + "INFEASIBLE": "The problem is infeasible.", + "UNBOUNDED": "The problem is unbounded.", + "INFEASIBLE_OR_UNBOUNDED": "The problem is infeasible or unbounded.", + "SOLUTION_LIM": "The solver reached the maximum number of solutions specified by the MAXSOLS= option.", + "NODE_LIM_SOL": "The solver reached the maximum number of nodes specified by the MAXNODES= option and found a solution.", + "NODE_LIM_NOSOL": "The solver reached the maximum number of nodes specified by the MAXNODES= option and did not find a solution.", + "ITERATION_LIMIT_REACHED": "The maximum allowable number of iterations was reached.", + "TIME_LIM_SOL": "The solver reached the execution time limit specified by the MAXTIME= option and found a solution.", + "TIME_LIM_NOSOL": "The solver reached the execution time limit specified by the MAXTIME= option and did not find a solution.", + "TIME_LIMIT_REACHED": "The solver reached its execution time limit.", + "ABORTED": "The solver was interrupted externally.", + "ABORT_SOL": "The solver was stopped by the user but still found a solution.", + "ABORT_NOSOL": "The solver was stopped by the user and did not find a solution.", + "OUTMEM_SOL": "The solver ran out of memory but still found a solution.", + "OUTMEM_NOSOL": "The solver ran out of memory and either did not find a solution or failed to output the solution due to insufficient memory.", + "FAILED": "The solver failed to converge, possibly due to numerical issues.", + "FAIL_SOL": "The solver stopped due to errors but still found a solution.", + "FAIL_NOSOL": "The solver stopped due to errors and did not find a solution.", +} + + +@SolverFactory.register("sas", doc="The SAS LP/MIP solver") +class SAS(OptSolver): + """The SAS optimization solver""" + + def __new__(cls, *args, **kwds): + mode = kwds.pop("solver_io", None) + if mode != None: + return SolverFactory(mode, **kwds) + else: + # Choose solver factory automatically + # based on what can be loaded. + s = SolverFactory("_sas94", **kwds) + if not s.available(): + s = SolverFactory("_sascas", **kwds) + return s + + +class SASAbc(ABC, OptSolver): + """Abstract base class for the SAS solver interfaces. 
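A minimal end-to-end sketch of invoking the factory registered above; the model and the option value below are illustrative assumptions, not taken from this patch:

    from pyomo.environ import (
        ConcreteModel, Constraint, NonNegativeReals, Objective, SolverFactory, Var,
    )

    m = ConcreteModel()
    m.x = Var([1, 2], within=NonNegativeReals)
    m.obj = Objective(expr=2 * m.x[1] + 3 * m.x[2])
    m.c = Constraint(expr=m.x[1] + m.x[2] >= 1)

    # "sas" resolves to the saspy-based 9.4 interface when available and
    # otherwise falls back to the swat/CAS interface.
    opt = SolverFactory("sas")
    if opt.available():
        results = opt.solve(m, tee=True, options={"maxiter": 20})
        print(results.solver.termination_condition)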
Simply to avoid code duplication.""" + + def __init__(self, **kwds): + """Initialize the SAS solver interfaces.""" + kwds["type"] = "sas" + super(SASAbc, self).__init__(**kwds) + + # + # Set up valid problem formats and valid results for each + # problem format + # + self._valid_problem_formats = [ProblemFormat.mps] + self._valid_result_formats = {ProblemFormat.mps: [ResultsFormat.soln]} + + self._keepfiles = False + self._capabilities.linear = True + self._capabilities.integer = True + + super(SASAbc, self).set_problem_format(ProblemFormat.mps) + + def _presolve(self, *args, **kwds): + """Set things up for the actual solve.""" + # create a context in the temporary file manager for + # this plugin - is "pop"ed in the _postsolve method. + TempfileManager.push() + + # Get the warmstart flag + self.warmstart_flag = kwds.pop("warmstart", False) + + # Call parent presolve function + super(SASAbc, self)._presolve(*args, **kwds) + + # Store the model, too bad this is not done in the base class + for arg in args: + if isinstance(arg, (BlockData, IBlock)): + # Store the instance + self._instance = arg + self._vars = [] + for block in self._instance.block_data_objects(active=True): + for vardata in block.component_data_objects( + Var, active=True, descend_into=False + ): + self._vars.append(vardata) + # Store the symbol map, we need this for example when writing the warmstart file + if isinstance(self._instance, IBlock): + self._smap = getattr(self._instance, "._symbol_maps")[self._smap_id] + else: + self._smap = self._instance.solutions.symbol_map[self._smap_id] + + # Create the primalin data + if self.warmstart_flag: + filename = self._warm_start_file_name = TempfileManager.create_tempfile( + ".sol", text=True + ) + smap = self._smap + numWritten = 0 + with open(filename, "w") as file: + file.write("_VAR_,_VALUE_\n") + for var in self._vars: + if (var.value is not None) and (id(var) in smap.byObject): + name = smap.byObject[id(var)] + file.write( + "{name},{value}\n".format(name=name, value=var.value) + ) + numWritten += 1 + if numWritten == 0: + # No solution available, disable warmstart + self.warmstart_flag = False + + def available(self, exception_flag=False): + """True if the solver is available""" + return self._python_api_exists + + def _has_integer_variables(self): + """True if the problem has integer variables.""" + for vardata in self._vars: + if vardata.is_binary() or vardata.is_integer(): + return True + return False + + def _create_results_from_status(self, status, solution_status): + """Create a results object and set the status code and messages.""" + results = SolverResults() + results.solver.name = "SAS" + results.solver.status = STATUS_TO_SOLVERSTATUS[status] + results.solver.hasSolution = False + if results.solver.status == SolverStatus.ok: + results.solver.termination_condition = SOLSTATUS_TO_TERMINATIONCOND[ + solution_status + ] + results.solver.message = results.solver.termination_message = ( + SOLSTATUS_TO_MESSAGE[solution_status] + ) + results.solver.status = TerminationCondition.to_solver_status( + results.solver.termination_condition + ) + if "OPTIMAL" in solution_status or "_SOL" in solution_status: + results.solver.hasSolution = True + elif results.solver.status == SolverStatus.aborted: + results.solver.termination_condition = TerminationCondition.userInterrupt + if solution_status != "ERROR": + results.solver.message = results.solver.termination_message = ( + SOLSTATUS_TO_MESSAGE[solution_status] + ) + else: + results.solver.termination_condition = 
TerminationCondition.error + results.solver.message = results.solver.termination_message = ( + SOLSTATUS_TO_MESSAGE["FAILED"] + ) + return results + + @abstractmethod + def _apply_solver(self): + pass + + def _postsolve(self): + """Clean up at the end, especially the temp files.""" + # Let the base class deal with returning results. + results = super(SASAbc, self)._postsolve() + + # Finally, clean any temporary files registered with the temp file + # manager, created populated *directly* by this plugin. does not + # include, for example, the execution script. but does include + # the warm-start file. + TempfileManager.pop(remove=not self._keepfiles) + + return results + + def warm_start_capable(self): + """True if the solver interface supports MILP warmstarting.""" + return True + + +@SolverFactory.register("_sas94", doc="SAS 9.4 interface") +class SAS94(SASAbc): + """ + Solver interface for SAS 9.4 using saspy. See the saspy documentation about + how to create a connection. + The swat connection options can be specified on the SolverFactory call. + """ + + def __init__(self, **kwds): + """Initialize the solver interface and see if the saspy package is available.""" + super(SAS94, self).__init__(**kwds) + + try: + import saspy + + self._sas = saspy + except ImportError: + self._python_api_exists = False + except Exception as e: + self._python_api_exists = False + # For other exceptions, raise it so that it does not get lost + raise e + else: + self._python_api_exists = True + self._sas.logger.setLevel(logger.level) + + # Store other options for the SAS session + self._session_options = kwds + + # Create the session + try: + self._sas_session = self._sas.SASsession(**self._session_options) + except: + self._sas_session = None + + def __del__(self): + # Close the session, if we created one + if self._sas_session: + self._sas_session.endsas() + del self._sas_session + + def _create_statement_str(self, statement): + """Helper function to create the strings for the statements of the proc OPTLP/OPTMILP code.""" + stmt = self.options.pop(statement, None) + if stmt: + return ( + statement.strip() + + " " + + " ".join(option + "=" + str(value) for option, value in stmt.items()) + + ";" + ) + else: + return "" + + def sas_version(self): + return self._sasver + + def _apply_solver(self): + """ "Prepare the options and run the solver. 
Then store the data to be returned.""" + logger.debug("Running SAS") + + # Set return code to issue an error if we get interrupted + self._rc = -1 + + # Figure out if the problem has integer variables + with_opt = self.options.pop("with", None) + if with_opt == "lp": + proc = "OPTLP" + elif with_opt == "milp": + proc = "OPTMILP" + else: + # Check if there are integer variables, this might be slow + proc = "OPTMILP" if self._has_integer_variables() else "OPTLP" + + # Get the rootnode options + decomp_str = self._create_statement_str("decomp") + decompmaster_str = self._create_statement_str("decompmaster") + decompmasterip_str = self._create_statement_str("decompmasterip") + decompsubprob_str = self._create_statement_str("decompsubprob") + rootnode_str = self._create_statement_str("rootnode") + + # Get a unique identifier, always use the same with different prefixes + unique = uuid.uuid4().hex[:16] + + # Create unique filename for output datasets + primalout_dataset_name = "pout" + unique + dualout_dataset_name = "dout" + unique + primalin_dataset_name = None + + # Handle warmstart + warmstart_str = "" + if self.warmstart_flag: + # Set the warmstart basis option + primalin_dataset_name = "pin" + unique + if proc != "OPTLP": + warmstart_str = """ + proc import datafile='{primalin}' + out={primalin_dataset_name} + dbms=csv + replace; + getnames=yes; + run; + """.format( + primalin=self._warm_start_file_name, + primalin_dataset_name=primalin_dataset_name, + ) + self.options["primalin"] = primalin_dataset_name + + # Convert options to string + opt_str = " ".join( + option + "=" + str(value) for option, value in self.options.items() + ) + + # Set some SAS options to make the log more clean + sas_options = "option notes nonumber nodate nosource pagesize=max;" + + # Get the current SAS session, submit the code and return the results + if not self._sas_session: + sas = self._sas_session = self._sas.SASsession(**self._session_options) + else: + sas = self._sas_session + + # Find the version of 9.4 we are using + self._sasver = sas.sasver + + # Upload files, only if not accessible locally + upload_mps = False + if not sas.file_info(self._problem_files[0], quiet=True): + sas.upload(self._problem_files[0], self._problem_files[0], overwrite=True) + upload_mps = True + + upload_pin = False + if self.warmstart_flag and not sas.file_info( + self._warm_start_file_name, quiet=True + ): + sas.upload( + self._warm_start_file_name, self._warm_start_file_name, overwrite=True + ) + upload_pin = True + + # Using a function call to make it easier to mock the version check + major_version = self.sas_version()[0] + minor_version = self.sas_version().split("M", 1)[1][0] + if major_version == "9" and int(minor_version) < 5: + raise NotImplementedError( + "Support for SAS 9.4 M4 and earlier is not implemented." 
+ ) + elif major_version == "9" and int(minor_version) == 5: + # In 9.4M5 we have to create an MPS data set from an MPS file first + # Earlier versions will not work because the MPS format in incompatible + mps_dataset_name = "mps" + unique + res = sas.submit( + """ + {sas_options} + {warmstart} + %MPS2SASD(MPSFILE="{mpsfile}", OUTDATA={mps_dataset_name}, MAXLEN=256, FORMAT=FREE); + proc {proc} data={mps_dataset_name} {options} primalout={primalout_dataset_name} dualout={dualout_dataset_name}; + {decomp} + {decompmaster} + {decompmasterip} + {decompsubprob} + {rootnode} + run; + """.format( + sas_options=sas_options, + warmstart=warmstart_str, + proc=proc, + mpsfile=self._problem_files[0], + mps_dataset_name=mps_dataset_name, + options=opt_str, + primalout_dataset_name=primalout_dataset_name, + dualout_dataset_name=dualout_dataset_name, + decomp=decomp_str, + decompmaster=decompmaster_str, + decompmasterip=decompmasterip_str, + decompsubprob=decompsubprob_str, + rootnode=rootnode_str, + ), + results="TEXT", + ) + sas.sasdata(mps_dataset_name).delete(quiet=True) + else: + # Since 9.4M6+ optlp/optmilp can read mps files directly (this includes Viya-based local installs) + res = sas.submit( + """ + {sas_options} + {warmstart} + proc {proc} mpsfile=\"{mpsfile}\" {options} primalout={primalout_dataset_name} dualout={dualout_dataset_name}; + {decomp} + {decompmaster} + {decompmasterip} + {decompsubprob} + {rootnode} + run; + """.format( + sas_options=sas_options, + warmstart=warmstart_str, + proc=proc, + mpsfile=self._problem_files[0], + options=opt_str, + primalout_dataset_name=primalout_dataset_name, + dualout_dataset_name=dualout_dataset_name, + decomp=decomp_str, + decompmaster=decompmaster_str, + decompmasterip=decompmasterip_str, + decompsubprob=decompsubprob_str, + rootnode=rootnode_str, + ), + results="TEXT", + ) + + # Delete uploaded file + if upload_mps: + sas.file_delete(self._problem_files[0], quiet=True) + if self.warmstart_flag and upload_pin: + sas.file_delete(self._warm_start_file_name, quiet=True) + + # Store log and ODS output + self._log = res["LOG"] + self._lst = res["LST"] + if "ERROR 22-322: Syntax error" in self._log: + raise ValueError( + "An option passed to the SAS solver caused a syntax error: {log}".format( + log=self._log + ) + ) + else: + # Print log if requested by the user, only if we did not already print it + if self._tee: + print(self._log) + self._macro = dict( + (key.strip(), value.strip()) + for key, value in ( + pair.split("=") for pair in sas.symget("_OR" + proc + "_").split() + ) + ) + if self._macro.get("STATUS", "ERROR") == "OK": + primal_out = sas.sd2df(primalout_dataset_name) + dual_out = sas.sd2df(dualout_dataset_name) + + # Delete data sets, they will go away automatically, but does not hurt to delete them + if primalin_dataset_name: + sas.sasdata(primalin_dataset_name).delete(quiet=True) + sas.sasdata(primalout_dataset_name).delete(quiet=True) + sas.sasdata(dualout_dataset_name).delete(quiet=True) + + # Prepare the solver results + results = self.results = self._create_results_from_status( + self._macro.get("STATUS", "ERROR"), + self._macro.get("SOLUTION_STATUS", "ERROR"), + ) + + if "Objective Sense Maximization" in self._lst: + results.problem.sense = ProblemSense.maximize + else: + results.problem.sense = ProblemSense.minimize + + # Prepare the solution information + if results.solver.hasSolution: + sol = results.solution.add() + + # Store status in solution + sol.status = SolutionStatus.feasible + sol.termination_condition = 
SOLSTATUS_TO_TERMINATIONCOND[ + self._macro.get("SOLUTION_STATUS", "ERROR") + ] + + # Store objective value in solution + sol.objective["__default_objective__"] = {"Value": self._macro["OBJECTIVE"]} + + if proc == "OPTLP": + # Convert primal out data set to variable dictionary + # Use pandas functions for efficiency + primal_out = primal_out[["_VAR_", "_VALUE_", "_STATUS_", "_R_COST_"]] + primal_out = primal_out.set_index("_VAR_", drop=True) + primal_out = primal_out.rename( + {"_VALUE_": "Value", "_STATUS_": "Status", "_R_COST_": "rc"}, + axis="columns", + ) + sol.variable = primal_out.to_dict("index") + + # Convert dual out data set to constraint dictionary + # Use pandas functions for efficiency + dual_out = dual_out[["_ROW_", "_VALUE_", "_STATUS_", "_ACTIVITY_"]] + dual_out = dual_out.set_index("_ROW_", drop=True) + dual_out = dual_out.rename( + {"_VALUE_": "dual", "_STATUS_": "Status", "_ACTIVITY_": "slack"}, + axis="columns", + ) + sol.constraint = dual_out.to_dict("index") + else: + # Convert primal out data set to variable dictionary + # Use pandas functions for efficiency + primal_out = primal_out[["_VAR_", "_VALUE_"]] + primal_out = primal_out.set_index("_VAR_", drop=True) + primal_out = primal_out.rename({"_VALUE_": "Value"}, axis="columns") + sol.variable = primal_out.to_dict("index") + + self._rc = 0 + return Bunch(rc=self._rc, log=self._log) + + +@SolverFactory.register("_sascas", doc="SAS Viya CAS Server interface") +class SASCAS(SASAbc): + """ + Solver interface connection to a SAS Viya CAS server using swat. + See the documentation for the swat package about how to create a connection. + The swat connection options can be specified on the SolverFactory call. + """ + + def __init__(self, **kwds): + """Initialize and try to load the swat package.""" + super(SASCAS, self).__init__(**kwds) + + try: + import swat + + self._sas = swat + except ImportError: + self._python_api_exists = False + except Exception as e: + self._python_api_exists = False + # For other exceptions, raise it so that it does not get lost + raise e + else: + self._python_api_exists = True + + self._session_options = kwds + + # Create the session + try: + self._sas_session = self._sas.CAS(**self._session_options) + except: + self._sas_session = None + + def __del__(self): + # Close the session, if we created one + if self._sas_session: + self._sas_session.close() + del self._sas_session + + def _uploadMpsFile(self, s, unique): + # Declare a unique table name for the mps table + mpsdata_table_name = "mps" + unique + + # Upload mps file to CAS, if the file is larger than 2 GB, we need to use convertMps instead of loadMps + # Note that technically it is 2 Gibibytes file size that trigger the issue, but 2 GB is the safer threshold + if stat(self._problem_files[0]).st_size > 2e9: + # For files larger than 2 GB (this is a limitation of the loadMps action used in the else part). + # Use convertMPS, first create file for upload. 
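+        # The upload is a two-column tab-separated table, roughly (illustrative):
+        #   _ID_  Text
+        #   1     NAME          problem
+        #   2     ROWS
+        #   3      N  obj
+        # convertMps then reassembles the MPS text from the _ID_ ordering.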
+ mpsWithIdFileName = TempfileManager.create_tempfile(".mps.csv", text=True) + with open(mpsWithIdFileName, "w") as mpsWithId: + mpsWithId.write("_ID_\tText\n") + with open(self._problem_files[0], "r") as f: + id = 0 + for line in f: + id += 1 + mpsWithId.write(str(id) + "\t" + line.rstrip() + "\n") + + # Upload .mps.csv file + mpscsv_table_name = "csv" + unique + s.upload_file( + mpsWithIdFileName, + casout={"name": mpscsv_table_name, "replace": True}, + importoptions={"filetype": "CSV", "delimiter": "\t"}, + ) + + # Convert .mps.csv file to .mps + s.optimization.convertMps( + data=mpscsv_table_name, + casOut={"name": mpsdata_table_name, "replace": True}, + format="FREE", + maxLength=256, + ) + + # Delete the table we don't need anymore + if mpscsv_table_name: + s.dropTable(name=mpscsv_table_name, quiet=True) + else: + # For small files (less than 2 GB), use loadMps + with open(self._problem_files[0], "r") as mps_file: + s.optimization.loadMps( + mpsFileString=mps_file.read(), + casout={"name": mpsdata_table_name, "replace": True}, + format="FREE", + maxLength=256, + ) + return mpsdata_table_name + + def _uploadPrimalin(self, s, unique): + # Upload warmstart file to CAS with a unique name + primalin_table_name = "pin" + unique + s.upload_file( + self._warm_start_file_name, + casout={"name": primalin_table_name, "replace": True}, + importoptions={"filetype": "CSV"}, + ) + self.options["primalin"] = primalin_table_name + return primalin_table_name + + def _retrieveSolution( + self, s, r, results, action, primalout_table_name, dualout_table_name + ): + # Create solution + sol = results.solution.add() + + # Store status in solution + sol.status = SolutionStatus.feasible + sol.termination_condition = SOLSTATUS_TO_TERMINATIONCOND[ + r.get("solutionStatus", "ERROR") + ] + + # Store objective value in solution + sol.objective["__default_objective__"] = {"Value": r["objective"]} + + if action == "solveMilp": + primal_out = s.CASTable(name=primalout_table_name) + # Use pandas functions for efficiency + primal_out = primal_out[["_VAR_", "_VALUE_"]] + sol.variable = {} + for row in primal_out.itertuples(index=False): + sol.variable[row[0]] = {"Value": row[1]} + else: + # Convert primal out data set to variable dictionary + # Use panda functions for efficiency + primal_out = s.CASTable(name=primalout_table_name) + primal_out = primal_out[["_VAR_", "_VALUE_", "_STATUS_", "_R_COST_"]] + sol.variable = {} + for row in primal_out.itertuples(index=False): + sol.variable[row[0]] = {"Value": row[1], "Status": row[2], "rc": row[3]} + + # Convert dual out data set to constraint dictionary + # Use pandas functions for efficiency + dual_out = s.CASTable(name=dualout_table_name) + dual_out = dual_out[["_ROW_", "_VALUE_", "_STATUS_", "_ACTIVITY_"]] + sol.constraint = {} + for row in dual_out.itertuples(index=False): + sol.constraint[row[0]] = { + "dual": row[1], + "Status": row[2], + "slack": row[3], + } + + def _apply_solver(self): + """ "Prepare the options and run the solver. 
Then store the data to be returned.""" + logger.debug("Running SAS Viya") + + # Set return code to issue an error if we get interrupted + self._rc = -1 + + # Figure out if the problem has integer variables + with_opt = self.options.pop("with", None) + if with_opt == "lp": + action = "solveLp" + elif with_opt == "milp": + action = "solveMilp" + else: + # Check if there are integer variables, this might be slow + action = "solveMilp" if self._has_integer_variables() else "solveLp" + + # Get a unique identifier, always use the same with different prefixes + unique = uuid.uuid4().hex[:16] + + # Creat the output stream, we want to print to a log string as well as to the console + self._log = StringIO() + ostreams = [LogStream(level=logging.INFO, logger=logger)] + ostreams.append(self._log) + if self._tee: + ostreams.append(sys.stdout) + + # Connect to CAS server + with TeeStream(*ostreams) as t: + with capture_output(output=t.STDOUT, capture_fd=False): + s = self._sas_session + if s == None: + s = self._sas_session = self._sas.CAS(**self._session_options) + try: + # Load the optimization action set + s.loadactionset("optimization") + + mpsdata_table_name = self._uploadMpsFile(s, unique) + + primalin_table_name = None + if self.warmstart_flag: + primalin_table_name = self._uploadPrimalin(s, unique) + + # Define output table names + primalout_table_name = "pout" + unique + dualout_table_name = None + + # Solve the problem in CAS + if action == "solveMilp": + r = s.optimization.solveMilp( + data={"name": mpsdata_table_name}, + primalOut={"name": primalout_table_name, "replace": True}, + **self.options + ) + else: + dualout_table_name = "dout" + unique + r = s.optimization.solveLp( + data={"name": mpsdata_table_name}, + primalOut={"name": primalout_table_name, "replace": True}, + dualOut={"name": dualout_table_name, "replace": True}, + **self.options + ) + + # Prepare the solver results + if r: + # Get back the primal and dual solution data sets + results = self.results = self._create_results_from_status( + r.get("status", "ERROR"), r.get("solutionStatus", "ERROR") + ) + + if results.solver.status != SolverStatus.error: + if r.ProblemSummary["cValue1"][1] == "Maximization": + results.problem.sense = ProblemSense.maximize + else: + results.problem.sense = ProblemSense.minimize + + # Prepare the solution information + if results.solver.hasSolution: + self._retrieveSolution( + s, + r, + results, + action, + primalout_table_name, + dualout_table_name, + ) + else: + raise ValueError("The SAS solver returned an error status.") + else: + results = self.results = SolverResults() + results.solver.name = "SAS" + results.solver.status = SolverStatus.error + raise ValueError( + "An option passed to the SAS solver caused a syntax error." 
+ ) + + finally: + if mpsdata_table_name: + s.dropTable(name=mpsdata_table_name, quiet=True) + if primalin_table_name: + s.dropTable(name=primalin_table_name, quiet=True) + if primalout_table_name: + s.dropTable(name=primalout_table_name, quiet=True) + if dualout_table_name: + s.dropTable(name=dualout_table_name, quiet=True) + + self._log = self._log.getvalue() + self._rc = 0 + return Bunch(rc=self._rc, log=self._log) diff --git a/pyomo/solvers/plugins/solvers/__init__.py b/pyomo/solvers/plugins/solvers/__init__.py index 9b2507d876c..a3918dce5cc 100644 --- a/pyomo/solvers/plugins/solvers/__init__.py +++ b/pyomo/solvers/plugins/solvers/__init__.py @@ -30,3 +30,4 @@ import pyomo.solvers.plugins.solvers.mosek_persistent import pyomo.solvers.plugins.solvers.xpress_direct import pyomo.solvers.plugins.solvers.xpress_persistent +import pyomo.solvers.plugins.solvers.SAS diff --git a/pyomo/solvers/plugins/solvers/persistent_solver.py b/pyomo/solvers/plugins/solvers/persistent_solver.py index 3c2a9e52eab..ef883fe5496 100644 --- a/pyomo/solvers/plugins/solvers/persistent_solver.py +++ b/pyomo/solvers/plugins/solvers/persistent_solver.py @@ -262,7 +262,9 @@ def _add_and_collect_column_data(self, var, obj_coef, constraints, coefficients) coeff_list = list() constr_list = list() for val, c in zip(coefficients, constraints): - c._body += val * var + lb, body, ub = c.to_bounded_expression() + body += val * var + c.set_value((lb, body, ub)) self._vars_referenced_by_con[c].add(var) cval = _convert_to_const(val) diff --git a/pyomo/solvers/plugins/solvers/xpress_direct.py b/pyomo/solvers/plugins/solvers/xpress_direct.py index c62f76d85ce..33a3c8d0282 100644 --- a/pyomo/solvers/plugins/solvers/xpress_direct.py +++ b/pyomo/solvers/plugins/solvers/xpress_direct.py @@ -1036,10 +1036,8 @@ def _load_slacks(self, cons_to_load=None): if xpress_con in self._range_constraints: ## for xpress, the slack on a range constraint ## is based on the upper bound - ## FIXME: This looks like a bug - there is no variable named - ## `con` - there is, however, `xpress_con` and `pyomo_con` - lb = con.lb - ub = con.ub + lb = xpress_con.lb + ub = xpress_con.ub ub_s = val expr_val = ub - ub_s lb_s = lb - expr_val diff --git a/pyomo/solvers/tests/checks/test_SAS.py b/pyomo/solvers/tests/checks/test_SAS.py new file mode 100644 index 00000000000..6dd662bdb21 --- /dev/null +++ b/pyomo/solvers/tests/checks/test_SAS.py @@ -0,0 +1,543 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2024 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. 
+# ___________________________________________________________________________ + +import os +import pyomo.common.unittest as unittest +from unittest import mock +from pyomo.environ import ( + ConcreteModel, + Var, + Objective, + Constraint, + NonNegativeIntegers, + NonNegativeReals, + Reals, + Integers, + maximize, + minimize, + Suffix, +) +from pyomo.opt.results import SolverStatus, TerminationCondition, ProblemSense +from pyomo.opt import SolverFactory, check_available_solvers +import warnings + +CFGFILE = os.environ.get("SAS_CFG_FILE_PATH", None) + +CAS_OPTIONS = { + "hostname": os.environ.get("CASHOST", None), + "port": os.environ.get("CASPORT", None), + "authinfo": os.environ.get("CASAUTHINFO", None), +} + + +sas_available = check_available_solvers("sas") + + +class SASTestAbc: + solver_io = "_sas94" + session_options = {} + cfgfile = CFGFILE + + @classmethod + def setUpClass(cls): + cls.opt_sas = SolverFactory( + "sas", solver_io=cls.solver_io, cfgfile=cls.cfgfile, **cls.session_options + ) + + @classmethod + def tearDownClass(cls): + del cls.opt_sas + + def setObj(self): + X = self.instance.X + self.instance.Obj = Objective( + expr=2 * X[1] - 3 * X[2] - 4 * X[3], sense=minimize + ) + + def setX(self): + self.instance.X = Var([1, 2, 3], within=NonNegativeReals) + + def setUp(self): + # Disable resource warnings + warnings.filterwarnings("ignore", category=ResourceWarning) + instance = self.instance = ConcreteModel() + self.setX() + X = instance.X + instance.R1 = Constraint(expr=-2 * X[2] - 3 * X[3] >= -5) + instance.R2 = Constraint(expr=X[1] + X[2] + 2 * X[3] <= 4) + instance.R3 = Constraint(expr=X[1] + 2 * X[2] + 3 * X[3] <= 7) + self.setObj() + + # Declare suffixes for solution information + instance.status = Suffix(direction=Suffix.IMPORT) + instance.slack = Suffix(direction=Suffix.IMPORT) + instance.rc = Suffix(direction=Suffix.IMPORT) + instance.dual = Suffix(direction=Suffix.IMPORT) + + def tearDown(self): + del self.instance + + def run_solver(self, **kwargs): + opt_sas = self.opt_sas + instance = self.instance + + # Call the solver + self.results = opt_sas.solve(instance, **kwargs) + + +class SASTestLP(SASTestAbc): + def checkSolution(self): + instance = self.instance + results = self.results + # Get the objective sense, we use the same code for minimization and maximization tests + sense = instance.Obj.sense + + # Check status + self.assertEqual(results.solver.status, SolverStatus.ok) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) + + # Check objective value + self.assertAlmostEqual(instance.Obj(), sense * -7.5) + + # Check primal solution values + self.assertAlmostEqual(instance.X[1].value, 0.0) + self.assertAlmostEqual(instance.X[2].value, 2.5) + self.assertAlmostEqual(instance.X[3].value, 0.0) + + # Check reduced cost + self.assertAlmostEqual(instance.rc[instance.X[1]], sense * 2.0) + self.assertAlmostEqual(instance.rc[instance.X[2]], sense * 0.0) + self.assertAlmostEqual(instance.rc[instance.X[3]], sense * 0.5) + + # Check slack + self.assertAlmostEqual(instance.slack[instance.R1], -5.0) + self.assertAlmostEqual(instance.slack[instance.R2], 2.5) + self.assertAlmostEqual(instance.slack[instance.R3], 5.0) + + # Check dual solution + self.assertAlmostEqual(instance.dual[instance.R1], sense * 1.5) + self.assertAlmostEqual(instance.dual[instance.R2], sense * 0.0) + self.assertAlmostEqual(instance.dual[instance.R3], sense * 0.0) + + # Check basis status + self.assertEqual(instance.status[instance.X[1]], "L") + 
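+        # _STATUS_ codes from the SAS primal/dual output; B = basic,
+        # L = at lower bound, U = at upper bound is the assumed interpretation here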
self.assertEqual(instance.status[instance.X[2]], "B") + self.assertEqual(instance.status[instance.X[3]], "L") + self.assertEqual(instance.status[instance.R1], "U") + self.assertEqual(instance.status[instance.R2], "B") + self.assertEqual(instance.status[instance.R3], "B") + + def test_solver_default(self): + self.run_solver() + self.checkSolution() + + def test_solver_tee(self): + self.run_solver(tee=True) + self.checkSolution() + + def test_solver_primal(self): + self.run_solver(options={"algorithm": "ps"}) + self.assertIn("NOTE: The Primal Simplex algorithm is used.", self.opt_sas._log) + self.checkSolution() + + def test_solver_ipm(self): + self.run_solver(options={"algorithm": "ip"}) + self.assertIn("NOTE: The Interior Point algorithm is used.", self.opt_sas._log) + self.checkSolution() + + def test_solver_intoption(self): + self.run_solver(options={"maxiter": 20}) + self.checkSolution() + + def test_solver_invalidoption(self): + with self.assertRaisesRegex(ValueError, "syntax error"): + self.run_solver(options={"foo": "bar"}) + + def test_solver_max(self): + X = self.instance.X + self.instance.Obj.set_value(expr=-2 * X[1] + 3 * X[2] + 4 * X[3]) + self.instance.Obj.sense = maximize + self.run_solver() + self.checkSolution() + self.assertEqual(self.results.problem.sense, ProblemSense.maximize) + + def test_solver_infeasible(self): + instance = self.instance + X = instance.X + instance.R4 = Constraint(expr=-2 * X[2] - 3 * X[3] <= -6) + self.run_solver() + results = self.results + self.assertEqual(results.solver.status, SolverStatus.warning) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.infeasible + ) + self.assertEqual(results.solver.message, "The problem is infeasible.") + + def test_solver_infeasible_or_unbounded(self): + self.instance.X.domain = Reals + self.run_solver() + results = self.results + self.assertEqual(results.solver.status, SolverStatus.warning) + self.assertIn( + results.solver.termination_condition, + [ + TerminationCondition.infeasibleOrUnbounded, + TerminationCondition.unbounded, + ], + ) + self.assertIn( + results.solver.message, + ["The problem is infeasible or unbounded.", "The problem is unbounded."], + ) + + def test_solver_unbounded(self): + self.instance.X.domain = Reals + self.run_solver(options={"presolver": "none", "algorithm": "primal"}) + results = self.results + self.assertEqual(results.solver.status, SolverStatus.warning) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.unbounded + ) + self.assertEqual(results.solver.message, "The problem is unbounded.") + + def checkSolutionDecomp(self): + instance = self.instance + results = self.results + # Get the objective sense, we use the same code for minimization and maximization tests + sense = instance.Obj.sense + + # Check status + self.assertEqual(results.solver.status, SolverStatus.ok) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) + + # Check objective value + self.assertAlmostEqual(instance.Obj(), sense * -7.5) + + # Check primal solution values + self.assertAlmostEqual(instance.X[1].value, 0.0) + self.assertAlmostEqual(instance.X[2].value, 2.5) + self.assertAlmostEqual(instance.X[3].value, 0.0) + + # Check reduced cost + self.assertAlmostEqual(instance.rc[instance.X[1]], sense * 2.0) + self.assertAlmostEqual(instance.rc[instance.X[2]], sense * 0.0) + self.assertAlmostEqual(instance.rc[instance.X[3]], sense * 0.5) + + # Check slack + self.assertAlmostEqual(instance.slack[instance.R1], -5.0) + 
self.assertAlmostEqual(instance.slack[instance.R2], 2.5) + self.assertAlmostEqual(instance.slack[instance.R3], 5.0) + + # Check dual solution + self.assertAlmostEqual(instance.dual[instance.R1], sense * 1.5) + self.assertAlmostEqual(instance.dual[instance.R2], sense * 0.0) + self.assertAlmostEqual(instance.dual[instance.R3], sense * 0.0) + + # Don't check basis status for decomp + + def test_solver_decomp(self): + self.run_solver( + options={ + "decomp": {"absobjgap": 0.0}, + "decompmaster": {"algorithm": "dual"}, + "decompsubprob": {"presolver": "none"}, + } + ) + self.assertIn( + "NOTE: The DECOMP method value DEFAULT is applied.", self.opt_sas._log + ) + self.checkSolutionDecomp() + + def test_solver_iis(self): + self.run_solver(options={"iis": "true"}) + results = self.results + self.assertEqual(results.solver.status, SolverStatus.ok) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.feasible + ) + self.assertIn("NOTE: The IIS= option is enabled.", self.opt_sas._log) + self.assertEqual( + results.solver.message, + "The problem is feasible. This status is displayed when the IIS=TRUE option is specified and the problem is feasible.", + ) + + def test_solver_maxiter(self): + self.run_solver(options={"maxiter": 1}) + results = self.results + self.assertEqual(results.solver.status, SolverStatus.ok) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.maxIterations + ) + self.assertEqual( + results.solver.message, + "The maximum allowable number of iterations was reached.", + ) + + def test_solver_with_milp(self): + self.run_solver(options={"with": "milp"}) + self.assertIn( + "WARNING: The problem has no integer variables.", self.opt_sas._log + ) + + +@unittest.skipIf(not sas_available, "The SAS solver is not available") +class SASTestLP94(SASTestLP, unittest.TestCase): + @mock.patch( + "pyomo.solvers.plugins.solvers.SAS.SAS94.sas_version", + return_value="9.sd45s39M4234232", + ) + def test_solver_versionM4(self, sas): + with self.assertRaises(NotImplementedError): + self.run_solver() + + @mock.patch( + "pyomo.solvers.plugins.solvers.SAS.SAS94.sas_version", + return_value="9.34897293M5324u98", + ) + def test_solver_versionM5(self, sas): + self.run_solver() + self.checkSolution() + + @mock.patch("saspy.SASsession.submit", return_value={"LOG": "", "LST": ""}) + @mock.patch("saspy.SASsession.symget", return_value="STATUS=OUT_OF_MEMORY") + def test_solver_out_of_memory(self, submit_mock, symget_mocks): + self.run_solver(load_solutions=False) + results = self.results + self.assertEqual(results.solver.status, SolverStatus.aborted) + + @mock.patch("saspy.SASsession.submit", return_value={"LOG": "", "LST": ""}) + @mock.patch("saspy.SASsession.symget", return_value="STATUS=ERROR") + def test_solver_error(self, submit_mock, symget_mock): + self.run_solver(load_solutions=False) + results = self.results + self.assertEqual(results.solver.status, SolverStatus.error) + + +# @unittest.skipIf(not sas_available, "The SAS solver is not available") +@unittest.skip("Tests not yet configured for SAS Viya interface.") +class SASTestLPCAS(SASTestLP, unittest.TestCase): + solver_io = "_sascas" + session_options = CAS_OPTIONS + + @mock.patch("pyomo.solvers.plugins.solvers.SAS.stat") + def test_solver_large_file(self, os_stat): + os_stat.return_value.st_size = 3 * 1024**3 + self.run_solver() + self.checkSolution() + + +class SASTestMILP(SASTestAbc): + def setX(self): + self.instance.X = Var([1, 2, 3], within=NonNegativeIntegers) + + def checkSolution(self): + 
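+        # Only the objective and primal variable values are checked here:
+        # the MILP interface loads just _VAR_/_VALUE_, so no reduced costs,
+        # duals, or basis statuses are available.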
instance = self.instance + results = self.results + + # Get the objective sense, we use the same code for minimization and maximization tests + sense = instance.Obj.sense + + # Check status + self.assertEqual(results.solver.status, SolverStatus.ok) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) + + # Check objective value + self.assertAlmostEqual(instance.Obj(), sense * -7) + + # Check primal solution values + self.assertAlmostEqual(instance.X[1].value, 0.0) + self.assertAlmostEqual(instance.X[2].value, 1.0) + self.assertAlmostEqual(instance.X[3].value, 1.0) + + def test_solver_default(self): + self.run_solver() + self.checkSolution() + + def test_solver_tee(self): + self.run_solver(tee=True) + self.checkSolution() + + def test_solver_presolve(self): + self.run_solver(options={"presolver": "none"}) + self.assertIn( + "NOTE: The MILP presolver value NONE is applied.", self.opt_sas._log + ) + self.checkSolution() + + def test_solver_intoption(self): + self.run_solver(options={"maxnodes": 20}) + self.checkSolution() + + def test_solver_invalidoption(self): + with self.assertRaisesRegex(ValueError, "syntax error"): + self.run_solver(options={"foo": "bar"}) + + def test_solver_max(self): + X = self.instance.X + self.instance.Obj.set_value(expr=-2 * X[1] + 3 * X[2] + 4 * X[3]) + self.instance.Obj.sense = maximize + self.run_solver() + self.checkSolution() + + def test_solver_infeasible(self): + instance = self.instance + X = instance.X + instance.R4 = Constraint(expr=-2 * X[2] - 3 * X[3] <= -6) + self.run_solver() + results = self.results + self.assertEqual(results.solver.status, SolverStatus.warning) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.infeasible + ) + self.assertEqual(results.solver.message, "The problem is infeasible.") + + def test_solver_infeasible_or_unbounded(self): + self.instance.X.domain = Integers + self.run_solver() + results = self.results + self.assertEqual(results.solver.status, SolverStatus.warning) + self.assertIn( + results.solver.termination_condition, + [ + TerminationCondition.infeasibleOrUnbounded, + TerminationCondition.unbounded, + ], + ) + self.assertIn( + results.solver.message, + ["The problem is infeasible or unbounded.", "The problem is unbounded."], + ) + + def test_solver_unbounded(self): + self.instance.X.domain = Integers + self.run_solver( + options={"presolver": "none", "rootnode": {"algorithm": "primal"}} + ) + results = self.results + self.assertEqual(results.solver.status, SolverStatus.warning) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.unbounded + ) + self.assertEqual(results.solver.message, "The problem is unbounded.") + + def test_solver_decomp(self): + self.run_solver( + options={ + "decomp": {"hybrid": "off"}, + "decompmaster": {"algorithm": "dual"}, + "decompmasterip": {"presolver": "none"}, + "decompsubprob": {"presolver": "none"}, + } + ) + self.assertIn( + "NOTE: The DECOMP method value DEFAULT is applied.", self.opt_sas._log + ) + self.checkSolution() + + def test_solver_rootnode(self): + self.run_solver(options={"rootnode": {"presolver": "automatic"}}) + self.checkSolution() + + def test_solver_maxnodes(self): + self.run_solver(options={"maxnodes": 0}) + results = self.results + self.assertEqual(results.solver.status, SolverStatus.ok) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.maxEvaluations + ) + self.assertEqual( + results.solver.message, + "The solver reached the maximum number of 
nodes specified by the MAXNODES= option and found a solution.", + ) + + def test_solver_maxsols(self): + self.run_solver(options={"maxsols": 1}) + results = self.results + self.assertEqual(results.solver.status, SolverStatus.ok) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.maxEvaluations + ) + self.assertEqual( + results.solver.message, + "The solver reached the maximum number of solutions specified by the MAXSOLS= option.", + ) + + def test_solver_target(self): + self.run_solver(options={"target": -6.0}) + results = self.results + self.assertEqual(results.solver.status, SolverStatus.ok) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) + self.assertEqual( + results.solver.message, + "The solution is not worse than the target specified by the TARGET= option.", + ) + + def test_solver_primalin(self): + X = self.instance.X + X[1] = None + X[2] = 3 + X[3] = 7 + self.run_solver(warmstart=True) + self.checkSolution() + self.assertIn( + "NOTE: The input solution is infeasible or incomplete. Repair heuristics are applied.", + self.opt_sas._log, + ) + + def test_solver_primalin_nosol(self): + X = self.instance.X + X[1] = None + X[2] = None + X[3] = None + self.run_solver(warmstart=True) + self.checkSolution() + + @mock.patch("pyomo.solvers.plugins.solvers.SAS.stat") + def test_solver_large_file(self, os_stat): + os_stat.return_value.st_size = 3 * 1024**3 + self.run_solver() + self.checkSolution() + + def test_solver_with_lp(self): + self.run_solver(options={"with": "lp"}) + self.assertIn( + "contains integer variables; the linear relaxation will be solved.", + self.opt_sas._log, + ) + + def test_solver_warmstart_capable(self): + self.run_solver() + self.assertTrue(self.opt_sas.warm_start_capable()) + + +# @unittest.skipIf(not sas_available, "The SAS solver is not available") +@unittest.skip("MILP94 tests disabled.") +class SASTestMILP94(SASTestMILP, unittest.TestCase): + pass + + +# @unittest.skipIf(not sas_available, "The SAS solver is not available") +@unittest.skip("Tests not yet configured for SAS Viya interface.") +class SASTestMILPCAS(SASTestMILP, unittest.TestCase): + solver_io = "_sascas" + session_options = CAS_OPTIONS + + +if __name__ == "__main__": + unittest.main() diff --git a/pyomo/util/infeasible.py b/pyomo/util/infeasible.py index 961d5b35036..6a90a4c3773 100644 --- a/pyomo/util/infeasible.py +++ b/pyomo/util/infeasible.py @@ -159,7 +159,7 @@ def log_infeasible_constraints( if log_variables: line += ''.join( f"\n - VAR {v.name}: {v.value}" - for v in identify_variables(constr.body, include_fixed=True) + for v in identify_variables(constr.expr, include_fixed=True) ) logger.info(line)
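For reference, a small sketch of the utility touched in the final hunk; the model is an illustrative assumption. With log_variables=True, every variable identified in an infeasible constraint's expression is logged with its current value:

    import logging

    from pyomo.environ import ConcreteModel, Constraint, Var
    from pyomo.util.infeasible import log_infeasible_constraints

    logging.basicConfig(level=logging.INFO)

    m = ConcreteModel()
    m.x = Var(initialize=0.0)
    m.c = Constraint(expr=m.x >= 1)

    # x = 0 violates c, so the constraint (and, with log_variables=True,
    # x and its value) is reported via the module's logger.
    log_infeasible_constraints(m, log_variables=True)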