Backport 11.1 bugfixes #9266

Merged
3 changes: 3 additions & 0 deletions .readthedocs.yaml
@@ -15,3 +15,6 @@ build:
  os: ubuntu-22.04
  tools:
    python: "3.12"
+  jobs:
+    post_checkout:
+      - git fetch --unshallow --tags || true
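
The added `post_checkout` job deepens the Read the Docs clone: RTD checks out a shallow clone by default, which hides the git tags that tag-based versioning needs, and `|| true` keeps the step from failing on an already-complete clone. A minimal sketch of the kind of lookup this unblocks, assuming (not stated in the diff) that the project resolves its version from git tags via setuptools-scm:

```python
# Hypothetical check, assuming setuptools-scm-style versioning:
# on a shallow, tagless clone this raises LookupError; after
# `git fetch --unshallow --tags` it resolves a tag-based version.
from setuptools_scm import get_version

print(get_version(root="."))
```
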
7 changes: 3 additions & 4 deletions README.md
@@ -80,10 +80,9 @@
code you can run a fast subset of the tests with
pytest -n logical tests/ert/unit_tests -m "not integration_tests"
```

-[Git LFS](https://git-lfs.com/) must be installed to get all the files. This is packaged as `git-lfs` on Ubuntu, Fedora or macOS Homebrew. For Equinor RGS node users, it is possible to use `git` from Red Hat Software Collections:
-```sh
-source /opt/rh/rh-git227/enable
-```
+[Git LFS](https://git-lfs.com/) must be installed to get all the files. This is
+packaged as `git-lfs` on Ubuntu, Fedora or macOS Homebrew. For Equinor TGX
+users, it is preinstalled.

If you have not used git-lfs before, you might have to make changes to your global Git config for git-lfs to work properly.
```sh
2 changes: 1 addition & 1 deletion justfile
@@ -10,4 +10,4 @@ snake_oil:

# execute rapid unittests
rapid-tests:
-    pytest -n logical tests/ert/unit_tests -m "not integration_tests"
+    nice pytest -n logical tests/ert/unit_tests -m "not integration_tests"
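
Prefixing the recipe with `nice` runs the parallel pytest session at lower CPU priority, so `-n logical` (one worker per logical core) does not starve interactive work. The same effect is available from inside Python, as a small Unix-only sketch:

```python
import os

# Raise this process's niceness by 10; the scheduler then favors
# other (interactive) processes over the test run. Unix-only.
os.nice(10)
```
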
3 changes: 2 additions & 1 deletion src/ert/dark_storage/common.py
@@ -6,6 +6,7 @@
import pandas as pd
import polars
import xarray as xr
+from polars.exceptions import ColumnNotFoundError

from ert.config import GenDataConfig, GenKwConfig
from ert.config.field import Field
@@ -184,7 +185,7 @@ def data_for_key(
            return data.astype(float)
        except ValueError:
            return data
-    except (ValueError, KeyError):
+    except (ValueError, KeyError, ColumnNotFoundError):
        return pd.DataFrame()

    return pd.DataFrame()
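
polars signals a missing column with its own `ColumnNotFoundError` rather than `KeyError`, so without the broadened `except`, a request for an unknown key would escape `data_for_key` instead of degrading to an empty `DataFrame`. A standalone sketch of the failure mode being caught (not ert's actual call site):

```python
import pandas as pd
import polars
from polars.exceptions import ColumnNotFoundError

df = polars.DataFrame({"values": [1.0, 2.0]})
try:
    df.select("no_such_column")  # raises ColumnNotFoundError, not KeyError
except (ValueError, KeyError, ColumnNotFoundError):
    result = pd.DataFrame()  # degrade to an empty frame, as data_for_key does
```
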
2 changes: 1 addition & 1 deletion src/ert/ensemble_evaluator/evaluator.py
@@ -86,7 +86,7 @@ def __init__(self, ensemble: Ensemble, config: EvaluatorServerConfig):
            List[Tuple[EVENT_HANDLER, Event]]
        ] = asyncio.Queue()
        self._max_batch_size: int = 500
-        self._batching_interval: int = 2
+        self._batching_interval: float = 2.0
        self._complete_batch: asyncio.Event = asyncio.Event()

    async def _publisher(self) -> None:
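
The annotation changes from `int` to `float` because the batching interval is a duration in seconds handed to asyncio's timing primitives, which accept floats; `2` happened to work, but the type was wrong. A simplified sketch of the batch-and-flush pattern such a publisher loop uses — hypothetical names, not the evaluator's actual code:

```python
import asyncio


async def drain_in_batches(
    queue: asyncio.Queue,
    batching_interval: float = 2.0,
    max_batch_size: int = 500,
) -> None:
    loop = asyncio.get_running_loop()
    while True:
        batch = []
        deadline = loop.time() + batching_interval
        # Collect events until the batch fills or the interval elapses.
        while len(batch) < max_batch_size and (timeout := deadline - loop.time()) > 0:
            try:
                batch.append(await asyncio.wait_for(queue.get(), timeout=timeout))
            except asyncio.TimeoutError:
                break
        if batch:
            print(f"flushing {len(batch)} events")
```
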
3 changes: 0 additions & 3 deletions src/ert/simulator/batch_simulator.py
@@ -171,9 +171,6 @@ def _check_suffix(

        for control_name, control in controls.items():
            ext_config = self.parameter_configurations[control_name]
-
-            # fix this
-
            if isinstance(ext_config, ExtParamConfig):
                if len(ext_config) != len(control.keys()):
                    raise KeyError(
42 changes: 29 additions & 13 deletions tests/ert/ui_tests/gui/test_main_window.py
@@ -4,11 +4,13 @@
import stat
from pathlib import Path
from textwrap import dedent
+from typing import List
from unittest.mock import MagicMock, Mock, patch

import numpy as np
import pytest
from qtpy.QtCore import Qt, QTimer
+from qtpy.QtGui import QWindow
from qtpy.QtWidgets import (
    QAction,
    QApplication,
@@ -612,25 +614,39 @@ def test_right_click_plot_button_opens_external_plotter(qtbot, storage, monkeypa
    button_plot_tool = gui.findChild(SidebarToolButton, "button_Create_plot")
    assert button_plot_tool

-    qtbot.mouseClick(button_plot_tool, Qt.LeftButton)
-    plot_window = wait_for_child(gui, qtbot, PlotWindow)
-    assert plot_window
+    def top_level_plotter_windows() -> List[QWindow]:
+        top_level_plot_windows = []
+        top_level_windows = QApplication.topLevelWindows()
+        for win in top_level_windows:
+            if "Plotting" in win.title() and win.isVisible():
+                top_level_plot_windows.append(win)
+        return top_level_plot_windows

-    prev_open_windows = len(QApplication.topLevelWindows())
-
-    def detect_external_plot_widget_open_on_right_click(plot_count: int):
-        previous_count = plot_count - 1
-        assert len(QApplication.topLevelWindows()) == previous_count
+    def right_click_plotter_button() -> None:
+        top_level_windows = len(top_level_plotter_windows())
        qtbot.mouseClick(button_plot_tool, Qt.RightButton)
        qtbot.wait_until(
-            lambda: len(QApplication.topLevelWindows()) != previous_count,
+            lambda: len(top_level_plotter_windows()) > top_level_windows,
            timeout=5000,
        )
-        assert len(QApplication.topLevelWindows()) == plot_count

-    detect_external_plot_widget_open_on_right_click(prev_open_windows + 1)
-    detect_external_plot_widget_open_on_right_click(prev_open_windows + 2)
-    detect_external_plot_widget_open_on_right_click(prev_open_windows + 3)
+    right_click_plotter_button()
+    right_click_plotter_button()
+    right_click_plotter_button()
+
+    window_list = top_level_plotter_windows()
+    assert len(window_list) == 3
+
+    for window in window_list:
+        window.close()
+
+    qtbot.wait_until(lambda: not top_level_plotter_windows(), timeout=5000)
+
+    qtbot.mouseClick(button_plot_tool, Qt.LeftButton)
+    plot_window = wait_for_child(gui, qtbot, PlotWindow)
+    assert plot_window
+    assert "Plotting" in plot_window.windowTitle()

    gui.close()


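
The rewritten test synchronizes on the effect of each click with `qtbot.wait_until`, which polls a predicate until it turns truthy or the timeout (milliseconds) expires, and it counts only visible windows titled "Plotting" so unrelated top-level windows cannot skew the count. The bare shape of that pattern, as a sketch with hypothetical names:

```python
# Sketch of the pytest-qt synchronization idiom used above:
# act, then poll until the observable effect appears.
def act_and_wait(qtbot, do_action, count_windows, timeout_ms: int = 5000) -> None:
    before = count_windows()
    do_action()
    qtbot.wait_until(lambda: count_windows() > before, timeout=timeout_ms)
```
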
34 changes: 12 additions & 22 deletions tests/ert/unit_tests/config/test_ert_config.py
@@ -4,6 +4,7 @@
import os
import os.path
import stat
+import warnings
from datetime import date
from pathlib import Path
from textwrap import dedent
@@ -1537,9 +1538,7 @@ def test_general_option_in_local_config_has_priority_over_site_config():


@pytest.mark.usefixtures("use_tmpdir")
-def test_warning_raised_when_summary_key_and_no_simulation_job_present(caplog, recwarn):
-    caplog.set_level(logging.WARNING)
-
+def test_warning_raised_when_summary_key_and_no_simulation_job_present():
    with open("job_file", "w", encoding="utf-8") as fout:
        fout.write("EXECUTABLE echo\nARGLIST <ECLBASE> <RUNPATH>\n")

@@ -1548,34 +1547,26 @@ def test_warning_raised_when_summary_key_and_no_simulation_job_present(caplog, r
        fout.write("NUM_REALIZATIONS 1\n")
        fout.write("SUMMARY *\n")
        fout.write("ECLBASE RESULT_SUMMARY\n")
-
        fout.write("INSTALL_JOB job_name job_file\n")
        fout.write(
            "FORWARD_MODEL job_name(<ECLBASE>=A/<ECLBASE>, <RUNPATH>=<RUNPATH>/x)\n"
        )
+    with warnings.catch_warnings(record=True) as all_warnings:
+        ErtConfig.from_file("config_file.ert")

-    ErtConfig.from_file("config_file.ert")
-
-    # Check no warning is logged when config contains
-    # forward model step with <ECLBASE> and <RUNPATH> as arguments
-    assert not caplog.text
-    assert len(recwarn) == 1
-    assert issubclass(recwarn[0].category, ConfigWarning)
-    assert (
-        recwarn[0].message.info.message
+    assert any(
+        str(w.message)
        == "Config contains a SUMMARY key but no forward model steps known to generate a summary file"
+        for w in all_warnings
+        if isinstance(w.message, ConfigWarning)
    )


@pytest.mark.parametrize(
    "job_name", ["eclipse", "eclipse100", "flow", "FLOW", "ECLIPSE100"]
)
@pytest.mark.usefixtures("use_tmpdir")
-def test_no_warning_when_summary_key_and_simulation_job_present(
-    caplog, recwarn, job_name
-):
-    caplog.set_level(logging.WARNING)
-
+def test_no_warning_when_summary_key_and_simulation_job_present(job_name):
    with open("job_file", "w", encoding="utf-8") as fout:
        fout.write("EXECUTABLE echo\nARGLIST <ECLBASE> <RUNPATH>\n")

@@ -1589,9 +1580,8 @@ def test_no_warning_when_summary_key_and_simulation_job_present(
        fout.write(
            f"FORWARD_MODEL {job_name}(<ECLBASE>=A/<ECLBASE>, <RUNPATH>=<RUNPATH>/x)\n"
        )
-
-    ErtConfig.from_file("config_file.ert")
-
-    # Check no warning is logged when config contains
-    # forward model step with <ECLBASE> and <RUNPATH> as arguments
-    assert not any(w.message for w in recwarn if issubclass(w.category, ConfigWarning))
+    with warnings.catch_warnings():
+        warnings.simplefilter("error", category=ConfigWarning)
+        ErtConfig.from_file("config_file.ert")
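
Both tests swap the pytest `caplog`/`recwarn` fixtures for the stdlib `warnings` machinery: `catch_warnings(record=True)` collects emitted warnings for inspection, while `simplefilter("error", category=...)` escalates a matching warning into an exception, so "no warning" is proven by the call simply not raising. A self-contained sketch of both idioms, with a stand-in warning class rather than ert's `ConfigWarning`:

```python
import warnings


class DemoWarning(UserWarning):  # stand-in for ConfigWarning
    pass


# Idiom 1: record warnings, then assert the expected one was emitted.
with warnings.catch_warnings(record=True) as all_warnings:
    warnings.simplefilter("always")
    warnings.warn("summary key but no simulator step", DemoWarning)
assert any(isinstance(w.message, DemoWarning) for w in all_warnings)

# Idiom 2: escalate to an error so an unexpected warning fails loudly.
with warnings.catch_warnings():
    warnings.simplefilter("error", category=DemoWarning)
    pass  # code under test goes here; any DemoWarning would raise
```
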
102 changes: 102 additions & 0 deletions tests/ert/unit_tests/dark_storage/test_common.py
@@ -0,0 +1,102 @@
import pandas as pd
import polars
import pytest

from ert.config import GenDataConfig, SummaryConfig
from ert.dark_storage.common import data_for_key
from ert.storage import open_storage
from tests.ert.unit_tests.config.summary_generator import (
    Date,
    Simulator,
    Smspec,
    SmspecIntehead,
    SummaryMiniStep,
    SummaryStep,
    UnitSystem,
    Unsmry,
)


def test_data_for_key_gives_mean_for_duplicate_values(tmp_path):
    value1 = 1.1
    value2 = 1.0e19
    with open_storage(tmp_path / "storage", mode="w") as storage:
        summary_config = SummaryConfig(name="summary", input_files=["CASE"], keys=["*"])
        experiment = storage.create_experiment(
            observations={},
            parameters=[],
            responses=[summary_config],
        )
        ensemble = experiment.create_ensemble(name="ensemble", ensemble_size=1)
        unsmry = Unsmry(
            steps=[
                SummaryStep(
                    seqnum=0,
                    ministeps=[
                        SummaryMiniStep(mini_step=0, params=[0.0, 5.629901e16]),
                        SummaryMiniStep(mini_step=1, params=[365.0, value1]),
                    ],
                ),
                SummaryStep(
                    seqnum=1,
                    ministeps=[SummaryMiniStep(mini_step=2, params=[365.0, value2])],
                ),
            ]
        )
        smspec = Smspec(
            nx=4,
            ny=4,
            nz=10,
            restarted_from_step=0,
            num_keywords=2,
            restart=" ",
            keywords=["TIME ", "NRPPR"],
            well_names=[":+:+:+:+", "WELLNAME"],
            region_numbers=[-32676, 0],
            units=["HOURS ", "SM3"],
            start_date=Date(
                day=1, month=1, year=2014, hour=0, minutes=0, micro_seconds=0
            ),
            intehead=SmspecIntehead(
                unit=UnitSystem.METRIC,
                simulator=Simulator.ECLIPSE_100,
            ),
        )
        smspec.to_file(tmp_path / "CASE.SMSPEC")
        unsmry.to_file(tmp_path / "CASE.UNSMRY")
        ds = summary_config.read_from_file(tmp_path, 0)
        ensemble.save_response(summary_config.response_type, ds, 0)
        df = data_for_key(ensemble, "NRPPR:WELLNAME")
        assert list(df.columns) == [pd.Timestamp("2014-01-16 05:00:00")]
        assert df[pd.Timestamp("2014-01-16 05:00:00")][0] == pytest.approx(
            (value1 + value2) / 2
        )


def test_data_for_key_returns_empty_gen_data_config(tmp_path):
    with open_storage(tmp_path / "storage", mode="w") as storage:
        gen_data_config = GenDataConfig(keys=["response"])
        experiment = storage.create_experiment(
            observations={},
            parameters=[],
            responses=[gen_data_config],
        )
        ensemble = experiment.create_ensemble(name="ensemble", ensemble_size=1)

        data = data_for_key(ensemble, "response@0")
        assert data.empty

        ensemble.save_response(
            "gen_data",
            polars.DataFrame(
                {
                    "response_key": "response",
                    "report_step": polars.Series([0], dtype=polars.UInt16),
                    "index": polars.Series([0], dtype=polars.UInt16),
                    "values": polars.Series([0.0], dtype=polars.Float32),
                }
            ),
            0,
        )
        data = data_for_key(ensemble, "response@0")
        assert not data.empty
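
The first test pins down an aggregation detail: when the summary contains two values at the same timestamp, `data_for_key` returns their mean rather than either duplicate. The expected reduction, sketched independently of ert with plain pandas:

```python
import pandas as pd
import pytest

# Two samples at one timestamp collapse to their mean,
# mirroring the assertion in the test above.
ts = pd.Timestamp("2014-01-16 05:00:00")
series = pd.Series([1.1, 1.0e19], index=[ts, ts])
assert series.groupby(level=0).mean()[ts] == pytest.approx((1.1 + 1.0e19) / 2)
```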