diff --git a/lib/charms/data_platform_libs/v0/data_interfaces.py b/lib/charms/data_platform_libs/v0/data_interfaces.py
index aaed2e52..3bc2dd85 100644
--- a/lib/charms/data_platform_libs/v0/data_interfaces.py
+++ b/lib/charms/data_platform_libs/v0/data_interfaces.py
@@ -331,7 +331,7 @@ def _on_topic_requested(self, event: TopicRequestedEvent):
 # Increment this PATCH version before using `charmcraft publish-lib` or reset
 # to 0 if you are raising the major API version
-LIBPATCH = 39
+LIBPATCH = 40
 
 PYDEPS = ["ops>=2.0.0"]
 
@@ -391,6 +391,10 @@ class IllegalOperationError(DataInterfacesError):
     """To be used when an operation is not allowed to be performed."""
 
 
+class PrematureDataAccessError(DataInterfacesError):
+    """To be raised when relation data is accessed (written) before protocol initialization is complete."""
+
+
 ##############################################################################
 # Global helpers / utilities
 ##############################################################################
@@ -1453,6 +1457,8 @@ def _on_relation_changed_event(self, event: RelationChangedEvent) -> None:
 class ProviderData(Data):
     """Base provides-side of the data products relation."""
 
+    RESOURCE_FIELD = "database"
+
     def __init__(
         self,
         model: Model,
@@ -1618,6 +1624,15 @@ def _fetch_my_specific_relation_data(
     def _update_relation_data(self, relation: Relation, data: Dict[str, str]) -> None:
         """Set values for fields not caring whether it's a secret or not."""
         req_secret_fields = []
+
+        keys = set(data.keys())
+        if self.fetch_relation_field(relation.id, self.RESOURCE_FIELD) is None and (
+            keys - {"endpoints", "read-only-endpoints", "replset"}
+        ):
+            raise PrematureDataAccessError(
+                "Premature access to relation data: update is forbidden before the connection is initialized."
+            )
+
         if relation.app:
             req_secret_fields = get_encoded_list(relation, relation.app, REQ_SECRET_FIELDS)
 
@@ -3290,6 +3305,8 @@ class KafkaRequiresEvents(CharmEvents):
 class KafkaProviderData(ProviderData):
     """Provider-side of the Kafka relation."""
 
+    RESOURCE_FIELD = "topic"
+
     def __init__(self, model: Model, relation_name: str) -> None:
         super().__init__(model, relation_name)
 
@@ -3539,6 +3556,8 @@ class OpenSearchRequiresEvents(CharmEvents):
 class OpenSearchProvidesData(ProviderData):
     """Provider-side of the OpenSearch relation."""
 
+    RESOURCE_FIELD = "index"
+
     def __init__(self, model: Model, relation_name: str) -> None:
         super().__init__(model, relation_name)
 
diff --git a/poetry.lock b/poetry.lock
index 15049684..a765b441 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
 
 [[package]]
 name = "allure-pytest"
@@ -3029,4 +3029,4 @@ test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools",
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.10"
-content-hash = "2f6f0689a9bb04f799d8dc6eec1e8f7d16ed37f61bfb015b23a89678e6dce30b"
+content-hash = "86b2df93c2847d3c9cd66abe03d77f58b9d8956c73edf512b68b6768ef9eb7af"
diff --git a/pyproject.toml b/pyproject.toml
index 38b74a3f..7cad1224 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -99,6 +99,7 @@ pytest = "^8.2.2"
 juju = "~3.5.0"
 tenacity = "^8.4.2"
 coverage = {extras = ["toml"], version = ">7.0"}
+pytest-asyncio = "^0.21.2"
 pytest-operator = ">0.20"
 pytest-operator-cache = {git = "https://github.com/canonical/data-platform-workflows", tag = "v16.7.0", subdirectory = "python/pytest_plugins/pytest_operator_cache"}
 pytest-operator-groups = {git = "https://github.com/canonical/data-platform-workflows", tag = "v16.7.0", subdirectory = "python/pytest_plugins/pytest_operator_groups"}
diff --git a/src/charm.py b/src/charm.py
index 03a8f072..c93257f1 100755
--- a/src/charm.py
+++ b/src/charm.py
@@ -135,7 +135,6 @@ def _on_install(self, event: InstallEvent) -> None:
 
     def reconcile(self, event: EventBase) -> None:
         """Generic handler for all 'something changed, update' events across all relations."""
-
         # 1. Block until peer relation is set
         if not self.state.peer_relation:
             self.unit.status = WaitingStatus(MSG_WAITING_FOR_PEER)
diff --git a/src/core/cluster.py b/src/core/cluster.py
index ddf8ecb9..cffa0cf5 100644
--- a/src/core/cluster.py
+++ b/src/core/cluster.py
@@ -4,6 +4,7 @@
 """Collection of global cluster state."""
 
 import logging
+from ipaddress import IPv4Address, IPv6Address
 
 from charms.data_platform_libs.v0.data_interfaces import (
     DataPeerData,
@@ -23,6 +24,7 @@
     PEER,
     PEER_APP_SECRETS,
     PEER_UNIT_SECRETS,
+    SERVER_PORT,
 )
 
 logger = logging.getLogger(__name__)
@@ -63,7 +65,7 @@ def opensearch_relation(self) -> Relation | None:
 
     @property
     def tls_relation(self) -> Relation | None:
-        """The cluster peer relation."""
+        """The cluster TLS relation."""
         return self.model.get_relation(CERTS_REL_NAME)
 
     # --- CORE COMPONENTS---
@@ -141,6 +143,16 @@ def opensearch_server(self) -> OpensearchServer | None:
             local_app=self.cluster.app,
         )
 
+    @property
+    def bind_address(self) -> IPv4Address | IPv6Address | str | None:
+        """The network binding address from the peer relation."""
+        bind_address = None
+        if self.peer_relation:
+            if binding := self.model.get_binding(self.peer_relation):
+                bind_address = binding.network.bind_address
+        # If the relation does not exist, then we get None
+        return bind_address
+
     # --- CLUSTER INIT ---
 
     @property
@@ -162,3 +174,9 @@ def stable(self) -> bool:
             return False
 
         return True
+
+    @property
+    def url(self) -> str:
+        """Service URL."""
+        scheme = "https" if self.unit_server.tls else "http"
+        return f"{scheme}://{self.bind_address}:{SERVER_PORT}"
diff --git a/src/core/models.py b/src/core/models.py
index 96b5a452..48da2868 100644
--- a/src/core/models.py
+++ b/src/core/models.py
@@ -5,15 +5,12 @@
 """Collection of state objects for relations, apps and units."""
 
 import logging
 import socket
-import subprocess
 from typing import Literal, MutableMapping
 
 from charms.data_platform_libs.v0.data_interfaces import Data, DataDict
 from ops.model import Application, Relation, Unit
 from typing_extensions import override
 
-from literals import SERVER_PORT
-
 logger = logging.getLogger(__name__)
 
 SUBSTRATES = Literal["vm", "k8s"]
@@ -178,24 +175,17 @@ def hostname(self) -> str:
 
     @property
     def fqdn(self) -> str:
         """The Fully Qualified Domain Name for the unit."""
-        return socket.getfqdn()
+        return socket.getfqdn(self.private_ip)
 
     @property
     def private_ip(self) -> str:
-        """The IP for the unit."""
+        """The IP for the unit, resolved using socket."""
         return socket.gethostbyname(self.hostname)
 
     @property
     def public_ip(self) -> str:
-        result = subprocess.check_output(
-            [
-                "bash",
-                "-c",
-                "ip a | grep global | grep -v 'inet 10.' | cut -d' ' -f6 | cut -d'/' -f1",
-            ],
-            text=True,
-        )
-        return result.rstrip()
+        """The public IP for the unit."""
+        return socket.gethostbyname(self.hostname)
 
     @property
     def host(self) -> str:
@@ -245,12 +236,6 @@ def sans(self) -> dict[str, list[str]]:
             return {}
 
         return {
-            "sans_ip": [self.private_ip],
-            "sans_dns": [self.hostname, self.fqdn],
+            "sans_ip": [self.private_ip, self.public_ip],
+            "sans_dns": [dns for dns in {self.hostname, self.fqdn} if dns],
         }
-
-    @property
-    def url(self) -> str:
-        """Service URL."""
-        scheme = "https" if self.tls else "http"
-        return f"{scheme}://{self.private_ip}:{SERVER_PORT}"
diff --git a/src/events/tls.py b/src/events/tls.py
index c786eea1..1725d3da 100644
--- a/src/events/tls.py
+++ b/src/events/tls.py
@@ -63,18 +63,22 @@ def _request_certificates(self):
         if self.charm.state.unit_server.tls:
             self._remove_certificates()
 
+        sans_ip = set(
+            self.charm.state.unit_server.sans.get("sans_ip", [])
+            + [str(self.charm.state.bind_address or "")]
+        )
+        sans_dns = set(self.charm.state.unit_server.sans.get("sans_dns", []))
+
         logger.debug(
             "Requesting certificate for: "
-            f"host {self.charm.state.unit_server.host},"
-            f"with IP {self.charm.state.unit_server.sans.get('sans_ip', [])},"
-            f"DNS {self.charm.state.unit_server.sans.get('sans_dns', [])}"
+            f"host {self.charm.state.unit_server.host}, with IP {sans_ip}, DNS {sans_dns}"
         )
 
         csr = generate_csr(
             private_key=self.charm.state.unit_server.private_key.encode("utf-8"),
-            subject=self.charm.state.unit_server.private_ip,
-            sans_ip=self.charm.state.unit_server.sans.get("sans_ip", []),
-            sans_dns=self.charm.state.unit_server.sans.get("sans_dns", []),
+            subject=str(self.charm.state.bind_address or self.charm.state.unit_server.private_ip),
+            sans_ip=list(sans_ip),
+            sans_dns=list(sans_dns),
         )
 
         self.charm.state.unit_server.update({"csr": csr.decode("utf-8").strip()})
diff --git a/src/literals.py b/src/literals.py
index 793bdf4f..400f2fbf 100644
--- a/src/literals.py
+++ b/src/literals.py
@@ -59,6 +59,7 @@
 MSG_STATUS_ERROR = "Service is an error state"
 MSG_STATUS_WORKLOAD_DOWN = "Workload is not alive"
 MSG_STATUS_UNKNOWN = "Workload status is not known"
+MSG_STATUS_APP_REMOVED = "remove-application was requested: leaving..."
 
 MSG_APP_STATUS = [
     MSG_STATUS_DB_DOWN,
diff --git a/src/managers/api.py b/src/managers/api.py
index fefe88f4..c7eeb355 100644
--- a/src/managers/api.py
+++ b/src/managers/api.py
@@ -69,7 +69,7 @@ def request(
         if None in [endpoint, method]:
             raise ValueError("endpoint or method missing")
 
-        full_url = f"{self.state.unit_server.url}/api/{endpoint}"
+        full_url = f"{self.state.url}/api/{endpoint}"
 
         request_kwargs = {
             "verify": self.workload.paths.ca,
diff --git a/src/managers/config.py b/src/managers/config.py
index 6db56840..75078904 100644
--- a/src/managers/config.py
+++ b/src/managers/config.py
@@ -102,7 +102,8 @@ def dashboard_properties(self) -> list[str]:
 
         opensearch_ca = self.workload.paths.opensearch_ca if self.state.opensearch_server else ""
 
-        properties += [f"server.host: '{self.state.unit_server.private_ip}'"]
+        # Use the bind address exposed by Juju as the service address
+        properties += [f"server.host: '{self.state.bind_address}'"]
         properties += (
             [
                 f"opensearch.username: {opensearch_user}",
diff --git a/src/managers/health.py b/src/managers/health.py
index c5e76ac2..67c03372 100644
--- a/src/managers/health.py
+++ b/src/managers/health.py
@@ -15,6 +15,7 @@
 from exceptions import OSDAPIError
 from literals import (
     HEALTH_OPENSEARCH_STATUS_URL,
+    MSG_STATUS_APP_REMOVED,
     MSG_STATUS_DB_DOWN,
     MSG_STATUS_DB_MISSING,
     MSG_STATUS_ERROR,
@@ -63,6 +64,9 @@ def status_ok(self) -> tuple[bool, str]:
 
     def opensearch_ok(self) -> tuple[bool, str]:
         """Verify if associated Opensearch service is up and running."""
+        if not self.state.url:
+            return False, MSG_STATUS_APP_REMOVED
+
         if not self.state.opensearch_server or not (
             os.path.exists(self.workload.paths.opensearch_ca)
             and os.path.getsize(self.workload.paths.opensearch_ca) > 0
diff --git a/src/managers/tls.py b/src/managers/tls.py
index ca56b210..54d984e9 100644
--- a/src/managers/tls.py
+++ b/src/managers/tls.py
@@ -86,4 +86,4 @@ def certificate_valid(self) -> bool:
         logger.debug(
             f"Currently recognized IP using 'gethostbyname': {self.state.unit_server.private_ip}"
         )
-        return self.state.unit_server.private_ip in response
+        return str(self.state.bind_address) in response
diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py
index 435e14e0..004199f6 100644
--- a/tests/integration/helpers.py
+++ b/tests/integration/helpers.py
@@ -31,6 +31,23 @@
 METADATA = yaml.safe_load(Path("./metadata.yaml").read_text())
 APP_NAME = METADATA["name"]
 OPENSEARCH_APP_NAME = "opensearch"
+SERIES = "jammy"
+
+OPENSEARCH_RELATION_NAME = "opensearch-client"
+OPENSEARCH_CONFIG = {
+    "logging-config": "=INFO;unit=DEBUG",
+    "cloudinit-userdata": """postruncmd:
+        - [ 'sysctl', '-w', 'vm.max_map_count=262144' ]
+        - [ 'sysctl', '-w', 'fs.file-max=1048576' ]
+        - [ 'sysctl', '-w', 'vm.swappiness=0' ]
+        - [ 'sysctl', '-w', 'net.ipv4.tcp_retries2=5' ]
+    """,
+}
+
+TLS_CERTIFICATES_APP_NAME = "self-signed-certificates"
+COS_AGENT_APP_NAME = "grafana-agent"
+COS_AGENT_RELATION_NAME = "cos-agent"
+DB_CLIENT_APP_NAME = "application"
 
 logger = logging.getLogger(__name__)
@@ -213,7 +231,7 @@ def all_dashboards_unavailable(ops_test: OpsTest, https: bool = False) -> bool:
             logger.info(f"Couldn't retrieve host certificate for unit {unit}")
             continue
 
-        host = get_private_address(ops_test.model.name, unit.name)
+        host = get_bind_address(ops_test.model.name, unit.name)
 
         # We should retry until a host could be retrieved
        if not host:
@@ -258,7 +276,6 @@ def access_dashboard(
     arguments = {"url": url, "headers": headers, "json": data}
     if ssl:
         arguments["verify"] = "./ca.pem"
-
     response = requests.post(**arguments)
     return response.status_code == 200
@@ -298,7 +315,7 @@ async def access_all_dashboards(
     for unit in ops_test.model.applications[APP_NAME].units:
         if unit.name in skip:
             continue
-        host = get_private_address(ops_test.model.name, unit.name)
+        host = get_bind_address(ops_test.model.name, unit.name)
         if not host:
             logger.error(f"No hostname found for {unit.name}, can't check connection.")
             return False
@@ -371,14 +388,19 @@ async def get_address(ops_test: OpsTest, unit_name: str) -> str:
     return address
 
 
-def get_private_address(model_full_name: str, unit: str):
+def get_bind_address(model_full_name: str, unit: str):
     try:
         private_ip = check_output(
             [
-                "bash",
-                "-c",
-                f"JUJU_MODEL={model_full_name} juju ssh {unit} ip a | "
-                "grep global | grep 'inet 10.*/24' | cut -d' ' -f6 | cut -d'/' -f1",
+                "juju",
+                "exec",
+                f"--model={model_full_name}",
+                "--unit",
+                unit,
+                "--",
+                "network-get",
+                OPENSEARCH_RELATION_NAME,
+                "--bind-address",
             ],
             text=True,
         )
@@ -742,7 +764,7 @@ async def client_run_all_dashboards_request(
         return False
 
     for dashboards_unit in ops_test.model.applications[APP_NAME].units:
-        host = get_private_address(ops_test.model.name, dashboards_unit.name)
+        host = get_bind_address(ops_test.model.name, dashboards_unit.name)
         if not host:
             logger.debug(f"No hostname found for {dashboards_unit.name}, can't check connection.")
             return False
@@ -774,3 +796,21 @@ async def destroy_cluster(ops_test, app: str = OPENSEARCH_APP_NAME):
     # This case we don't raise an error in the context manager which
     # fails to restore the `update-status-hook-interval` value to it's former state.
     assert n_apps_after == n_apps_before - 1, "old cluster not destroyed successfully."
+
+
+async def for_machines(ops_test, machines, state="started"):
+    """Wait until all of the given machine ids reach the desired state in Juju."""
+    for attempt in Retrying(stop=stop_after_attempt(10), wait=wait_fixed(wait=60)):
+        with attempt:
+            mach_status = json.loads(
+                subprocess.check_output(
+                    ["juju", "machines", f"--model={ops_test.model.name}", "--format=json"]
+                )
+            )["machines"]
+            for machine_id in machines:
+                if (
+                    str(machine_id) not in mach_status.keys()
+                    or mach_status[str(machine_id)]["juju-status"]["current"] != state
+                ):
+                    logger.warning(f"machine-{machine_id} does not exist yet or is not {state}")
+                    raise Exception(f"machine-{machine_id} is not yet in state '{state}'")
diff --git a/tests/integration/spaces/__init__.py b/tests/integration/spaces/__init__.py
new file mode 100644
index 00000000..e3979c0f
--- /dev/null
+++ b/tests/integration/spaces/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
diff --git a/tests/integration/spaces/conftest.py b/tests/integration/spaces/conftest.py
new file mode 100644
index 00000000..45677139
--- /dev/null
+++ b/tests/integration/spaces/conftest.py
@@ -0,0 +1,140 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+import logging
+import os
+import subprocess
+
+import pytest
+from pytest_operator.plugin import OpsTest
+
+logger = logging.getLogger(__name__)
+
+
+DEFAULT_LXD_NETWORK = "lxdbr0"
+RAW_DNSMASQ = """dhcp-option=3
+dhcp-option=6"""
+
+
+def _lxd_network(name: str, subnet: str, external: bool = True):
+    """Create an LXD bridge network with the given subnet, unless it already exists."""
+
+    # Don't create the network if it already exists
+    try:
+        subprocess.run(
+            ["sudo", "lxc", "network", "show", name],
+            capture_output=True,
+            check=True,
+            encoding="utf-8",
+        )
+        logger.info(f"LXD network {name} already exists")
+        return
+    except subprocess.CalledProcessError:
+        # If we can't show the network, let's try to create it
+        pass
+
+    try:
+        output = subprocess.run(
+            [
+                "sudo",
+                "lxc",
+                "network",
+                "create",
+                name,
+                "--type=bridge",
+                f"ipv4.address={subnet}",
+                f"ipv4.nat={external}".lower(),
+                "ipv6.address=none",
+                "dns.mode=none",
+            ],
+            capture_output=True,
+            check=True,
+            encoding="utf-8",
+        ).stdout
+        logger.info(f"LXD network created: {output}")
+        output = subprocess.run(
+            ["sudo", "lxc", "network", "show", name],
+            capture_output=True,
+            check=True,
+            encoding="utf-8",
+        ).stdout
+        logger.debug(f"LXD network status: {output}")
+
+        if not external:
+            subprocess.run(
+                ["sudo", "lxc", "network", "set", name, "raw.dnsmasq", RAW_DNSMASQ], check=True
+            )
+
+        subprocess.run(f"sudo ip link set up dev {name}".split(), check=True)
+    except subprocess.CalledProcessError as e:
+        logger.error(f"Error creating LXD network {name} with: {e.returncode} {e.stderr}")
+        raise
+
+
+@pytest.fixture(scope="module")
+def lxd():
+    try:
+        # Set all networks' dns.mode=none
+        # We want to avoid check:
+        # https://github.com/canonical/lxd/blob/
+        # 762f7dc5c3dc4dbd0863a796898212d8fbe3f7c3/lxd/device/nic_bridged.go#L403
+        # As described on:
+        # https://discuss.linuxcontainers.org/t/
+        # error-failed-start-validation-for-device-enp3s0f0-instance
+        # -dns-name-net17-nicole-munoz-marketing-already-used-on-network/15586/22?page=2
+        subprocess.run(
+            [
+                "sudo",
+                "lxc",
+                "network",
+                "set",
+                DEFAULT_LXD_NETWORK,
+                "dns.mode=none",
+            ],
+            check=True,
+        )
+    except subprocess.CalledProcessError as e:
+        logger.error(
+            f"Error setting dns.mode=none on LXD network {DEFAULT_LXD_NETWORK}: {e.returncode} {e.stderr}"
+        )
+        raise
+    _lxd_network("client", "10.0.0.1/24", True)
+    _lxd_network("cluster", "10.10.10.1/24", False)
+    _lxd_network("backup", "10.20.20.1/24", False)
+
+
+@pytest.fixture(scope="module")
+def lxd_spaces(ops_test: OpsTest, lxd):
+    subprocess.run(
+        [
+            "juju",
+            "reload-spaces",
+            f"--model={ops_test.model.name}",
+        ],
+    )
+    spaces = [("client", "10.0.0.0/24"), ("cluster", "10.10.10.0/24"), ("backup", "10.20.20.0/24")]
+    for space in spaces:
+        subprocess.run(
+            f"juju add-space --model={ops_test.model.name} {space[0]} {space[1]}".split(),
+            check=True,
+        )
+
+
+@pytest.hookimpl()
+def pytest_sessionfinish(session, exitstatus):
+    if os.environ.get("CI", "true").lower() == "true":
+        # Nothing to do, as this is a temp runner only
+        return
+
+    def __exec(cmd):
+        try:
+            subprocess.run(cmd.split(), check=True)
+        except subprocess.CalledProcessError as e:
+            # Log and try to delete the next network
+            logger.warning(f"Error deleting LXD network with: {e.returncode} {e.stderr}")
+
+    for network in ["client", "cluster", "backup"]:
+        __exec(f"sudo lxc network delete {network}")
+
+    __exec(f"sudo lxc network unset {DEFAULT_LXD_NETWORK} dns.mode")
diff --git a/tests/integration/spaces/test_wrong_etc_hosts.py b/tests/integration/spaces/test_wrong_etc_hosts.py
new file mode 100644
index 00000000..84cc0943
--- /dev/null
+++ b/tests/integration/spaces/test_wrong_etc_hosts.py
@@ -0,0 +1,138 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+import logging
+import subprocess
+
+import pytest
+from pytest_operator.plugin import OpsTest
+
+from ..helpers import (
+    APP_NAME,
+    OPENSEARCH_APP_NAME,
+    SERIES,
+    TLS_CERTIFICATES_APP_NAME,
+    access_all_dashboards,
+    access_all_prometheus_exporters,
+    for_machines,
+    get_relation,
+)
+
+logger = logging.getLogger(__name__)
+
+
+DEFAULT_NUM_UNITS = 3
+
+
+@pytest.mark.runner(["self-hosted", "linux", "X64", "jammy", "large"])
+@pytest.mark.group(1)
+@pytest.mark.abort_on_fail
+@pytest.mark.skip_if_deployed
+async def test_build_and_deploy(ops_test: OpsTest, lxd_spaces) -> None:
+    """Build and deploy OpenSearch Dashboards.
+
+    For this test, we create machines attached to multiple spaces and inject
+    a record into /etc/hosts on each, as follows:
+    127.0.1.1 $(hostname -f)
+
+    More information: gh:canonical/opensearch-dashboards-operator#121
+    """
+    osd_charm = await ops_test.build_charm(".")
+
+    for _ in range(DEFAULT_NUM_UNITS):
+        await ops_test.model.add_machine(
+            spec=None,
+            constraints={"spaces": ["alpha", "cluster", "backup", "client"]},
+            series=SERIES,
+        )
+
+    await for_machines(ops_test, machines=list(range(DEFAULT_NUM_UNITS)))
+
+    # Now, we should SSH to each machine and inject the record into /etc/hosts
+    machine_ip = "127.0.1.1"
+    for machine_id in range(DEFAULT_NUM_UNITS):
+        subprocess.check_output(
+            [
+                "juju",
+                "ssh",
+                f"--model={ops_test.model.name}",
+                str(machine_id),
+                "--",
+                "sudo",
+                "sed",
+                "-i",
+                f'"1i\\{machine_ip} $(hostname -f)"',
+                "/etc/hosts",
+            ]
+        )
+
+    await ops_test.model.deploy(
+        osd_charm,
+        num_units=DEFAULT_NUM_UNITS,
+        series=SERIES,
+        constraints="spaces=alpha,client,cluster,backup",
+        bind={"": "cluster"},
+        to=[str(i) for i in range(DEFAULT_NUM_UNITS)],
+    )
+    config = {"ca-common-name": "CN_CA"}
+    await ops_test.model.deploy(
+        TLS_CERTIFICATES_APP_NAME,
+        channel="stable",
+        constraints="spaces=alpha,client,cluster,backup",
+        bind={"": "cluster"},
+        config=config,
+    )
+    await ops_test.model.deploy(
+        "opensearch",
+        channel="2/edge",
+        constraints="spaces=alpha,client,cluster,backup",
+        bind={"": "cluster"},
+        num_units=3,
+    )
+    await ops_test.model.integrate(OPENSEARCH_APP_NAME, TLS_CERTIFICATES_APP_NAME)
+    await ops_test.model.integrate(OPENSEARCH_APP_NAME, APP_NAME)
+
+    await ops_test.model.wait_for_idle(
+        apps=[TLS_CERTIFICATES_APP_NAME, APP_NAME, OPENSEARCH_APP_NAME],
+        status="active",
+        timeout=1000,
+    )
+    assert len(ops_test.model.applications[APP_NAME].units) == DEFAULT_NUM_UNITS
+
+
+@pytest.mark.runner(["self-hosted", "linux", "X64", "jammy", "large"])
+@pytest.mark.group(1)
+@pytest.mark.abort_on_fail
+async def test_dashboard_access_http(ops_test: OpsTest):
+    """Test HTTP access to each dashboard unit."""
+    assert await access_all_dashboards(ops_test, get_relation(ops_test).id)
+    assert await access_all_prometheus_exporters(ops_test)
+
+
+##############################################################################
+
+
+@pytest.mark.runner(["self-hosted", "linux", "X64", "jammy", "large"])
+@pytest.mark.group(1)
+@pytest.mark.abort_on_fail
+async def test_tls_on(ops_test: OpsTest) -> None:
+    """Not a real test; it only switches TLS on."""
+    # Relate Dashboards to the TLS certificates operator to enable TLS.
+    await ops_test.model.integrate(APP_NAME, TLS_CERTIFICATES_APP_NAME)
+
+    await ops_test.model.wait_for_idle(
+        apps=[APP_NAME, TLS_CERTIFICATES_APP_NAME], status="active", timeout=3000, idle_period=30
+    )
+
+
+##############################################################################
+
+
+@pytest.mark.runner(["self-hosted", "linux", "X64", "jammy", "large"])
+@pytest.mark.group(1)
+@pytest.mark.abort_on_fail
+async def test_dashboard_access_https(ops_test: OpsTest):
+    """Test HTTPS access to each dashboard unit."""
+    assert await access_all_dashboards(ops_test, get_relation(ops_test).id, https=True)
+    assert await access_all_prometheus_exporters(ops_test)
diff --git a/tests/unit/test_api.py b/tests/unit/test_api.py
index a1317ef0..9f9194a9 100644
--- a/tests/unit/test_api.py
+++ b/tests/unit/test_api.py
@@ -4,6 +4,7 @@
 
 import logging
 from pathlib import Path
+from unittest.mock import MagicMock, patch
 
 import pytest
 import responses
@@ -34,6 +35,9 @@ def harness():
     harness.add_relation_unit(opensearch_rel_id, "opensearch/0")
     harness._update_config({"log_level": "debug"})
     harness.begin()
+    harness.charm.model.get_binding = MagicMock()
+    harness.charm.model.get_binding.return_value.network.bind_address = "10.10.10.10"
+
     return harness
@@ -91,13 +95,13 @@ def test_api_request(harness):
             ],
         },
     }
+    harness.charm.state.unit_server.relation = MagicMock(name="test")
     responses.add(
         method="GET",
-        url=f"{harness.charm.state.unit_server.url}/api/status",
+        url=f"{harness.charm.state.url}/api/status",
         json=expected_response,
     )
-
     response = harness.charm.api_manager.request("status")
     assert all(field in response for field in ["status", "name", "version"])
     assert all(field in response["status"] for field in ["statuses", "overall"])
@@ -160,7 +164,7 @@ def test_status(harness):
 
     responses.add(
         method="GET",
-        url=f"{harness.charm.state.unit_server.url}/api/status",
+        url=f"{harness.charm.state.url}/api/status",
         json=expected_response,
     )
diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py
index e0c26e77..239376d9 100644
--- a/tests/unit/test_charm.py
+++ b/tests/unit/test_charm.py
@@ -4,7 +4,7 @@
 
 import logging
 from pathlib import Path
-from unittest.mock import patch
+from unittest.mock import PropertyMock, patch
 
 import pytest
 import responses
@@ -115,7 +115,7 @@ def test_install_sets_ip_hostname_fqdn(harness):
     with patch("workload.ODWorkload.install", return_value=False):
         harness.charm.on.install.emit()
 
-    assert harness.charm.state.unit_server.private_ip
+    assert harness.charm.state.bind_address
 
 
 def test_relation_changed_emitted_for_leader_elected(harness):
@@ -359,7 +359,7 @@ def test_workload_down_blocked_status(harness):
 def test_service_unavailable_blocked_status(harness):
     responses.add(
         method="GET",
-        url=f"{harness.charm.state.unit_server.url}/api/status",
+        url=f"{harness.charm.state.url}/api/status",
         status=503,
         body="OpenSearch Dashboards server is not ready yet",
     )
@@ -398,7 +398,7 @@ def test_service_unhealthy(harness):
 
     responses.add(
         method="GET",
-        url=f"{harness.charm.state.unit_server.url}/api/status",
+        url=f"{harness.charm.state.url}/api/status",
         status=200,
         json=expected_response,
     )
@@ -418,6 +418,26 @@ def test_service_unhealthy(harness):
         patch("managers.config.ConfigManager.set_dashboard_properties"),
         patch("os.path.exists", return_value=True),
         patch("os.path.getsize", return_value=1),
+        patch(
+            "core.models.ODServer.hostname",
+            new_callable=PropertyMock,
+            return_value="opensearch-dashboards",
+        ),
+        patch(
+            "core.models.ODServer.fqdn",
+            new_callable=PropertyMock,
+            return_value="opensearch-dashboards",
+        ),
+        patch(
+            "managers.api.APIManager.request",
+            return_value={
+                "status": {
+                    "overall": {
+                        "state": "yellow",
+                    },
+                },
+            },
+        ),
     ):
         harness.charm.init_server()
         harness.charm.on.update_status.emit()
@@ -438,7 +458,7 @@ def test_service_error(harness):
 
     responses.add(
         method="GET",
-        url=f"{harness.charm.state.unit_server.url}/api/status",
+        url=f"{harness.charm.state.url}/api/status",
         status=200,
         json=expected_response,
     )
@@ -458,6 +478,26 @@ def test_service_error(harness):
         patch("managers.config.ConfigManager.set_dashboard_properties"),
         patch("os.path.exists", return_value=True),
         patch("os.path.getsize", return_value=1),
+        patch(
+            "core.models.ODServer.hostname",
+            new_callable=PropertyMock,
+            return_value="opensearch-dashboards",
+        ),
+        patch(
+            "core.models.ODServer.fqdn",
+            new_callable=PropertyMock,
+            return_value="opensearch-dashboards",
+        ),
+        patch(
+            "managers.api.APIManager.request",
+            return_value={
+                "status": {
+                    "overall": {
+                        "state": "red",
+                    },
+                },
+            },
+        ),
     ):
         harness.charm.init_server()
         harness.charm.on.update_status.emit()
@@ -478,7 +518,7 @@ def test_service_available(harness):
 
     responses.add(
         method="GET",
-        url=f"{harness.charm.state.unit_server.url}/api/status",
+        url=f"{harness.charm.state.url}/api/status",
         status=200,
         json=expected_response,
     )
@@ -498,6 +538,26 @@ def test_service_available(harness):
         patch("managers.config.ConfigManager.set_dashboard_properties"),
         patch("os.path.exists", return_value=True),
         patch("os.path.getsize", return_value=1),
+        patch(
+            "core.models.ODServer.hostname",
+            new_callable=PropertyMock,
+            return_value="opensearch-dashboards",
+        ),
+        patch(
+            "core.models.ODServer.fqdn",
+            new_callable=PropertyMock,
+            return_value="opensearch-dashboards",
+        ),
+        patch(
+            "managers.api.APIManager.request",
+            return_value={
+                "status": {
+                    "overall": {
+                        "state": "green",
+                    },
+                },
+            },
+        ),
     ):
         harness.charm.init_server()
         harness.charm.on.update_status.emit()
@@ -517,7 +577,7 @@ def test_wrong_opensearch_version(harness):
 
     responses.add(
         method="GET",
-        url=f"{harness.charm.state.unit_server.url}/api/status",
+        url=f"{harness.charm.state.url}/api/status",
         status=200,
         json=expected_response,
     )
diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py
index 40de9b50..fc327234 100644
--- a/tests/unit/test_config.py
+++ b/tests/unit/test_config.py
@@ -66,7 +66,7 @@ def test_log_level_changed(harness):
         patch("workload.ODWorkload.write") as write,
     ):
         assert harness.charm.config_manager.config_changed()
-        content = DEFAULT_CONF.format(ip=harness.charm.state.unit_server.private_ip)
+        content = DEFAULT_CONF.format(ip=harness.charm.state.bind_address)
         path = "/var/snap/opensearch-dashboards/current/etc/opensearch-dashboards/opensearch_dashboards.yml"
         write.assert_called_with(content=content, path=path)
diff --git a/tests/unit/test_health.py b/tests/unit/test_health.py
index 06f88023..c8df06d9 100644
--- a/tests/unit/test_health.py
+++ b/tests/unit/test_health.py
@@ -105,7 +105,7 @@ def test_health_status_ok(harness):
 
     responses.add(
         method="GET",
-        url=f"{harness.charm.state.unit_server.url}/api/status",
+        url=f"{harness.charm.state.url}/api/status",
         json=expected_response,
     )
@@ -119,7 +119,7 @@ def test_health_status_service_uniavail(harness):
 
     responses.add(
         method="GET",
-        url=f"{harness.charm.state.unit_server.url}/api/status",
+        url=f"{harness.charm.state.url}/api/status",
         status=503,
         body="OpenSearch Dashboards server is not ready yet",
     )
diff --git a/tests/unit/test_tls.py b/tests/unit/test_tls.py
index 9ac6a659..66d2c3a0 100644
--- a/tests/unit/test_tls.py
+++ b/tests/unit/test_tls.py
@@ -58,7 +58,10 @@ def test_certificates_available_fails_wrong_csr(harness):
     harness.update_relation_data(cert_rel_id, f"{CHARM_KEY}/0", {"csr": "not-missing"})
 
     harness.charm.tls_events.certificates.on.certificate_available.emit(
-        certificate_signing_request="missing", certificate="cert", ca="ca", chain=["ca", "cert"]
+        certificate_signing_request="missing",
+        certificate="cert",
+        ca="ca",
+        chain=["ca", "cert"],
     )
 
     assert not harness.charm.state.unit_server.certificate
@@ -153,9 +156,11 @@ def test_certificates_expiring(harness):
         },
     )
 
-    with patch(
-        "charms.tls_certificates_interface.v3.tls_certificates.TLSCertificatesRequiresV3.request_certificate_renewal",
-        return_value=None,
+    with (
+        patch(
+            "charms.tls_certificates_interface.v3.tls_certificates.TLSCertificatesRequiresV3.request_certificate_renewal",
+            return_value=None,
+        ),
     ):
         harness.charm.tls_events.certificates.on.certificate_expiring.emit(
             certificate="cert", expiry=None
         )
@@ -179,9 +184,11 @@ def test_set_tls_private_key(harness):
     )
     key = open("tests/keys/0.key").read()
 
-    with patch(
-        "charms.tls_certificates_interface.v3.tls_certificates.TLSCertificatesRequiresV3.request_certificate_renewal",
-        return_value=None,
+    with (
+        patch(
+            "charms.tls_certificates_interface.v3.tls_certificates.TLSCertificatesRequiresV3.request_certificate_renewal",
+            return_value=None,
+        ),
     ):
         harness.run_action("set-tls-private-key", {"internal-key": key})
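
Illustrative note (not part of the patch above): with the new PrematureDataAccessError guard in ProviderData._update_relation_data, a provider charm must publish its RESOURCE_FIELD ("database", "topic", or "index") before writing any other relation data; only "endpoints", "read-only-endpoints", and "replset" are exempt. A minimal sketch, assuming a DatabaseProviderData-style provider running on the leader unit; the function name, relation id, endpoint, and credential values are hypothetical:

from charms.data_platform_libs.v0.data_interfaces import (
    DatabaseProviderData,
    PrematureDataAccessError,
)

def publish_provider_data(model, relation_id: int, database: str) -> None:
    """Sketch: publish provider-side data in an order the new guard accepts."""
    provider = DatabaseProviderData(model, relation_name="database")
    # Endpoint-type fields ("endpoints", "read-only-endpoints", "replset")
    # are exempt and may be written before the resource field is set.
    provider.set_endpoints(relation_id, "10.0.0.5:5432")
    try:
        # Any other field written first raises, since "database" is not set yet.
        provider.set_credentials(relation_id, "operator", "hunter2")
    except PrematureDataAccessError:
        # Publish the resource field first, then the remaining data.
        provider.set_database(relation_id, database)
        provider.set_credentials(relation_id, "operator", "hunter2")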