From 2e83ee71f216867b51f1ae18c965e11ec913bd11 Mon Sep 17 00:00:00 2001 From: Javier Torres Date: Thu, 9 May 2024 23:41:48 +0200 Subject: [PATCH] Lint --- mypy.ini | 2 +- nucliadb/nucliadb/common/cluster/rebalance.py | 9 ++++- .../nucliadb/common/datamanagers/rollover.py | 2 +- nucliadb/nucliadb/ingest/consumer/consumer.py | 7 ++-- .../tests/integration/ingest/test_ingest.py | 5 ++- .../tests/unit/consumer/test_auditing.py | 11 ++--- .../tests/unit/consumer/test_shard_creator.py | 20 ++++++---- .../ingest/tests/unit/service/test_writer.py | 13 +++--- nucliadb/nucliadb/search/api/v1/utils.py | 2 +- nucliadb/nucliadb/search/requesters/utils.py | 6 ++- .../tests/unit/search/test_chat_prompt.py | 8 ++-- .../nucliadb/search/tests/unit/test_run.py | 5 ++- .../standalone/tests/unit/test_run.py | 16 ++++---- .../tests/migrations/test_migration_0018.py | 9 +++-- .../unit/common/cluster/discovery/test_k8s.py | 18 +++++---- .../common/cluster/standalone/test_service.py | 7 ++-- .../common/cluster/standalone/test_utils.py | 14 ++++--- .../unit/common/cluster/test_rollover.py | 16 +++++--- .../tests/unit/common/maindb/test_utils.py | 40 ++++++++++++------- .../unit/http_clients/test_processing.py | 12 +++--- nucliadb/nucliadb/tests/unit/test_health.py | 10 +++-- nucliadb/nucliadb/tests/unit/test_purge.py | 14 +++---- .../writer/tests/unit/api/v1/test_upload.py | 8 +++- .../nucliadb_node/tests/unit/test_app.py | 37 ++++++++--------- .../nucliadb_node/tests/unit/test_indexer.py | 7 ++-- .../nucliadb_node/tests/unit/test_pull.py | 5 ++- .../nucliadb_telemetry/errors.py | 8 ++-- .../tests/unit/test_errors.py | 39 ++++++++++-------- pyproject.toml | 2 +- 29 files changed, 203 insertions(+), 149 deletions(-) diff --git a/mypy.ini b/mypy.ini index ef884b6e43..d4f35d284a 100644 --- a/mypy.ini +++ b/mypy.ini @@ -63,4 +63,4 @@ ignore_missing_imports = True disable_error_code = arg-type, call-arg [mypy-aioresponses] -ignore_missing_imports = True \ No newline at end of file +ignore_missing_imports = True diff --git a/nucliadb/nucliadb/common/cluster/rebalance.py b/nucliadb/nucliadb/common/cluster/rebalance.py index 94008de4df..80ca9caa34 100644 --- a/nucliadb/nucliadb/common/cluster/rebalance.py +++ b/nucliadb/nucliadb/common/cluster/rebalance.py @@ -117,8 +117,13 @@ async def move_set_of_kb_resources( for result in search_response.document.results: resource_id = result.uuid try: - async with datamanagers.with_transaction() as txn, locking.distributed_lock( - locking.RESOURCE_INDEX_LOCK.format(kbid=kbid, resource_id=resource_id) + async with ( + datamanagers.with_transaction() as txn, + locking.distributed_lock( + locking.RESOURCE_INDEX_LOCK.format( + kbid=kbid, resource_id=resource_id + ) + ), ): found_shard_id = await datamanagers.resources.get_resource_shard_id( txn, kbid=kbid, rid=resource_id diff --git a/nucliadb/nucliadb/common/datamanagers/rollover.py b/nucliadb/nucliadb/common/datamanagers/rollover.py index 39654e8e73..6ab9633197 100644 --- a/nucliadb/nucliadb/common/datamanagers/rollover.py +++ b/nucliadb/nucliadb/common/datamanagers/rollover.py @@ -90,7 +90,7 @@ async def add_indexed( kbid: str, resource_id: str, shard_id: str, - modification_time: int + modification_time: int, ) -> None: to_index = KB_ROLLOVER_RESOURCES_TO_INDEX.format(kbid=kbid, resource=resource_id) indexed = KB_ROLLOVER_RESOURCES_INDEXED.format(kbid=kbid, resource=resource_id) diff --git a/nucliadb/nucliadb/ingest/consumer/consumer.py b/nucliadb/nucliadb/ingest/consumer/consumer.py index 9e13f2960d..463f20c371 100644 --- 
a/nucliadb/nucliadb/ingest/consumer/consumer.py +++ b/nucliadb/nucliadb/ingest/consumer/consumer.py @@ -148,9 +148,10 @@ async def subscription_worker(self, msg: Msg): message_source = "" start = time.monotonic() - async with MessageProgressUpdater( - msg, nats_consumer_settings.nats_ack_wait * 0.66 - ), self.lock: + async with ( + MessageProgressUpdater(msg, nats_consumer_settings.nats_ack_wait * 0.66), + self.lock, + ): logger.info( f"Message processing: subject:{subject}, seqid: {seqid}, reply: {reply}" ) diff --git a/nucliadb/nucliadb/ingest/tests/integration/ingest/test_ingest.py b/nucliadb/nucliadb/ingest/tests/integration/ingest/test_ingest.py index bea83d8e92..2a9a967f41 100644 --- a/nucliadb/nucliadb/ingest/tests/integration/ingest/test_ingest.py +++ b/nucliadb/nucliadb/ingest/tests/integration/ingest/test_ingest.py @@ -672,8 +672,9 @@ async def test_ingest_autocommit_deadletter_marks_resource( rid = str(uuid.uuid4()) message = make_message(kbid, rid) - with patch.object(processor, "notify_commit") as mock_notify, pytest.raises( - DeadletteredError + with ( + patch.object(processor, "notify_commit") as mock_notify, + pytest.raises(DeadletteredError), ): # cause an error to force deadletter handling mock_notify.side_effect = Exception("test") diff --git a/nucliadb/nucliadb/ingest/tests/unit/consumer/test_auditing.py b/nucliadb/nucliadb/ingest/tests/unit/consumer/test_auditing.py index c92b8376cb..17828a046f 100644 --- a/nucliadb/nucliadb/ingest/tests/unit/consumer/test_auditing.py +++ b/nucliadb/nucliadb/ingest/tests/unit/consumer/test_auditing.py @@ -48,11 +48,12 @@ def shard_manager(reader): nm = MagicMock() node = MagicMock(reader=reader) nm.get_shards_by_kbid = AsyncMock(return_value=[ShardObject()]) - with patch( - "nucliadb.ingest.consumer.auditing.get_shard_manager", return_value=nm - ), patch( - "nucliadb.ingest.consumer.auditing.choose_node", - return_value=(node, "shard_id"), + with ( + patch("nucliadb.ingest.consumer.auditing.get_shard_manager", return_value=nm), + patch( + "nucliadb.ingest.consumer.auditing.choose_node", + return_value=(node, "shard_id"), + ), ): yield nm diff --git a/nucliadb/nucliadb/ingest/tests/unit/consumer/test_shard_creator.py b/nucliadb/nucliadb/ingest/tests/unit/consumer/test_shard_creator.py index e099ea2eb4..0971b7f11f 100644 --- a/nucliadb/nucliadb/ingest/tests/unit/consumer/test_shard_creator.py +++ b/nucliadb/nucliadb/ingest/tests/unit/consumer/test_shard_creator.py @@ -58,14 +58,18 @@ def shard_manager(reader): shards = Shards(shards=[ShardObject(read_only=False)], actual=0) sm.get_current_active_shard = AsyncMock(return_value=shards.shards[0]) sm.maybe_create_new_shard = AsyncMock() - with patch( - "nucliadb.ingest.consumer.shard_creator.get_shard_manager", return_value=sm - ), patch( - "nucliadb.ingest.consumer.shard_creator.choose_node", - return_value=(node, "shard_id"), - ), patch( - "nucliadb.ingest.consumer.shard_creator.locking.distributed_lock", - return_value=AsyncMock(), + with ( + patch( + "nucliadb.ingest.consumer.shard_creator.get_shard_manager", return_value=sm + ), + patch( + "nucliadb.ingest.consumer.shard_creator.choose_node", + return_value=(node, "shard_id"), + ), + patch( + "nucliadb.ingest.consumer.shard_creator.locking.distributed_lock", + return_value=AsyncMock(), + ), ): yield sm diff --git a/nucliadb/nucliadb/ingest/tests/unit/service/test_writer.py b/nucliadb/nucliadb/ingest/tests/unit/service/test_writer.py index 6e9026564d..bda5a3b1d3 100644 --- a/nucliadb/nucliadb/ingest/tests/unit/service/test_writer.py +++ 
b/nucliadb/nucliadb/ingest/tests/unit/service/test_writer.py @@ -660,11 +660,14 @@ async def test_Index(self, writer: WriterServicer): request = writer_pb2.IndexResource(kbid="kbid", rid="rid") txn = AsyncMock() - with patch( - "nucliadb.ingest.service.writer.get_partitioning" - ) as get_partitioning, patch( - "nucliadb.ingest.service.writer.get_transaction_utility", - MagicMock(return_value=txn), + with ( + patch( + "nucliadb.ingest.service.writer.get_partitioning" + ) as get_partitioning, + patch( + "nucliadb.ingest.service.writer.get_transaction_utility", + MagicMock(return_value=txn), + ), ): resp = await writer.Index(request) diff --git a/nucliadb/nucliadb/search/api/v1/utils.py b/nucliadb/nucliadb/search/api/v1/utils.py index 8f1ac9841b..3e552726fa 100644 --- a/nucliadb/nucliadb/search/api/v1/utils.py +++ b/nucliadb/nucliadb/search/api/v1/utils.py @@ -39,5 +39,5 @@ def fastapi_query(param: ParamDefault, default: Optional[Any] = _NOT_SET, **kw) le=param.le, gt=param.gt, max_length=param.max_items, - **kw + **kw, ) diff --git a/nucliadb/nucliadb/search/requesters/utils.py b/nucliadb/nucliadb/search/requesters/utils.py index 39a8520838..a4e5d4ba73 100644 --- a/nucliadb/nucliadb/search/requesters/utils.py +++ b/nucliadb/nucliadb/search/requesters/utils.py @@ -20,7 +20,7 @@ import asyncio import json from enum import Enum -from typing import Any, Optional, TypeVar, Union, overload, Sequence +from typing import Any, Optional, Sequence, TypeVar, Union, overload from fastapi import HTTPException from google.protobuf.json_format import MessageToDict @@ -130,7 +130,9 @@ async def node_query( pb_query: REQUEST_TYPE, target_shard_replicas: Optional[list[str]] = None, use_read_replica_nodes: bool = True, -) -> tuple[Sequence[Union[T, BaseException]], bool, list[tuple[AbstractIndexNode, str]]]: +) -> tuple[ + Sequence[Union[T, BaseException]], bool, list[tuple[AbstractIndexNode, str]] +]: use_read_replica_nodes = use_read_replica_nodes and has_feature( const.Features.READ_REPLICA_SEARCHES, context={"kbid": kbid} ) diff --git a/nucliadb/nucliadb/search/tests/unit/search/test_chat_prompt.py b/nucliadb/nucliadb/search/tests/unit/search/test_chat_prompt.py index 330f98e705..268b553a83 100644 --- a/nucliadb/nucliadb/search/tests/unit/search/test_chat_prompt.py +++ b/nucliadb/nucliadb/search/tests/unit/search/test_chat_prompt.py @@ -170,9 +170,11 @@ def _create_find_result( @pytest.mark.asyncio async def test_default_prompt_context(kb): result_text = " ".join(["text"] * 10) - with patch("nucliadb.search.search.chat.prompt.get_read_only_transaction"), patch( - "nucliadb.search.search.chat.prompt.get_storage" - ), patch("nucliadb.search.search.chat.prompt.KnowledgeBoxORM", return_value=kb): + with ( + patch("nucliadb.search.search.chat.prompt.get_read_only_transaction"), + patch("nucliadb.search.search.chat.prompt.get_storage"), + patch("nucliadb.search.search.chat.prompt.KnowledgeBoxORM", return_value=kb), + ): context = chat_prompt.CappedPromptContext(max_size=int(1e6)) find_results = KnowledgeboxFindResults( facets={}, diff --git a/nucliadb/nucliadb/search/tests/unit/test_run.py b/nucliadb/nucliadb/search/tests/unit/test_run.py index b38ecc9865..70a3445301 100644 --- a/nucliadb/nucliadb/search/tests/unit/test_run.py +++ b/nucliadb/nucliadb/search/tests/unit/test_run.py @@ -27,8 +27,9 @@ @pytest.fixture(scope="function") def run_fastapi_with_metrics(): - with patch("nucliadb.search.run.run_fastapi_with_metrics") as mocked, patch( - "nucliadb.search.run.instrument_app" + with ( + 
patch("nucliadb.search.run.run_fastapi_with_metrics") as mocked, + patch("nucliadb.search.run.instrument_app"), ): yield mocked diff --git a/nucliadb/nucliadb/standalone/tests/unit/test_run.py b/nucliadb/nucliadb/standalone/tests/unit/test_run.py index cfb8128c7f..834ce88d50 100644 --- a/nucliadb/nucliadb/standalone/tests/unit/test_run.py +++ b/nucliadb/nucliadb/standalone/tests/unit/test_run.py @@ -29,14 +29,14 @@ @pytest.fixture(scope="function", autouse=True) def mocked_deps(): - with mock.patch("uvicorn.Server.run"), mock.patch( - "pydantic_argparse.ArgumentParser.parse_typed_args", return_value=Settings() - ), mock.patch( - f"{STANDALONE_RUN}.get_latest_nucliadb", return_value="1.0.0" - ), mock.patch( - "uvicorn.Server.startup" - ), mock.patch( - f"{STANDALONE_RUN}.run_migrations" + with ( + mock.patch("uvicorn.Server.run"), + mock.patch( + "pydantic_argparse.ArgumentParser.parse_typed_args", return_value=Settings() + ), + mock.patch(f"{STANDALONE_RUN}.get_latest_nucliadb", return_value="1.0.0"), + mock.patch("uvicorn.Server.startup"), + mock.patch(f"{STANDALONE_RUN}.run_migrations"), ): yield diff --git a/nucliadb/nucliadb/tests/migrations/test_migration_0018.py b/nucliadb/nucliadb/tests/migrations/test_migration_0018.py index d93d76fdd8..7baefc9748 100644 --- a/nucliadb/nucliadb/tests/migrations/test_migration_0018.py +++ b/nucliadb/nucliadb/tests/migrations/test_migration_0018.py @@ -37,9 +37,12 @@ async def test_migration_0018_global(maindb_driver: Driver): execution_context = Mock() execution_context.kv_driver = maindb_driver - with patch("nucliadb.ingest.orm.knowledgebox.get_storage", new=AsyncMock()), patch( - "nucliadb.ingest.orm.knowledgebox.get_shard_manager", - new=Mock(return_value=AsyncMock()), + with ( + patch("nucliadb.ingest.orm.knowledgebox.get_storage", new=AsyncMock()), + patch( + "nucliadb.ingest.orm.knowledgebox.get_shard_manager", + new=Mock(return_value=AsyncMock()), + ), ): # setup some orphan /kbslugs keys and some real ones async with maindb_driver.transaction() as txn: diff --git a/nucliadb/nucliadb/tests/unit/common/cluster/discovery/test_k8s.py b/nucliadb/nucliadb/tests/unit/common/cluster/discovery/test_k8s.py index 0bb6aafeec..a8d18edd38 100644 --- a/nucliadb/nucliadb/tests/unit/common/cluster/discovery/test_k8s.py +++ b/nucliadb/nucliadb/tests/unit/common/cluster/discovery/test_k8s.py @@ -44,14 +44,16 @@ def writer_stub(): node_id="node_id", shard_count=1, available_disk=10, total_disk=10 ) ) - with patch( - "nucliadb.common.cluster.discovery.base.nodewriter_pb2_grpc.NodeWriterStub", - return_value=writer_stub, - ), patch( - "nucliadb.common.cluster.discovery.base.replication_pb2_grpc.ReplicationServiceStub", - return_value=writer_stub, - ), patch( - "nucliadb.common.cluster.discovery.base.get_traced_grpc_channel" + with ( + patch( + "nucliadb.common.cluster.discovery.base.nodewriter_pb2_grpc.NodeWriterStub", + return_value=writer_stub, + ), + patch( + "nucliadb.common.cluster.discovery.base.replication_pb2_grpc.ReplicationServiceStub", + return_value=writer_stub, + ), + patch("nucliadb.common.cluster.discovery.base.get_traced_grpc_channel"), ): yield writer_stub diff --git a/nucliadb/nucliadb/tests/unit/common/cluster/standalone/test_service.py b/nucliadb/nucliadb/tests/unit/common/cluster/standalone/test_service.py index a89fb1148c..d256a5561f 100644 --- a/nucliadb/nucliadb/tests/unit/common/cluster/standalone/test_service.py +++ b/nucliadb/nucliadb/tests/unit/common/cluster/standalone/test_service.py @@ -35,9 +35,10 @@ @pytest.fixture def 
cluster_settings(): settings = Settings() - with patch( - "nucliadb.common.cluster.standalone.service.cluster_settings", settings - ), tempfile.TemporaryDirectory() as tmpdir: + with ( + patch("nucliadb.common.cluster.standalone.service.cluster_settings", settings), + tempfile.TemporaryDirectory() as tmpdir, + ): settings.data_path = tmpdir os.makedirs(os.path.join(tmpdir, "shards")) yield settings diff --git a/nucliadb/nucliadb/tests/unit/common/cluster/standalone/test_utils.py b/nucliadb/nucliadb/tests/unit/common/cluster/standalone/test_utils.py index 4ec4460b71..07b84578fd 100644 --- a/nucliadb/nucliadb/tests/unit/common/cluster/standalone/test_utils.py +++ b/nucliadb/nucliadb/tests/unit/common/cluster/standalone/test_utils.py @@ -30,9 +30,10 @@ @pytest.fixture def cluster_settings(): settings = Settings() - with patch( - "nucliadb.common.cluster.standalone.utils.cluster_settings", settings - ), tempfile.TemporaryDirectory() as tmpdir: + with ( + patch("nucliadb.common.cluster.standalone.utils.cluster_settings", settings), + tempfile.TemporaryDirectory() as tmpdir, + ): settings.data_path = tmpdir yield settings @@ -52,8 +53,9 @@ def test_get_self_k8s_host(cluster_settings: Settings, monkeypatch): monkeypatch.setenv("NUCLIADB_SERVICE_HOST", "host") monkeypatch.setenv("HOSTNAME", "nucliadb-0") - with patch( - "nucliadb.common.cluster.standalone.grpc_node_binding.NodeWriter" - ), patch("nucliadb.common.cluster.standalone.grpc_node_binding.NodeReader"): + with ( + patch("nucliadb.common.cluster.standalone.grpc_node_binding.NodeWriter"), + patch("nucliadb.common.cluster.standalone.grpc_node_binding.NodeReader"), + ): # patch because loading settings validates address now assert utils.get_self().address == "nucliadb-0.nucliadb" diff --git a/nucliadb/nucliadb/tests/unit/common/cluster/test_rollover.py b/nucliadb/nucliadb/tests/unit/common/cluster/test_rollover.py index 487e7d20bd..5b743d02df 100644 --- a/nucliadb/nucliadb/tests/unit/common/cluster/test_rollover.py +++ b/nucliadb/nucliadb/tests/unit/common/cluster/test_rollover.py @@ -135,12 +135,16 @@ async def _mock_indexed_keys(kbid): mock.iter_indexed_keys = _mock_indexed_keys - with patch("nucliadb.common.cluster.rollover.datamanagers.rollover", mock), patch( - "nucliadb.common.cluster.rollover.datamanagers.with_transaction", - return_value=AsyncMock(), - ), patch( - "nucliadb.ingest.consumer.shard_creator.locking.distributed_lock", - return_value=AsyncMock(), + with ( + patch("nucliadb.common.cluster.rollover.datamanagers.rollover", mock), + patch( + "nucliadb.common.cluster.rollover.datamanagers.with_transaction", + return_value=AsyncMock(), + ), + patch( + "nucliadb.ingest.consumer.shard_creator.locking.distributed_lock", + return_value=AsyncMock(), + ), ): yield mock diff --git a/nucliadb/nucliadb/tests/unit/common/maindb/test_utils.py b/nucliadb/nucliadb/tests/unit/common/maindb/test_utils.py index 1363bb5d36..18ffe07f67 100644 --- a/nucliadb/nucliadb/tests/unit/common/maindb/test_utils.py +++ b/nucliadb/nucliadb/tests/unit/common/maindb/test_utils.py @@ -36,9 +36,11 @@ def reset_driver_utils(): @pytest.mark.asyncio async def test_setup_driver_redis(): mock = AsyncMock(initialized=False) - with patch.object(settings, "driver", "redis"), patch.object( - settings, "driver_redis_url", "driver_redis_url" - ), patch("nucliadb.common.maindb.utils.RedisDriver", return_value=mock): + with ( + patch.object(settings, "driver", "redis"), + patch.object(settings, "driver_redis_url", "driver_redis_url"), + 
patch("nucliadb.common.maindb.utils.RedisDriver", return_value=mock), + ): assert await setup_driver() == mock mock.initialize.assert_awaited_once() @@ -46,9 +48,11 @@ async def test_setup_driver_redis(): @pytest.mark.asyncio async def test_setup_driver_tikv(): mock = AsyncMock(initialized=False) - with patch.object(settings, "driver", "tikv"), patch.object( - settings, "driver_tikv_url", "driver_tikv_url" - ), patch("nucliadb.common.maindb.utils.TiKVDriver", return_value=mock): + with ( + patch.object(settings, "driver", "tikv"), + patch.object(settings, "driver_tikv_url", "driver_tikv_url"), + patch("nucliadb.common.maindb.utils.TiKVDriver", return_value=mock), + ): assert await setup_driver() == mock mock.initialize.assert_awaited_once() @@ -56,9 +60,11 @@ async def test_setup_driver_tikv(): @pytest.mark.asyncio async def test_setup_driver_pg(): mock = AsyncMock(initialized=False) - with patch.object(settings, "driver", "pg"), patch.object( - settings, "driver_pg_url", "driver_pg_url" - ), patch("nucliadb.common.maindb.utils.PGDriver", return_value=mock): + with ( + patch.object(settings, "driver", "pg"), + patch.object(settings, "driver_pg_url", "driver_pg_url"), + patch("nucliadb.common.maindb.utils.PGDriver", return_value=mock), + ): assert await setup_driver() == mock mock.initialize.assert_awaited_once() @@ -66,16 +72,20 @@ async def test_setup_driver_pg(): @pytest.mark.asyncio async def test_setup_driver_local(): mock = AsyncMock(initialized=False) - with patch.object(settings, "driver", "local"), patch.object( - settings, "driver_local_url", "driver_local_url" - ), patch("nucliadb.common.maindb.utils.LocalDriver", return_value=mock): + with ( + patch.object(settings, "driver", "local"), + patch.object(settings, "driver_local_url", "driver_local_url"), + patch("nucliadb.common.maindb.utils.LocalDriver", return_value=mock), + ): assert await setup_driver() == mock mock.initialize.assert_awaited_once() @pytest.mark.asyncio async def test_setup_driver_error(): - with patch.object(settings, "driver", "pg"), patch.object( - settings, "driver_pg_url", None - ), pytest.raises(ConfigurationError): + with ( + patch.object(settings, "driver", "pg"), + patch.object(settings, "driver_pg_url", None), + pytest.raises(ConfigurationError), + ): await setup_driver() diff --git a/nucliadb/nucliadb/tests/unit/http_clients/test_processing.py b/nucliadb/nucliadb/tests/unit/http_clients/test_processing.py index 603f0844fa..7bcd2ebc91 100644 --- a/nucliadb/nucliadb/tests/unit/http_clients/test_processing.py +++ b/nucliadb/nucliadb/tests/unit/http_clients/test_processing.py @@ -41,12 +41,12 @@ def test_check_status(): def test_get_processing_api_url(): - with mock.patch.object( - nuclia_settings, "nuclia_service_account", "sa" - ), mock.patch.object( - nuclia_settings, "nuclia_zone", "nuclia_zone" - ), mock.patch.object( - nuclia_settings, "nuclia_public_url", "https://{zone}.nuclia_public_url" + with ( + mock.patch.object(nuclia_settings, "nuclia_service_account", "sa"), + mock.patch.object(nuclia_settings, "nuclia_zone", "nuclia_zone"), + mock.patch.object( + nuclia_settings, "nuclia_public_url", "https://{zone}.nuclia_public_url" + ), ): assert ( processing.get_processing_api_url() diff --git a/nucliadb/nucliadb/tests/unit/test_health.py b/nucliadb/nucliadb/tests/unit/test_health.py index 296fd2bf19..ff20b4edef 100644 --- a/nucliadb/nucliadb/tests/unit/test_health.py +++ b/nucliadb/nucliadb/tests/unit/test_health.py @@ -48,8 +48,9 @@ def nats_manager(): async def test_grpc_health_check(): servicer = 
AsyncMock() - with patch.object(manager, "INDEX_NODES", {"node1": "node1"}), patch.object( - settings, "driver", DriverConfig.PG + with ( + patch.object(manager, "INDEX_NODES", {"node1": "node1"}), + patch.object(settings, "driver", DriverConfig.PG), ): task = asyncio.create_task(health.grpc_health_check(servicer)) await asyncio.sleep(0.05) @@ -61,8 +62,9 @@ async def test_grpc_health_check(): async def test_health_check_fail(): servicer = AsyncMock() - with patch.object(manager, "INDEX_NODES", {}), patch.object( - settings, "driver", DriverConfig.PG + with ( + patch.object(manager, "INDEX_NODES", {}), + patch.object(settings, "driver", DriverConfig.PG), ): task = asyncio.create_task(health.grpc_health_check(servicer)) await asyncio.sleep(0.05) diff --git a/nucliadb/nucliadb/tests/unit/test_purge.py b/nucliadb/nucliadb/tests/unit/test_purge.py index 82bb3a5765..9355e7e0c4 100644 --- a/nucliadb/nucliadb/tests/unit/test_purge.py +++ b/nucliadb/nucliadb/tests/unit/test_purge.py @@ -123,14 +123,12 @@ async def test_purge_kb_storage_handle_errors(keys, driver, storage): async def test_main(driver, storage): - with patch("nucliadb.purge.purge_kb", AsyncMock()) as purge_kb, patch( - "nucliadb.purge.purge_kb_storage", AsyncMock() - ) as purge_kb_storage, patch( - "nucliadb.purge.get_storage", return_value=storage - ), patch( - "nucliadb.purge.setup_driver", return_value=driver - ), patch( - "nucliadb.purge.setup_cluster", return_value=driver + with ( + patch("nucliadb.purge.purge_kb", AsyncMock()) as purge_kb, + patch("nucliadb.purge.purge_kb_storage", AsyncMock()) as purge_kb_storage, + patch("nucliadb.purge.get_storage", return_value=storage), + patch("nucliadb.purge.setup_driver", return_value=driver), + patch("nucliadb.purge.setup_cluster", return_value=driver), ): await purge.main() diff --git a/nucliadb/nucliadb/writer/tests/unit/api/v1/test_upload.py b/nucliadb/nucliadb/writer/tests/unit/api/v1/test_upload.py index ae7fd4efaa..9c29507e93 100644 --- a/nucliadb/nucliadb/writer/tests/unit/api/v1/test_upload.py +++ b/nucliadb/nucliadb/writer/tests/unit/api/v1/test_upload.py @@ -123,8 +123,12 @@ async def test_validate_field_upload(rid, field, md5, exists: bool, result): mock_uuid4.hex = "uuid4" mock_uuid.uuid4 = Mock(return_value=mock_uuid4) - with patch("nucliadb.writer.api.v1.upload.uuid", mock_uuid), patch( - "nucliadb.writer.api.v1.upload.resource_exists", AsyncMock(return_value=exists) + with ( + patch("nucliadb.writer.api.v1.upload.uuid", mock_uuid), + patch( + "nucliadb.writer.api.v1.upload.resource_exists", + AsyncMock(return_value=exists), + ), ): if isinstance(result, tuple): _, result_rid, result_field = await validate_field_upload( diff --git a/nucliadb_node/nucliadb_node/tests/unit/test_app.py b/nucliadb_node/nucliadb_node/tests/unit/test_app.py index 790f12a012..e77e0c3b85 100644 --- a/nucliadb_node/nucliadb_node/tests/unit/test_app.py +++ b/nucliadb_node/nucliadb_node/tests/unit/test_app.py @@ -27,26 +27,23 @@ async def test_main(): - with patch("nucliadb_node.app.get_storage", AsyncMock()) as storage, patch( - "nucliadb_node.app.start_worker", AsyncMock() - ) as start_worker, patch( - "nucliadb_node.app.start_nats_manager", AsyncMock() - ) as _, patch( - "nucliadb_node.app.stop_nats_manager", AsyncMock() - ) as stop_nats_manager, patch( - "nucliadb_node.app.start_indexed_publisher", AsyncMock() - ) as start_indexed_publisher, patch( - "nucliadb_node.app.start_grpc", AsyncMock() - ) as start_grpc, patch( - "nucliadb_node.app.serve_metrics", AsyncMock() - ) as serve_metrics, 
patch( - "nucliadb_node.app.run_until_exit", AsyncMock() - ) as run_until_exit, patch( - "nucliadb_node.app.Writer", MagicMock() - ) as writer, patch( - "nucliadb_node.app.get_storage", - AsyncMock(), - ) as storage: + with ( + patch("nucliadb_node.app.get_storage", AsyncMock()) as storage, + patch("nucliadb_node.app.start_worker", AsyncMock()) as start_worker, + patch("nucliadb_node.app.start_nats_manager", AsyncMock()) as _, + patch("nucliadb_node.app.stop_nats_manager", AsyncMock()) as stop_nats_manager, + patch( + "nucliadb_node.app.start_indexed_publisher", AsyncMock() + ) as start_indexed_publisher, + patch("nucliadb_node.app.start_grpc", AsyncMock()) as start_grpc, + patch("nucliadb_node.app.serve_metrics", AsyncMock()) as serve_metrics, + patch("nucliadb_node.app.run_until_exit", AsyncMock()) as run_until_exit, + patch("nucliadb_node.app.Writer", MagicMock()) as writer, + patch( + "nucliadb_node.app.get_storage", + AsyncMock(), + ) as storage, + ): await app.main() run_until_exit.assert_awaited_once_with( diff --git a/nucliadb_node/nucliadb_node/tests/unit/test_indexer.py b/nucliadb_node/nucliadb_node/tests/unit/test_indexer.py index fb17a0b425..1c035fecca 100644 --- a/nucliadb_node/nucliadb_node/tests/unit/test_indexer.py +++ b/nucliadb_node/nucliadb_node/tests/unit/test_indexer.py @@ -142,9 +142,10 @@ async def test_creates_an_indexer_and_a_task_per_shard( self, csi: ConcurrentShardIndexer, ): - with patch("nucliadb_node.indexer.asyncio") as asyncio_mock, patch( - "nucliadb_node.indexer.PriorityIndexer" - ) as PriorityIndexer_mock: + with ( + patch("nucliadb_node.indexer.asyncio") as asyncio_mock, + patch("nucliadb_node.indexer.PriorityIndexer") as PriorityIndexer_mock, + ): seqid = 1 n_tasks = 5 for i in range(n_tasks): diff --git a/nucliadb_node/nucliadb_node/tests/unit/test_pull.py b/nucliadb_node/nucliadb_node/tests/unit/test_pull.py index 3ccefdea09..b37491b181 100644 --- a/nucliadb_node/nucliadb_node/tests/unit/test_pull.py +++ b/nucliadb_node/nucliadb_node/tests/unit/test_pull.py @@ -92,8 +92,9 @@ def nats_manager(self): @pytest.fixture(scope="function") def worker(self, settings, nats_manager): writer = AsyncMock() - with mock.patch("nucliadb_node.pull.get_storage"), mock.patch( - "nucliadb_node.indexer.get_storage" + with ( + mock.patch("nucliadb_node.pull.get_storage"), + mock.patch("nucliadb_node.indexer.get_storage"), ): worker = Worker(writer, "node", nats_manager) worker.store_seqid = Mock() diff --git a/nucliadb_telemetry/nucliadb_telemetry/errors.py b/nucliadb_telemetry/nucliadb_telemetry/errors.py index 27cdce1fe6..b3efffc035 100644 --- a/nucliadb_telemetry/nucliadb_telemetry/errors.py +++ b/nucliadb_telemetry/nucliadb_telemetry/errors.py @@ -22,7 +22,7 @@ # abstract advanced error handling into its own module to prevent # code from handling sentry integration everywhere -from typing import Any, ContextManager, List, Optional, Literal +from typing import Any, ContextManager, List, Literal, Optional import pydantic @@ -59,9 +59,11 @@ def capture_exception(error: BaseException) -> Optional[str]: def capture_message( error_msg: str, - level: Optional[Literal['fatal', 'critical', 'error', 'warning', 'info', 'debug']] = None, + level: Optional[ + Literal["fatal", "critical", "error", "warning", "info", "debug"] + ] = None, scope: Optional[Any] = None, - **scope_args: Any + **scope_args: Any, ) -> Optional[str]: if SENTRY: return sentry_sdk.capture_message(error_msg, level, scope, **scope_args) diff --git 
a/nucliadb_telemetry/nucliadb_telemetry/tests/unit/test_errors.py b/nucliadb_telemetry/nucliadb_telemetry/tests/unit/test_errors.py index b150d0a4a1..f3da71d126 100644 --- a/nucliadb_telemetry/nucliadb_telemetry/tests/unit/test_errors.py +++ b/nucliadb_telemetry/nucliadb_telemetry/tests/unit/test_errors.py @@ -23,8 +23,9 @@ def test_capture_exception() -> None: - with patch("nucliadb_telemetry.errors.sentry_sdk") as mock_sentry_sdk, patch.object( - errors, "SENTRY", True + with ( + patch("nucliadb_telemetry.errors.sentry_sdk") as mock_sentry_sdk, + patch.object(errors, "SENTRY", True), ): ex = Exception("test") errors.capture_exception(ex) @@ -32,16 +33,18 @@ def test_capture_exception() -> None: def test_capture_exception_no_sentry() -> None: - with patch.object(errors, "SENTRY", False), patch( - "nucliadb_telemetry.errors.sentry_sdk" - ) as mock_sentry_sdk: + with ( + patch.object(errors, "SENTRY", False), + patch("nucliadb_telemetry.errors.sentry_sdk") as mock_sentry_sdk, + ): errors.capture_exception(Exception()) mock_sentry_sdk.capture_exception.assert_not_called() def test_capture_message() -> None: - with patch("nucliadb_telemetry.errors.sentry_sdk") as mock_sentry_sdk, patch.object( - errors, "SENTRY", True + with ( + patch("nucliadb_telemetry.errors.sentry_sdk") as mock_sentry_sdk, + patch.object(errors, "SENTRY", True), ): errors.capture_message("error_msg", "level", "scope") mock_sentry_sdk.capture_message.assert_called_once_with( @@ -50,9 +53,10 @@ def test_capture_message() -> None: def test_capture_message_no_sentry() -> None: - with patch.object(errors, "SENTRY", False), patch( - "nucliadb_telemetry.errors.sentry_sdk" - ) as mock_sentry_sdk: + with ( + patch.object(errors, "SENTRY", False), + patch("nucliadb_telemetry.errors.sentry_sdk") as mock_sentry_sdk, + ): errors.capture_message("error_msg", "level", "scope") mock_sentry_sdk.capture_message.assert_not_called() @@ -60,8 +64,9 @@ def test_capture_message_no_sentry() -> None: def test_setup_error_handling(monkeypatch): monkeypatch.setenv("sentry_url", "sentry_url") monkeypatch.setenv("environment", "environment") - with patch("nucliadb_telemetry.errors.sentry_sdk") as mock_sentry_sdk, patch.object( - errors, "SENTRY", True + with ( + patch("nucliadb_telemetry.errors.sentry_sdk") as mock_sentry_sdk, + patch.object(errors, "SENTRY", True), ): errors.setup_error_handling("1.0.0") mock_sentry_sdk.init.assert_called_once_with( @@ -80,8 +85,9 @@ def test_setup_error_handling_no_sentry(monkeypatch): def test_push_scope() -> None: - with patch("nucliadb_telemetry.errors.sentry_sdk") as mock_sentry_sdk, patch.object( - errors, "SENTRY", True + with ( + patch("nucliadb_telemetry.errors.sentry_sdk") as mock_sentry_sdk, + patch.object(errors, "SENTRY", True), ): with errors.push_scope() as scope: scope.set_extra("key", "value") @@ -89,8 +95,9 @@ def test_push_scope() -> None: def test_push_scope_no_sentry() -> None: - with patch("nucliadb_telemetry.errors.sentry_sdk") as mock_sentry_sdk, patch.object( - errors, "SENTRY", False + with ( + patch("nucliadb_telemetry.errors.sentry_sdk") as mock_sentry_sdk, + patch.object(errors, "SENTRY", False), ): with errors.push_scope() as scope: scope.set_extra("key", "value") diff --git a/pyproject.toml b/pyproject.toml index fde757b1e2..03be535e6d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -50,4 +50,4 @@ dev = [ [project] name = "nucliadb_workspace" requires-python = ">=3.9" -version = "0.0.0" \ No newline at end of file +version = "0.0.0"
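
Nearly every hunk in this patch applies the same mechanical change: multi-manager `with`/`async with` statements that previously had to be wrapped by splitting inside one of the calls are collapsed into the parenthesized form, trailing commas are added after the last argument of wrapped calls, `typing` imports are re-sorted alphabetically, and missing end-of-file newlines are restored. The sketch below is not taken from the repository; it is a minimal before/after illustration of the `with`-statement pattern using hypothetical context managers. The parenthesized grouping is accepted by CPython's PEG parser (available from Python 3.9, officially documented in 3.10, consistent with the `requires-python = ">=3.9"` above) and matches the layout newer Black releases emit for multi-manager `with` statements when the target Python version supports it.

```python
from contextlib import contextmanager
from typing import Iterator


@contextmanager
def managed(name: str) -> Iterator[str]:
    """Hypothetical context manager, used only to illustrate the formatting change."""
    print(f"enter {name}")
    try:
        yield name
    finally:
        print(f"exit {name}")


def old_style() -> None:
    # Pre-patch layout (the "-" lines): the statement can only be wrapped by
    # breaking inside one of the calls, which is awkward to read and to extend.
    with managed("transaction") as txn, managed(
        "distributed-lock"
    ) as lock:
        print(txn, lock)


def new_style() -> None:
    # Post-patch layout (the "+" lines): the whole group of context managers
    # sits inside parentheses, one per line, with a trailing comma so adding
    # or removing a manager is a one-line diff.
    with (
        managed("transaction") as txn,
        managed("distributed-lock") as lock,
    ):
        print(txn, lock)


if __name__ == "__main__":
    old_style()
    new_style()
```

Grouping the managers this way keeps each `patch(...)`, transaction, or lock acquisition on its own line, which is why the test fixtures touched above become noticeably easier to scan and produce smaller diffs when a manager is added or removed.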