Small fixes and improvements #2343

Merged (6 commits, Jul 26, 2024)
nucliadb/src/nucliadb/common/maindb/pg.py (4 additions & 3 deletions)
@@ -275,9 +275,10 @@ async def initialize(self):

     async def finalize(self):
         async with self._lock:
-            await self.pool.close()
-            self.initialized = False
-            self.metrics_task.cancel()
+            if self.initialized:
+                await self.pool.close()
+                self.initialized = False
+            self.metrics_task.cancel()

     async def _report_metrics_task(self):
         while True:
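The new guard makes finalize() safe to call on a driver that was never initialized, or that was already finalized, which matters for fixtures that tear drivers down unconditionally. A minimal sketch of the pattern with a simplified stand-in class (not the real PGDriver, which also manages a metrics task and a lock):

import asyncio
from typing import Optional


class FakePool:
    async def close(self) -> None:
        print("pool closed")


class DriverSketch:
    # Illustrative stand-in for PGDriver.
    def __init__(self) -> None:
        self.initialized = False
        self.pool: Optional[FakePool] = None

    async def initialize(self) -> None:
        self.pool = FakePool()
        self.initialized = True

    async def finalize(self) -> None:
        # Only close the pool if it was actually created, so calling
        # finalize() twice, or before initialize(), is a harmless no-op.
        if self.initialized:
            await self.pool.close()
            self.initialized = False


async def main() -> None:
    driver = DriverSketch()
    await driver.finalize()    # no-op: never initialized
    await driver.initialize()
    await driver.finalize()    # closes the pool
    await driver.finalize()    # no-op: already finalized


asyncio.run(main())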
nucliadb/tests/ndbfixtures/maindb.py (15 additions & 2 deletions)
@@ -106,13 +106,21 @@ async def maindb_driver(request: FixtureRequest) -> AsyncIterator[Driver]:
 async def pg_maindb_settings(pg):
     url = f"postgresql://postgres:postgres@{pg[0]}:{pg[1]}/postgres"

-    return DriverSettings(
+    # We want to be sure schema migrations are always run. As some tests use
+    # this fixture and create their own driver, we need to create one here and
+    # run the migrations, so the maindb_settings fixture can still be generic
+    # and pg migrations are run
+    driver = PGDriver(url=url, connection_pool_min_size=2, connection_pool_max_size=2)
+    await driver.initialize()
+    await run_pg_schema_migrations(driver)
+    await driver.finalize()
+
+    yield DriverSettings(
         driver=DriverConfig.PG,
         driver_pg_url=url,
         driver_pg_connection_pool_min_size=10,
         driver_pg_connection_pool_max_size=10,
         driver_pg_connection_pool_acquire_timeout_ms=200,
     )


@@ -129,7 +137,12 @@ async def pg_maindb_driver(pg_maindb_settings: DriverSettings):
             await cur.execute("DROP table IF EXISTS resources")
             await cur.execute("DROP table IF EXISTS catalog")

-    driver = PGDriver(url=url)
+    driver = PGDriver(
+        url=url,
+        connection_pool_min_size=pg_maindb_settings.driver_pg_connection_pool_min_size,
+        connection_pool_max_size=pg_maindb_settings.driver_pg_connection_pool_max_size,
+        acquire_timeout_ms=pg_maindb_settings.driver_pg_connection_pool_acquire_timeout_ms,
+    )
     await driver.initialize()
     await run_pg_schema_migrations(driver)
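Besides bootstrapping the schema migrations, the fixture switches from return to yield, which turns the coroutine fixture into an async generator fixture and leaves room for teardown after the test (the PR adds no teardown here, but the option stays open). A minimal sketch of the difference, assuming pytest-asyncio or an equivalent plugin drives the async fixtures, with generic names rather than the ones from this PR:

import pytest


@pytest.fixture
async def settings_returned():
    # Plain value fixture: the test gets the dict, but there is no hook
    # for cleanup once the test finishes.
    return {"driver_pg_url": "postgresql://localhost/test"}


@pytest.fixture
async def settings_yielded():
    settings = {"driver_pg_url": "postgresql://localhost/test"}
    yield settings          # the test body runs while the fixture is suspended here
    # Anything after the yield is teardown, e.g. dropping test tables or
    # closing a driver created above.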
nucliadb/tests/ndbfixtures/utils.py (3 additions & 2 deletions)
@@ -22,7 +22,7 @@
 from typing import Callable, Optional

 from fastapi import FastAPI
-from httpx import AsyncClient
+from httpx import ASGITransport, AsyncClient

 from nucliadb.search import API_PREFIX

@@ -41,7 +41,8 @@ def _make_client_fixture(
     if root is False:
         client_base_url = f"{client_base_url}/{API_PREFIX}/v{version}"

-    client = AsyncClient(app=application, base_url=client_base_url)
+    transport = ASGITransport(app=application)  # type: ignore
+    client = AsyncClient(transport=transport, base_url=client_base_url)
     client.headers["X-NUCLIADB-ROLES"] = ";".join([role.value for role in roles])
     client.headers["X-NUCLIADB-USER"] = user

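Newer httpx releases deprecate the app= shortcut on AsyncClient in favour of an explicit ASGITransport, which is what the fixture switches to; requests are routed to the ASGI app in-process, without opening a socket. A small self-contained sketch (the FastAPI app below is a stand-in, not the nucliadb application):

import asyncio

from fastapi import FastAPI
from httpx import ASGITransport, AsyncClient

app = FastAPI()


@app.get("/ping")
async def ping():
    return {"ok": True}


async def main() -> None:
    transport = ASGITransport(app=app)
    async with AsyncClient(transport=transport, base_url="http://testserver") as client:
        resp = await client.get("/ping")
        print(resp.status_code, resp.json())  # 200 {'ok': True}


asyncio.run(main())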
nucliadb/tests/reader/benchmarks/test_benchmarks_resource.py (27 additions & 30 deletions)
@@ -23,6 +23,7 @@
 from httpx import AsyncClient

 from nucliadb.ingest.orm.resource import Resource
+from nucliadb.reader.api.v1.router import KB_PREFIX, RESOURCE_PREFIX
 from nucliadb_utils.tests.asyncbenchmark import AsyncBenchmarkFixture

@@ -38,36 +39,32 @@
 @pytest.mark.deploy_modes("component")
 async def test_get_resource_all(
     nucliadb_reader: AsyncClient,
-    test_resource: Resource,
+    full_resource: Resource,
     asyncbenchmark: AsyncBenchmarkFixture,
 ) -> None:
-    # resource = full_resource
-    # kbid = resource.kb.kbid
-    # rid = resource.uuid
+    resource = full_resource
+    kbid = resource.kb.kbid
+    rid = resource.uuid

-    # resp = await asyncbenchmark(
-    #     nucliadb_reader.get,
-    #     f"/{KB_PREFIX}/{kbid}/{RESOURCE_PREFIX}/{rid}",
-    #     params={
-    #         "show": ["basic", "origin", "relations", "values", "extracted"],
-    #         "field_type": [
-    #             "text",
-    #             "link",
-    #             "file",
-    #             "conversation",
-    #         ],
-    #         "extracted": [
-    #             "metadata",
-    #             "vectors",
-    #             "large_metadata",
-    #             "text",
-    #             "link",
-    #             "file",
-    #         ],
-    #     },
-    # )
-    # assert resp.status_code == 200
-    async def function(a):
-        return a
-
-    assert await asyncbenchmark(function, 10) == 10
+    resp = await asyncbenchmark(
+        nucliadb_reader.get,
+        f"/{KB_PREFIX}/{kbid}/{RESOURCE_PREFIX}/{rid}",
+        params={
+            "show": ["basic", "origin", "relations", "values", "extracted"],
+            "field_type": [
+                "text",
+                "link",
+                "file",
+                "conversation",
+            ],
+            "extracted": [
+                "metadata",
+                "vectors",
+                "large_metadata",
+                "text",
+                "link",
+                "file",
+            ],
+        },
+    )
+    assert resp.status_code == 200
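One detail worth noting in the restored benchmark: httpx encodes list values in params as repeated query parameters, which is presumably how the reader API expects multiple show, field_type and extracted values, given its list-typed query parameters. A quick illustration:

import httpx

# Building a request shows how list-valued params are serialized.
request = httpx.Request(
    "GET",
    "http://test/resource",
    params={"show": ["basic", "origin"], "field_type": "text"},
)
print(request.url)  # http://test/resource?show=basic&show=origin&field_type=text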
nucliadb_utils/src/nucliadb_utils/audit/basic.py (1 addition & 1 deletion)
@@ -54,7 +54,7 @@ async def report_and_send(
     ):
         logger.debug(f"AUDIT {audit_type} {kbid} {user} {origin} {rid} {audit_fields}")

-    async def visited(
+    def visited(
         self,
         kbid: str,
         uuid: str,
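The PR does not state the motivation for dropping async from visited(); a likely reading is that the audit interface declares it as a synchronous method, and an async override hands callers a coroutine they never await, so the body silently never runs. A small illustration with made-up class names:

import asyncio


class AuditBase:
    # Hypothetical base interface: visited() is synchronous.
    def visited(self, kbid: str, uuid: str) -> None: ...


class BadAudit(AuditBase):
    async def visited(self, kbid: str, uuid: str) -> None:  # mismatched: async
        print(f"audited {kbid}/{uuid}")


class GoodAudit(AuditBase):
    def visited(self, kbid: str, uuid: str) -> None:
        print(f"audited {kbid}/{uuid}")


GoodAudit().visited("kb1", "r1")           # prints immediately
result = BadAudit().visited("kb1", "r1")   # nothing printed: this is a coroutine
print(type(result))                        # <class 'coroutine'>
asyncio.run(result)                        # the body only runs once awaited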