diff --git a/qcfractal/qcfractal/alembic/versions/2024-05-06-73b4838a6839_remove_server_stats_log.py b/qcfractal/qcfractal/alembic/versions/2024-05-06-73b4838a6839_remove_server_stats_log.py
new file mode 100644
index 000000000..6d68a8b6d
--- /dev/null
+++ b/qcfractal/qcfractal/alembic/versions/2024-05-06-73b4838a6839_remove_server_stats_log.py
@@ -0,0 +1,52 @@
+"""Remove server stats log
+
+Revision ID: 73b4838a6839
+Revises: 75b80763e901
+Create Date: 2024-05-06 10:54:44.383709
+
+"""
+
+import sqlalchemy as sa
+from alembic import op
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision = "73b4838a6839"
+down_revision = "75b80763e901"
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.drop_index("ix_server_stats_log_timestamp", table_name="server_stats_log", postgresql_using="brin")
+    op.drop_table("server_stats_log")
+
+    op.execute("DELETE FROM internal_jobs WHERE name = 'update_server_stats'")
+    # ### end Alembic commands ###
+
+
+def downgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.create_table(
+        "server_stats_log",
+        sa.Column("id", sa.INTEGER(), autoincrement=True, nullable=False),
+        sa.Column("timestamp", postgresql.TIMESTAMP(timezone=True), autoincrement=False, nullable=False),
+        sa.Column("collection_count", sa.INTEGER(), autoincrement=False, nullable=True),
+        sa.Column("molecule_count", sa.BIGINT(), autoincrement=False, nullable=True),
+        sa.Column("record_count", sa.BIGINT(), autoincrement=False, nullable=True),
+        sa.Column("outputstore_count", sa.BIGINT(), autoincrement=False, nullable=True),
+        sa.Column("access_count", sa.BIGINT(), autoincrement=False, nullable=True),
+        sa.Column("db_total_size", sa.BIGINT(), autoincrement=False, nullable=True),
+        sa.Column("db_table_size", sa.BIGINT(), autoincrement=False, nullable=True),
+        sa.Column("db_index_size", sa.BIGINT(), autoincrement=False, nullable=True),
+        sa.Column("db_table_information", postgresql.JSON(astext_type=sa.Text()), autoincrement=False, nullable=True),
+        sa.Column("error_count", sa.BIGINT(), autoincrement=False, nullable=True),
+        sa.Column("service_queue_status", postgresql.JSON(astext_type=sa.Text()), autoincrement=False, nullable=True),
+        sa.Column("task_queue_status", postgresql.JSON(astext_type=sa.Text()), autoincrement=False, nullable=True),
+        sa.PrimaryKeyConstraint("id", name="server_stats_log_pkey"),
+    )
+    op.create_index(
+        "ix_server_stats_log_timestamp", "server_stats_log", ["timestamp"], unique=False, postgresql_using="brin"
+    )
+    # ### end Alembic commands ###
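
This upgrade is destructive: op.drop_table("server_stats_log") discards every collected statistics row, and downgrade() only recreates an empty table. Operators who want to keep the historical data could export it before upgrading. A minimal sketch, assuming a local PostgreSQL URL and an output path (both illustrative, not part of this change):

    # hypothetical pre-upgrade export of the soon-to-be-dropped table
    import csv

    import sqlalchemy as sa

    engine = sa.create_engine("postgresql://localhost/qcfractal_db")  # adjust to your deployment
    with engine.connect() as conn:
        result = conn.execute(sa.text("SELECT * FROM server_stats_log ORDER BY timestamp"))
        with open("server_stats_backup.csv", "w", newline="") as f:
            writer = csv.writer(f)
            writer.writerow(result.keys())  # header: column names
            writer.writerows(result)        # one row per statistics snapshot
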
diff --git a/qcfractal/qcfractal/components/serverinfo/db_models.py b/qcfractal/qcfractal/components/serverinfo/db_models.py
index 6ca319322..b0e764148 100644
--- a/qcfractal/qcfractal/components/serverinfo/db_models.py
+++ b/qcfractal/qcfractal/components/serverinfo/db_models.py
@@ -110,40 +110,6 @@ def model_dict(self, exclude: Optional[Iterable[str]] = None) -> Dict[str, Any]:
         return d
 
 
-class ServerStatsLogORM(BaseORM):
-    """
-    Table for storing server statistics
-
-    Server statistics (storage size, row count, etc) are periodically captured and
-    stored in this table
-    """
-
-    __tablename__ = "server_stats_log"
-
-    id = Column(Integer, primary_key=True)
-    timestamp = Column(TIMESTAMP(timezone=True), nullable=False, default=now_at_utc)
-
-    # Raw counts
-    collection_count = Column(Integer)
-    molecule_count = Column(BigInteger)
-    record_count = Column(BigInteger)
-    outputstore_count = Column(BigInteger)
-    access_count = Column(BigInteger)
-    error_count = Column(BigInteger)
-
-    # Task & service queue status
-    task_queue_status = Column(JSON)
-    service_queue_status = Column(JSON)
-
-    # Database
-    db_total_size = Column(BigInteger)
-    db_table_size = Column(BigInteger)
-    db_index_size = Column(BigInteger)
-    db_table_information = Column(JSON)
-
-    __table_args__ = (Index("ix_server_stats_log_timestamp", "timestamp", postgresql_using="brin"),)
-
-
 class MessageOfTheDayORM(BaseORM):
     """
     Table for storing the Message-of-the-Day
diff --git a/qcfractal/qcfractal/components/serverinfo/routes.py b/qcfractal/qcfractal/components/serverinfo/routes.py
index e6f1da525..73f6dc1a6 100644
--- a/qcfractal/qcfractal/components/serverinfo/routes.py
+++ b/qcfractal/qcfractal/components/serverinfo/routes.py
@@ -7,7 +7,6 @@
 from qcportal.serverinfo import (
     AccessLogSummaryFilters,
     AccessLogQueryFilters,
-    ServerStatsQueryFilters,
     ErrorLogQueryFilters,
     DeleteBeforeDateBody,
 )
@@ -68,20 +67,6 @@ def query_access_summary_v1(url_params: AccessLogSummaryFilters):
     return storage_socket.serverinfo.query_access_summary(url_params)
 
 
-@api_v1.route("/server_stats/query", methods=["POST"])
-@wrap_route("READ")
-def query_server_stats_v1(body_data: ServerStatsQueryFilters):
-    max_limit = current_app.config["QCFRACTAL_CONFIG"].api_limits.get_server_stats
-    body_data.limit = calculate_limit(max_limit, body_data.limit)
-    return storage_socket.serverinfo.query_server_stats(body_data)
-
-
-@api_v1.route("/server_stats/bulkDelete", methods=["POST"])
-@wrap_route("DELETE")
-def delete_server_stats_v1(body_data: DeleteBeforeDateBody):
-    return storage_socket.serverinfo.delete_server_stats(before=body_data.before)
-
-
 @api_v1.route("/server_errors/query", methods=["POST"])
 @wrap_route("READ")
 def query_error_log_v1(body_data: ErrorLogQueryFilters):
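
With these routes gone, POST requests to api/v1/server_stats/query and api/v1/server_stats/bulkDelete return 404 from upgraded servers. Tooling that still calls them can probe for the removal instead of pinning server versions — a sketch, with a placeholder address and no authentication (real deployments need auth headers):

    # hypothetical probe for the removed endpoint
    import requests

    resp = requests.post("http://localhost:7777/api/v1/server_stats/query", json={})
    if resp.status_code == 404:
        print("server statistics endpoints are gone; skip stats collection")
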
diff --git a/qcfractal/qcfractal/components/serverinfo/socket.py b/qcfractal/qcfractal/components/serverinfo/socket.py
index 7300205d7..682cd865f 100644
--- a/qcfractal/qcfractal/components/serverinfo/socket.py
+++ b/qcfractal/qcfractal/components/serverinfo/socket.py
@@ -11,25 +11,19 @@
 from typing import TYPE_CHECKING
 
 import requests
-from sqlalchemy import and_, or_, func, text, select, delete
+from sqlalchemy import and_, or_, func, select, delete
 from sqlalchemy.orm import load_only
 
 import qcfractal
 from qcfractal.components.auth.db_models import UserIDMapSubquery
-from qcfractal.components.dataset_db_models import BaseDatasetORM
-from qcfractal.components.molecules.db_models import MoleculeORM
-from qcfractal.components.record_db_models import BaseRecordORM, OutputStoreORM
-from qcfractal.components.services.db_models import ServiceQueueORM
-from qcfractal.components.tasks.db_models import TaskQueueORM
 from qcfractal.db_socket.helpers import get_query_proj_options
 from qcportal.serverinfo import (
     AccessLogQueryFilters,
     AccessLogSummaryFilters,
     ErrorLogQueryFilters,
-    ServerStatsQueryFilters,
 )
 from qcportal.utils import now_at_utc
-from .db_models import AccessLogORM, InternalErrorLogORM, ServerStatsLogORM, MessageOfTheDayORM, ServerStatsMetadataORM
+from .db_models import AccessLogORM, InternalErrorLogORM, MessageOfTheDayORM, ServerStatsMetadataORM
 
 if TYPE_CHECKING:
     from sqlalchemy.orm.session import Session
@@ -54,7 +48,6 @@ class ServerInfoSocket:
     def __init__(self, root_socket: SQLAlchemySocket):
         self.root_socket = root_socket
         self._logger = logging.getLogger(__name__)
-        self._server_stats_frequency = root_socket.qcf_config.statistics_frequency
         self._geoip2_dir = root_socket.qcf_config.geoip2_dir
         self._geoip2_file_path = os.path.join(self._geoip2_dir, root_socket.qcf_config.geoip2_filename)
 
@@ -79,40 +72,12 @@ def __init__(self, root_socket: SQLAlchemySocket):
                 "GeoIP2 package not found. To include locations in access logs, install the geoip2 package"
             )
 
-        # Server stats job. Don't do it right at startup
-        self.add_internal_job_server_stats(self._server_stats_frequency)
-
         # Updating the geolocation database file
         self.add_internal_job_update_geoip2(0.0)
 
         # Updating the access log with geolocation info. Don't do it right at startup
         self.add_internal_job_geolocate_accesses(self._geolocate_accesses_frequency)
 
-    def add_internal_job_server_stats(self, delay: float, *, session: Optional[Session] = None):
-        """
-        Adds an internal job to update the server statistics
-
-        Parameters
-        ----------
-        delay
-            Schedule for this many seconds in the future
-        session
-            An existing SQLAlchemy session to use. If None, one will be created. If an existing session
-            is used, it will be flushed (but not committed) before returning from this function.
-        """
-        with self.root_socket.optional_session(session) as session:
-            self.root_socket.internal_jobs.add(
-                "update_server_stats",
-                now_at_utc() + timedelta(seconds=delay),
-                "serverinfo.update_server_stats",
-                {},
-                user_id=None,
-                unique_name=True,
-                after_function="serverinfo.add_internal_job_server_stats",
-                after_function_kwargs={"delay": self._server_stats_frequency},
-                session=session,
-            )
-
     def add_internal_job_update_geoip2(self, delay: float, *, session: Optional[Session] = None):
         """
         Adds an internal job to update the geoip database
@@ -386,92 +351,6 @@ def save_error(self, error_data: Dict[str, Any], *, session: Optional[Session] =
         session.flush()
         return log.id
 
-    def update_server_stats(self, session: Session, job_progress: JobProgress) -> None:
-        """
-        Obtains some statistics about the server and stores them in the database
-
-        Parameters
-        ----------
-        session
-            An existing SQLAlchemy session to use. If None, one will be created. If an existing session
-            is used, it will be flushed (but not committed) before returning from this function.
-        """
-
-        table_list = [BaseDatasetORM, MoleculeORM, BaseRecordORM, OutputStoreORM, AccessLogORM, InternalErrorLogORM]
-        db_name = self.root_socket.qcf_config.database.database_name
-
-        table_counts = {}
-        with self.root_socket.optional_session(session) as session:
-            # total size of the database
-            db_size = session.execute(text("SELECT pg_database_size(:dbname)"), {"dbname": db_name}).scalar()
-
-            # Count the number of rows in each table
-            for table in table_list:
-                table_name = table.__tablename__
-                table_counts[table_name] = session.execute(text(f"SELECT count(*) FROM {table_name}")).scalar()
-
-            table_info_sql = f"""
-                SELECT relname AS table_name
-                     , c.reltuples::BIGINT AS row_estimate
-                     , pg_total_relation_size(c.oid) AS total_bytes
-                     , pg_indexes_size(c.oid) AS index_bytes
-                     , pg_total_relation_size(reltoastrelid) AS toast_bytes
-                FROM pg_class c
-                LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
-                WHERE relkind = 'r' AND relname NOT LIKE 'pg_%' AND relname NOT LIKE 'sql_%';
-            """
-
-            table_info_result = session.execute(text(table_info_sql)).fetchall()
-
-            table_info_rows = [list(r) for r in table_info_result]
-            table_info = {
-                "columns": ["table_name", "row_estimate", "total_bytes", "index_bytes", "toast_bytes"],
-                "rows": table_info_rows,
-            }
-
-            # Task queue and Service queue status
-            task_query = (
-                session.query(BaseRecordORM.record_type, BaseRecordORM.status, func.count(TaskQueueORM.id))
-                .join(BaseRecordORM, BaseRecordORM.id == TaskQueueORM.record_id)
-                .group_by(BaseRecordORM.record_type, BaseRecordORM.status)
-                .all()
-            )
-            task_stats = {"columns": ["record_type", "status", "count"], "rows": [list(r) for r in task_query]}
-
-            service_query = (
-                session.query(BaseRecordORM.record_type, BaseRecordORM.status, func.count(ServiceQueueORM.id))
-                .join(BaseRecordORM, BaseRecordORM.id == ServiceQueueORM.record_id)
-                .group_by(BaseRecordORM.record_type, BaseRecordORM.status)
-                .all()
-            )
-            service_stats = {"columns": ["record_type", "status", "count"], "rows": [list(r) for r in service_query]}
-
-            # Calculate combined table info
-            table_size = 0
-            index_size = 0
-            for row in table_info_rows:
-                table_size += row[2] - row[3] - (row[4] or 0)
-                index_size += row[3]
-
-            # Build out final data
-            data = {
-                "collection_count": table_counts[BaseDatasetORM.__tablename__],
-                "molecule_count": table_counts[MoleculeORM.__tablename__],
-                "record_count": table_counts[BaseRecordORM.__tablename__],
-                "outputstore_count": table_counts[OutputStoreORM.__tablename__],
-                "access_count": table_counts[AccessLogORM.__tablename__],
-                "error_count": table_counts[InternalErrorLogORM.__tablename__],
-                "task_queue_status": task_stats,
-                "service_queue_status": service_stats,
-                "db_total_size": db_size,
-                "db_table_size": table_size,
-                "db_index_size": index_size,
-                "db_table_information": table_info,
-            }
-
-            log = ServerStatsLogORM(**data)
-            session.add(log)
-
     def query_access_log(
         self,
         query_data: AccessLogQueryFilters,
@@ -697,54 +576,6 @@ def query_error_log(
 
         return result_dicts
 
-    def query_server_stats(
-        self,
-        query_data: ServerStatsQueryFilters,
-        *,
-        session: Optional[Session] = None,
-    ) -> List[Dict[str, Any]]:
-        """
-        General query of server statistics
-
-        All search criteria are merged via 'and'. Therefore, records will only
-        be found that match all the criteria.
-
-        Parameters
-        ----------
-        query_data
-            Fields/filters to query for
-        session
-            An existing SQLAlchemy session to use. If None, one will be created. If an existing session
-            is used, it will be flushed (but not committed) before returning from this function.
-
-        Returns
-        -------
-        :
-            A list of server statistic entries (as dictionaries) that were found in the database.
-        """
-
-        and_query = []
-        if query_data.before:
-            and_query.append(ServerStatsLogORM.timestamp <= query_data.before)
-        if query_data.after:
-            and_query.append(ServerStatsLogORM.timestamp >= query_data.after)
-
-        with self.root_socket.optional_session(session, True) as session:
-            stmt = select(ServerStatsLogORM).filter(and_(True, *and_query))
-
-            if query_data.cursor is not None:
-                stmt = stmt.where(ServerStatsLogORM.id < query_data.cursor)
-
-            stmt = stmt.order_by(ServerStatsLogORM.id.desc())
-            stmt = stmt.limit(query_data.limit)
-            stmt = stmt.distinct(ServerStatsLogORM.id)
-            results = session.execute(stmt).scalars().all()
-
-            # TODO - could be done in sql query (with subquery?)
-            result_dicts = [x.model_dict() for x in sorted(results, key=lambda x: x.timestamp, reverse=True)]
-
-            return result_dicts
-
     def delete_access_logs(self, before: datetime, *, session: Optional[Session] = None) -> int:
         """
         Deletes access logs that were created before a certain date & time
@@ -788,25 +619,3 @@ def delete_error_logs(self, before: datetime, *, session: Optional[Session] = No
             stmt = delete(InternalErrorLogORM).where(InternalErrorLogORM.error_date < before)
             r = session.execute(stmt)
             return r.rowcount
-
-    def delete_server_stats(self, before: datetime, *, session: Optional[Session] = None) -> int:
-        """
-        Deletes server statistics that were created before a certain date & time
-
-        Parameters
-        ----------
-        before
-            Delete server stats before this time
-        session
-            An existing SQLAlchemy session to use. If None, one will be created. If an existing session
-            is used, it will be flushed (but not committed) before returning from this function.
-
-        Returns
-        -------
-            The number of deleted entries
-        """
-
-        with self.root_socket.optional_session(session, False) as session:
-            stmt = delete(ServerStatsLogORM).where(ServerStatsLogORM.timestamp < before)
-            r = session.execute(stmt)
-            return r.rowcount
diff --git a/qcfractal/qcfractal/components/serverinfo/test_stats_client.py b/qcfractal/qcfractal/components/serverinfo/test_stats_client.py
deleted file mode 100644
index 09288d549..000000000
--- a/qcfractal/qcfractal/components/serverinfo/test_stats_client.py
+++ /dev/null
@@ -1,37 +0,0 @@
-from __future__ import annotations, annotations
-
-from typing import TYPE_CHECKING
-
-from qcfractal.testing_helpers import DummyJobProgress
-from qcportal.utils import now_at_utc
-
-if TYPE_CHECKING:
-    from qcarchivetesting.testing_classes import QCATestingSnowflake
-
-
-def test_serverinfo_client_delete_stats(snowflake: QCATestingSnowflake):
-    storage_socket = snowflake.get_storage_socket()
-    snowflake_client = snowflake.client()
-
-    time_0 = now_at_utc()
-    with storage_socket.session_scope() as session:
-        storage_socket.serverinfo.update_server_stats(session=session, job_progress=DummyJobProgress())
-    time_12 = now_at_utc()
-
-    with storage_socket.session_scope() as session:
-        storage_socket.serverinfo.update_server_stats(session=session, job_progress=DummyJobProgress())
-
-    query_res = snowflake_client.query_server_stats()
-    stats = list(query_res)
-    assert len(stats) == 2
-
-    n_deleted = snowflake_client.delete_server_stats(before=time_0)
-    assert n_deleted == 0
-
-    n_deleted = snowflake_client.delete_server_stats(before=time_12)
-    assert n_deleted == 1
-
-    query_res = snowflake_client.query_server_stats()
-    stats2 = list(query_res)
-    assert len(stats2) == 1
-    assert stats2[0] == stats[0]
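
Two details above are worth connecting. The deleted add_internal_job_server_stats rescheduled itself through after_function, so at any moment one "update_server_stats" entry was sitting in internal_jobs waiting to fire; that is why the migration's upgrade() also issues DELETE FROM internal_jobs WHERE name = 'update_server_stats' — a leftover row would reference a socket method that no longer exists. A self-contained toy of the same self-rescheduling pattern (illustrative only; not the real internal_jobs API):

    # each run of the job schedules its successor, forming a periodic chain
    import heapq
    from datetime import datetime, timedelta, timezone

    queue: list = []

    def add_job(name: str, delay_s: float) -> None:
        when = datetime.now(timezone.utc) + timedelta(seconds=delay_s)
        heapq.heappush(queue, (when, name))  # pending entry, like an internal_jobs row

    def run_update_server_stats() -> None:
        ...  # collect statistics (the body deleted in this PR)
        add_job("update_server_stats", 3600.0)  # the after_function step: next link
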
diff --git a/qcfractal/qcfractal/components/serverinfo/test_stats_client_query.py b/qcfractal/qcfractal/components/serverinfo/test_stats_client_query.py
deleted file mode 100644
index 147ac78a7..000000000
--- a/qcfractal/qcfractal/components/serverinfo/test_stats_client_query.py
+++ /dev/null
@@ -1,50 +0,0 @@
-from __future__ import annotations
-
-import pytest
-
-from qcarchivetesting.testing_classes import QCATestingSnowflake
-from qcfractal.testing_helpers import DummyJobProgress
-
-
-@pytest.fixture(scope="module")
-def queryable_stats_client(postgres_server, pytestconfig):
-    pg_harness = postgres_server.get_new_harness("serverinfo_test_stats")
-    encoding = pytestconfig.getoption("--client-encoding")
-
-    # Don't log accesses
-    with QCATestingSnowflake(pg_harness, encoding, log_access=False) as server:
-        # generate a bunch of test data
-        storage_socket = server.get_storage_socket()
-        with storage_socket.session_scope() as session:
-            for i in range(100):
-                storage_socket.serverinfo.update_server_stats(session=session, job_progress=DummyJobProgress())
-
-        yield server.client()
-
-
-def test_serverinfo_client_query_stats(queryable_stats_client):
-    query_res = queryable_stats_client.query_server_stats()
-    stats = list(query_res)
-
-    test_time = stats[21].timestamp
-    query_res = queryable_stats_client.query_server_stats(before=test_time)
-    stats = list(query_res)
-    assert len(stats) == 79
-
-    query_res = queryable_stats_client.query_server_stats(after=test_time)
-    stats = list(query_res)
-    assert len(stats) == 22
-
-
-def test_serverinfo_client_query_stats_empty_iter(queryable_stats_client):
-    query_res = queryable_stats_client.query_server_stats()
-    assert len(query_res._current_batch) < queryable_stats_client.api_limits["get_server_stats"]
-
-    stats = list(query_res)
-    assert len(stats) == 100
-
-
-def test_serverinfo_client_query_stats_limit(queryable_stats_client):
-    query_res = queryable_stats_client.query_server_stats(limit=77)
-    stats = list(query_res)
-    assert len(stats) == 77
diff --git a/qcfractal/qcfractal/components/serverinfo/test_stats_socket.py b/qcfractal/qcfractal/components/serverinfo/test_stats_socket.py
deleted file mode 100644
index a1f5360f2..000000000
--- a/qcfractal/qcfractal/components/serverinfo/test_stats_socket.py
+++ /dev/null
@@ -1,58 +0,0 @@
-from __future__ import annotations
-
-from typing import TYPE_CHECKING
-
-from qcfractal.testing_helpers import DummyJobProgress
-from qcportal.serverinfo import ServerStatsQueryFilters
-from qcportal.utils import now_at_utc
-
-if TYPE_CHECKING:
-    from qcfractal.db_socket import SQLAlchemySocket
-
-
-def test_serverinfo_socket_update_stats(storage_socket: SQLAlchemySocket):
-    stats = storage_socket.serverinfo.query_server_stats(ServerStatsQueryFilters())
-    assert len(stats) == 0
-
-    time_0 = now_at_utc()
-
-    # Force saving the stats
-    with storage_socket.session_scope() as session:
-        storage_socket.serverinfo.update_server_stats(session=session, job_progress=DummyJobProgress())
-
-    time_1 = now_at_utc()
-
-    stats = storage_socket.serverinfo.query_server_stats(ServerStatsQueryFilters())
-    assert len(stats) == 1
-
-    assert stats[0]["molecule_count"] == 0
-    assert stats[0]["outputstore_count"] == 0
-    assert stats[0]["record_count"] == 0
-
-    time_0 = now_at_utc()
-
-    # Force saving the stats again
-    with storage_socket.session_scope() as session:
-        storage_socket.serverinfo.update_server_stats(session=session, job_progress=DummyJobProgress())
-
-    # Should get the latest now
-    stats2 = storage_socket.serverinfo.query_server_stats(ServerStatsQueryFilters())
-    assert len(stats2) == 2
-
-    # Should return newest first
-    assert stats2[1] == stats[0]
-    assert stats2[0]["timestamp"] > time_0
-    assert stats2[1]["timestamp"] < time_0
-
-    # one more update
-    with storage_socket.session_scope() as session:
-        storage_socket.serverinfo.update_server_stats(session=session, job_progress=DummyJobProgress())
-
-    stats = storage_socket.serverinfo.query_server_stats(ServerStatsQueryFilters(before=now_at_utc()))
-    assert len(stats) == 3
-
-    stats = storage_socket.serverinfo.query_server_stats(ServerStatsQueryFilters(before=now_at_utc(), after=time_1))
-    assert len(stats) == 2
-
-    stats = storage_socket.serverinfo.query_server_stats(ServerStatsQueryFilters(before=time_1))
-    assert len(stats) == 1
diff --git a/qcfractal/qcfractal/test_periodics.py b/qcfractal/qcfractal/test_periodics.py
index fad2beb80..1deaa9329 100644
--- a/qcfractal/qcfractal/test_periodics.py
+++ b/qcfractal/qcfractal/test_periodics.py
@@ -9,8 +9,6 @@
 from qcfractal.components.torsiondrive.testing_helpers import submit_test_data as submit_td_test_data
 from qcportal.managers import ManagerName, ManagerStatusEnum
 from qcportal.record_models import RecordStatusEnum
-from qcportal.serverinfo import ServerStatsQueryFilters
-from qcportal.utils import now_at_utc
 
 if TYPE_CHECKING:
     from qcarchivetesting.testing_classes import QCATestingSnowflake
@@ -18,28 +16,6 @@
 pytestmark = pytest.mark.slow
 
 
-def test_periodics_server_stats(snowflake: QCATestingSnowflake):
-    storage_socket = snowflake.get_storage_socket()
-
-    stats = storage_socket.serverinfo.query_server_stats(ServerStatsQueryFilters())
-    assert len(stats) == 0
-
-    sleep_time = snowflake._qcf_config.statistics_frequency
-
-    snowflake.start_job_runner()
-    time.sleep(sleep_time * 0.8)
-
-    for i in range(5):
-        time_0 = now_at_utc()
-        time.sleep(sleep_time)
-        time_1 = now_at_utc()
-
-        filters = ServerStatsQueryFilters(before=time_1, after=time_0)
-        stats = storage_socket.serverinfo.query_server_stats(filters)
-        assert len(stats) == 1
-        assert time_0 < stats[0]["timestamp"] < time_1
-
-
 def test_periodics_manager_heartbeats(snowflake: QCATestingSnowflake):
     storage_socket = snowflake.get_storage_socket()
 
diff --git a/qcportal/qcportal/client.py b/qcportal/qcportal/client.py
index 8937f757c..f32c11a84 100644
--- a/qcportal/qcportal/client.py
+++ b/qcportal/qcportal/client.py
@@ -101,8 +101,6 @@
     AccessLogQueryIterator,
     ErrorLogQueryFilters,
     ErrorLogQueryIterator,
-    ServerStatsQueryFilters,
-    ServerStatsQueryIterator,
     DeleteBeforeDateBody,
 )
 from .utils import make_list, chunk_iterable, process_chunk_iterable
@@ -2561,59 +2559,6 @@ def query_managers(
         filter_data = ManagerQueryFilters(**filter_dict)
         return ManagerQueryIterator(self, filter_data)
 
-    ##############################################################
-    # Server statistics and logs
-    ##############################################################
-
-    def query_server_stats(
-        self,
-        *,
-        before: Optional[Union[datetime, str]] = None,
-        after: Optional[Union[datetime, str]] = None,
-        limit: Optional[int] = None,
-    ) -> ServerStatsQueryIterator:
-        """
-        Query server statistics
-
-        These statistics are captured at certain times, and are available for querying (as long
-        as they are not deleted)
-
-        Parameters
-        ----------
-        before
-            Return statistics captured before the specified date/time
-        after
-            Return statistics captured after the specified date/time
-        limit
-            The maximum number of statistics entries to return. Note that the server limit is always obeyed.
-
-        Returns
-        -------
-        :
-            An iterator that can be used to retrieve the results of the query
-        """
-
-        filter_data = ServerStatsQueryFilters(before=before, after=after, limit=limit)
-        return ServerStatsQueryIterator(self, filter_data)
-
-    def delete_server_stats(self, before: datetime) -> int:
-        """
-        Delete server statistics from the server
-
-        Parameters
-        ----------
-        before
-            Delete statistics captured before the given date/time
-
-        Returns
-        -------
-        :
-            The number of statistics entries deleted from the server
-        """
-
-        body = DeleteBeforeDateBody(before=before)
-        return self.make_request("post", "api/v1/server_stats/bulkDelete", int, body=body)
-
     def query_access_log(
         self,
         *,
diff --git a/qcportal/qcportal/serverinfo/__init__.py b/qcportal/qcportal/serverinfo/__init__.py
index ee4d69721..587dbac5e 100644
--- a/qcportal/qcportal/serverinfo/__init__.py
+++ b/qcportal/qcportal/serverinfo/__init__.py
@@ -9,7 +9,4 @@
     ErrorLogQueryFilters,
     ErrorLogEntry,
     ErrorLogQueryIterator,
-    ServerStatsQueryFilters,
-    ServerStatsEntry,
-    ServerStatsQueryIterator,
 )
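
On the client side the removal is symmetric: PortalClient from this release no longer has query_server_stats or delete_server_stats. Code that must run against both old and new installations can branch on the attribute rather than on version strings — a sketch, where client is assumed to be an already-constructed PortalClient:

    # hypothetical compatibility guard for mixed-version deployments
    if hasattr(client, "query_server_stats"):
        stats = list(client.query_server_stats(limit=10))  # pre-removal API
    else:
        stats = []  # statistics were removed server-side; nothing to fetch
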
diff --git a/qcportal/qcportal/serverinfo/models.py b/qcportal/qcportal/serverinfo/models.py
index 6311e8e3d..90522be09 100644
--- a/qcportal/qcportal/serverinfo/models.py
+++ b/qcportal/qcportal/serverinfo/models.py
@@ -193,74 +193,4 @@ def _request(self) -> List[ErrorLogEntry]:
             "api/v1/server_errors/query",
             List[ErrorLogEntry],
             body=self._query_filters,
-        )
-
-
-class ServerStatsQueryFilters(QueryModelBase):
-    before: Optional[datetime] = None
-    after: Optional[datetime] = None
-
-    @validator("before", "after", pre=True)
-    def validate_lists(cls, v):
-        return validate_list_to_single(v)
-
-    @validator("before", "after", pre=True)
-    def parse_dates(cls, v):
-        if isinstance(v, str):
-            return date_parser(v)
-
-        return v
-
-
-class ServerStatsEntry(BaseModel):
-    class Config:
-        extra = Extra.forbid
-
-    id: int
-    timestamp: datetime
-
-    collection_count: Optional[int]
-    molecule_count: Optional[int]
-    record_count: Optional[int]
-    outputstore_count: Optional[int]
-    access_count: Optional[int]
-    error_count: Optional[int]
-
-    task_queue_status: Optional[Dict[str, Any]]
-    service_queue_status: Optional[Dict[str, Any]]
-
-    db_total_size: Optional[int]
-    db_table_size: Optional[int]
-    db_index_size: Optional[int]
-    db_table_information: Dict[str, Any]
-
-
-class ServerStatsQueryIterator(QueryIteratorBase[ServerStatsEntry]):
-    """
-    Iterator for server statistics queries
-
-    This iterator transparently handles batching and pagination over the results
-    of a server statistics query.
-    """
-
-    def __init__(self, client, query_filters: ServerStatsQueryFilters):
-        """
-        Construct an iterator
-
-        Parameters
-        ----------
-        client
-            QCPortal client object used to contact/retrieve data from the server
-        query_filters
-            The actual query information to send to the server
-        """
-
-        batch_limit = client.api_limits["get_server_stats"] // 4
-        QueryIteratorBase.__init__(self, client, query_filters, batch_limit)
-
-    def _request(self) -> List[ServerStatsEntry]:
-        return self._client.make_request(
-            "post",
-            "api/v1/server_stats/query",
-            List[ServerStatsEntry],
-            body=self._query_filters,
-        )
+        )
\ No newline at end of file
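
Deployments pick up the schema change through the usual Alembic upgrade path. A minimal sketch using the plain Alembic API (QCFractal normally drives migrations through its own server CLI; the ini path here is an assumption):

    # hypothetical standalone driver for the migration above
    from alembic import command
    from alembic.config import Config

    cfg = Config("alembic.ini")           # points at qcfractal's alembic environment
    command.upgrade(cfg, "73b4838a6839")  # or "head"; executes upgrade() above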