diff --git a/config/notebooks/quick.json b/config/notebooks/quick.json
new file mode 100644
index 0000000..5f4f04c
--- /dev/null
+++ b/config/notebooks/quick.json
@@ -0,0 +1,28 @@
+{
+"notebooks": [
+{
+"name": "GaiaDMPSetup",
+"filepath": "https://raw.githubusercontent.com/wfau/aglais-testing/bc9b9787b5b6225e11df5a4ef0272bcec660a44e/notebooks/GaiaDMP_validation.json",
+"totaltime": 45,
+"results": []
+},
+{
+"name": "Mean_proper_motions_over_the_sky",
+"filepath": "https://raw.githubusercontent.com/wfau/aglais-testing/bc9b9787b5b6225e11df5a4ef0272bcec660a44e/notebooks/public_examples/Mean_proper_motions_over_the_sky.json",
+"totaltime": 125,
+"results": []
+},
+{
+"name": "Source_counts_over_the_sky.json",
+"filepath": "https://raw.githubusercontent.com/wfau/aglais-testing/bc9b9787b5b6225e11df5a4ef0272bcec660a44e/notebooks/public_examples/Source_counts_over_the_sky.json",
+"totaltime": 55,
+"results": []
+},
+{
+"name": "Library_Validation.json",
+"filepath": "https://raw.githubusercontent.com/wfau/aglais-testing/bc9b9787b5b6225e11df5a4ef0272bcec660a44e/notebooks/Library_validation.json",
+"totaltime": 10,
+"results": []
+}
+]
+}
diff --git a/gdmp_benchmark/__init__.py b/gdmp_benchmark/__init__.py
index 806600f..4c820c4 100644
--- a/gdmp_benchmark/__init__.py
+++ b/gdmp_benchmark/__init__.py
@@ -2,4 +2,4 @@
 GDMP Benchmarker package
 Used to run benchmarks for the Gaia Data Mining platform
 """
-from .gdmp_benchmark import GDMPBenchmarker, Results, Timing, Notebook, Status
+from .gdmp_benchmark import Results, Timing, Notebook, Status, GDMPBenchmarker
diff --git a/gdmp_benchmark/gdmp_benchmark.py b/gdmp_benchmark/gdmp_benchmark.py
index db1849e..b0f00c5 100644
--- a/gdmp_benchmark/gdmp_benchmark.py
+++ b/gdmp_benchmark/gdmp_benchmark.py
@@ -17,7 +17,6 @@
 from multiprocessing import Pool, current_process
 from typing import List, Dict, Protocol
 from dataclasses import dataclass, field, fields
-from pprint import pprint
 import simplejson as json
 from simplejson.errors import JSONDecodeError
 import requests
@@ -158,7 +157,7 @@ def __repr__(self):
     def to_json(self):
         """Return as json"""
         return {
-            "result": self.result,
+            "result": self.result.to_json(),
             "elapsed": f"{self.totaltime:.2f}",
             "percent": self.percent_change,
             "start": self.start,
@@ -249,17 +248,16 @@ def __repr__(self):
 
     def to_json(self) -> str:
         """Convert to JSON"""
-        return json.dumps(
+        return json.dumps(str(
             {
                 "name": self.name,
                 "result": str(self.result),
                 "outputs": self.outputs,
                 "messages": self.messages,
-                "time": self.time,
+                "time": self.time.to_json(),
                 "logs": self.logs,
-            },
-            indent=4,
-        )
+            }
+        ))
 
 
 class AlertStrategies(Enum):
@@ -810,7 +808,7 @@ def _run_single(
         delay_start: int = 0,
         delay_notebook: int = 0,
         delete: bool = True,
-    ):
+    ) -> list:
         """
         Run a single instance of the benchmark test
         Args:
@@ -821,7 +819,7 @@
             delay_notebook: Delay to the start of the notebook in seconds
             delete: Whether to delete the notebooks after the run
         Returns:
-            dict: The results
+            list: The results
         """
 
         results = []
@@ -971,6 +969,7 @@ def main(args: List[str] = None):
     print("}")
     print("---start---")
 
+
     results = GDMPBenchmarker(
         userconfig=user_config,
         zeppelin_url=zeppelin_url,
@@ -986,8 +985,7 @@
         alerter.send_alert(
             content=results, alert_strategy=AlertStrategies.ONLY_ON_ERROR
         )
-
-    pprint(results)
+    print(json.dumps(results, default=lambda o: o.to_dict(), indent=4))
     print("---end---")
 
 
diff --git a/tests/test_gdmp_benchmarker.py b/tests/test_gdmp_benchmarker.py
index 65264eb..9c15584 100644
--- a/tests/test_gdmp_benchmarker.py
+++ b/tests/test_gdmp_benchmarker.py
@@ -1,6 +1,5 @@
 import unittest
 from gdmp_benchmark import GDMPBenchmarker, Results, Timing, Notebook, Status
-import unittest.mock as mock
 
 """
 The GDMPBenchmarker class is responsible for benchmarking Zeppelin notebooks. It allows users to run notebooks and compare their output against expected output. The class can run notebooks in parallel, and it can delete the notebooks after they have been run. The class also generates user configurations for Zeppelin, and it can validate the configuration passed in by the user.
diff --git a/tests/test_models.py b/tests/test_models.py
index 0b61067..3503285 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -49,7 +49,7 @@ def test_results_to_dict(self):
     # Tests that a Results object cannot be created with invalid values for attributes.
     def test_results_invalid_attributes(self):
         with self.assertRaises(TypeError):
-            result = Results(result="success", msg="Test successful", output=["test output"], notebookid="1234",
+            Results(result="success", msg="Test successful", output=["test output"], notebookid="1234",
                              user_config="test config", messages=["test message"], logs="test logs",
                              time=Timing(result="success", totaltime="10s", start="2022-01-01 00:00:00", finish="2022-01-01 00:00:10"), outputs={"test": "output"}, name=1234)
 
@@ -101,7 +101,7 @@ def test_timing_to_string(self):
     # Tests creating a Timing object with a negative totaltime.
    def test_timing_negative_totaltime(self):
         with self.assertRaises(ValueError):
-            timing = Timing(result=Status.PASS, totaltime=-10, start="2022-01-01 00:00:00", finish="2022-01-01 00:00:10")
+            Timing(result=Status.PASS, totaltime=-10, start="2022-01-01 00:00:00", finish="2022-01-01 00:00:10")
 
     # Tests that the percent_change attribute is calculated correctly based on expected and actual totaltime.
     def test_percent_change(self):
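
Note on the final print in main() above: it relies on the default= hook of json.dumps, which the encoder calls for any object it cannot serialise natively (simplejson, which the package imports as json, honours the same hook as the stdlib module). Below is a minimal sketch of the pattern, assuming the result objects expose a to_dict() method as the lambda in the diff implies; the Result dataclass here is hypothetical and stands in for the package's own result types.

    # Hypothetical stand-in for the package's result objects; only the
    # to_dict() method matters for the default= hook shown in main().
    import json
    from dataclasses import dataclass, asdict
    from typing import List

    @dataclass
    class Result:
        name: str
        elapsed: float

        def to_dict(self) -> dict:
            # asdict() recursively converts dataclass fields to plain types
            return asdict(self)

    results: List[Result] = [Result(name="GaiaDMPSetup", elapsed=42.5)]

    # json.dumps cannot serialise Result directly, so it invokes default=
    # for each unrecognised object and encodes the returned dict instead.
    print(json.dumps(results, default=lambda o: o.to_dict(), indent=4))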