Merge pull request #56 from stvoutsin/main
Fixed Benchmarker formatting, tests & timing value for quick test
stvoutsin authored Nov 22, 2023
2 parents 0b8afd3 + 203666b commit 4702731
Showing 5 changed files with 40 additions and 15 deletions.
28 changes: 28 additions & 0 deletions config/notebooks/quick.json
@@ -0,0 +1,28 @@
{
"notebooks": [
{
"name": "GaiaDMPSetup",
"filepath": "https://raw.githubusercontent.com/wfau/aglais-testing/bc9b9787b5b6225e11df5a4ef0272bcec660a44e/notebooks/GaiaDMP_validation.json",
"totaltime": 45,
"results": []
},
{
"name": "Mean_proper_motions_over_the_sky",
"filepath": "https://raw.githubusercontent.com/wfau/aglais-testing/bc9b9787b5b6225e11df5a4ef0272bcec660a44e/notebooks/public_examples/Mean_proper_motions_over_the_sky.json",
"totaltime": 125,
"results": []
},
{
"name": "Source_counts_over_the_sky.json",
"filepath": "https://raw.githubusercontent.com/wfau/aglais-testing/bc9b9787b5b6225e11df5a4ef0272bcec660a44e/notebooks/public_examples/Source_counts_over_the_sky.json",
"totaltime": 55,
"results": []
},
{
"name": "Library_Validation.json",
"filepath": "https://raw.githubusercontent.com/wfau/aglais-testing/bc9b9787b5b6225e11df5a4ef0272bcec660a44e/notebooks/Library_validation.json",
"totaltime": 10,
"results": []
}
]
}
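For context, a minimal sketch of how a runner might consume this new config — assuming the file lives at `config/notebooks/quick.json` and that `totaltime` is the expected runtime budget in seconds (both assumptions; the real parsing lives in `gdmp_benchmark.py`):

```python
import json

# Hypothetical loader sketch; the actual loading logic is in gdmp_benchmark.py.
with open("config/notebooks/quick.json") as f:  # assumed path
    config = json.load(f)

for nb in config["notebooks"]:
    # "totaltime" is assumed to be the expected runtime in seconds.
    print(f"{nb['name']}: expect ~{nb['totaltime']}s from {nb['filepath']}")
```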
2 changes: 1 addition & 1 deletion gdmp_benchmark/__init__.py
@@ -2,4 +2,4 @@
GDMP Benchmarker package
Used to run benchmarks for the Gaia Data Mining platform
"""
from .gdmp_benchmark import GDMPBenchmarker, Results, Timing, Notebook, Status
from .gdmp_benchmark import Results, Timing, Notebook, Status, GDMPBenchmarker
20 changes: 9 additions & 11 deletions gdmp_benchmark/gdmp_benchmark.py
@@ -17,7 +17,6 @@
from multiprocessing import Pool, current_process
from typing import List, Dict, Protocol
from dataclasses import dataclass, field, fields
from pprint import pprint
import simplejson as json
from simplejson.errors import JSONDecodeError
import requests
@@ -158,7 +157,7 @@ def __repr__(self):
def to_json(self):
"""Return as json"""
return {
"result": self.result,
"result": self.result.to_json(),
"elapsed": f"{self.totaltime:.2f}",
"percent": self.percent_change,
"start": self.start,
@@ -249,17 +248,16 @@ def __repr__(self):
def to_json(self) -> str:
"""Convert to JSON"""

return json.dumps(
return json.dumps(str(
{
"name": self.name,
"result": str(self.result),
"outputs": self.outputs,
"messages": self.messages,
"time": self.time,
"time": self.time.to_json(),
"logs": self.logs,
},
indent=4,
)
}
))


class AlertStrategies(Enum):
@@ -810,7 +808,7 @@ def _run_single(
delay_start: int = 0,
delay_notebook: int = 0,
delete: bool = True,
):
) -> list:
"""
Run a single instance of the benchmark test
Args:
@@ -821,7 +819,7 @@
delay_notebook: Delay to the start of the notebook in seconds
delete: Whether to delete the notebooks after the run
Returns:
dict: The results
list: The results
"""

results = []
@@ -971,6 +969,7 @@ def main(args: List[str] = None):
print("}")

print("---start---")

results = GDMPBenchmarker(
userconfig=user_config,
zeppelin_url=zeppelin_url,
@@ -986,8 +985,7 @@
alerter.send_alert(
content=results, alert_strategy=AlertStrategies.ONLY_ON_ERROR
)

pprint(results)
print(json.dumps(results, default=lambda o: o.to_dict(), indent=4))
print("---end---")


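The replaced `pprint(results)` call now goes through `json.dumps(results, default=lambda o: o.to_dict(), indent=4)`. The `default=` hook is called by `json.dumps` (and by the `simplejson` used in this module, which takes the same keyword) for any object it cannot serialise natively. A minimal sketch of the pattern, using a hypothetical stand-in dataclass rather than the real `Results` model:

```python
import json
from dataclasses import dataclass, asdict

@dataclass
class Result:  # hypothetical stand-in for the real Results model
    name: str
    elapsed: float

    def to_dict(self) -> dict:
        return asdict(self)

results = [Result("GaiaDMPSetup", 43.2), Result("Library_Validation", 9.8)]

# json.dumps invokes the default hook for objects it cannot serialise itself,
# so each model object is converted through its own to_dict() before encoding.
print(json.dumps(results, default=lambda o: o.to_dict(), indent=4))
```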
1 change: 0 additions & 1 deletion tests/test_gdmp_benchmarker.py
@@ -1,6 +1,5 @@
import unittest
from gdmp_benchmark import GDMPBenchmarker, Results, Timing, Notebook, Status
import unittest.mock as mock

"""
The GDMPBenchmarker class is responsible for benchmarking Zeppelin notebooks. It allows users to run notebooks and compare their output against expected output. The class can run notebooks in parallel, and it can delete the notebooks after they have been run. The class also generates user configurations for Zeppelin, and it can validate the configuration passed in by the user.
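Based on the import line above and the `main()` call elsewhere in this diff, a hedged sketch of how the class under test might be constructed — the keyword names match the diff, but the config path and URL are placeholders, and no run method is shown because the public entry point here is `main()`:

```python
import unittest
from gdmp_benchmark import GDMPBenchmarker

class TestConstruction(unittest.TestCase):
    def test_benchmarker_accepts_config(self):
        # Keyword names taken from the main() call in this diff;
        # the values themselves are hypothetical placeholders.
        benchmarker = GDMPBenchmarker(
            userconfig="config/user.yml",          # assumed path
            zeppelin_url="http://localhost:8080",  # assumed URL
        )
        self.assertIsNotNone(benchmarker)

if __name__ == "__main__":
    unittest.main()
```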
4 changes: 2 additions & 2 deletions tests/test_models.py
@@ -49,7 +49,7 @@ def test_results_to_dict(self):
# Tests that a Results object cannot be created with invalid values for attributes.
def test_results_invalid_attributes(self):
with self.assertRaises(TypeError):
result = Results(result="success", msg="Test successful", output=["test output"], notebookid="1234",
Results(result="success", msg="Test successful", output=["test output"], notebookid="1234",
user_config="test config", messages=["test message"], logs="test logs",
time=Timing(result="success", totaltime="10s", start="2022-01-01 00:00:00",
finish="2022-01-01 00:00:10"), outputs={"test": "output"}, name=1234)
@@ -101,7 +101,7 @@ def test_timing_to_string(self):
# Tests creating a Timing object with a negative totaltime.
def test_timing_negative_totaltime(self):
with self.assertRaises(ValueError):
timing = Timing(result=Status.PASS, totaltime=-10, start="2022-01-01 00:00:00", finish="2022-01-01 00:00:10")
Timing(result=Status.PASS, totaltime=-10, start="2022-01-01 00:00:00", finish="2022-01-01 00:00:10")

# Tests that the percent_change attribute is calculated correctly based on expected and actual totaltime.
def test_percent_change(self):
Expand Down
