Skip to content

Commit

Permalink
chg ! tests
Browse files Browse the repository at this point in the history
  • Loading branch information
vitali-yanushchyk-valor committed May 28, 2024
1 parent 90ddb80 commit 91445ff
Show file tree
Hide file tree
Showing 3 changed files with 19 additions and 16 deletions.
20 changes: 10 additions & 10 deletions src/hope_dedup_engine/apps/core/storage.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,16 +30,16 @@ def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.azure_container = settings.AZURE_CONTAINER_HOPE

# def delete(self, name):
# raise RuntimeError("This storage cannot delete files")
def delete(self, name):
    """Reject deletion attempts: this storage is strictly read-only."""
    message = "This storage cannot delete files"
    raise RuntimeError(message)

# def open(self, name, mode="rb"):
# if "w" in mode:
# raise RuntimeError("This storage cannot open files in write mode")
# return super().open(name, mode="rb")
def open(self, name, mode="rb"):
    """Open *name* for reading; any write mode is refused.

    The caller-supplied mode is ignored for the actual open — the file
    is always handed back in binary read mode ("rb").
    """
    if "w" not in mode:
        # Force binary read mode regardless of what the caller asked for.
        return super().open(name, mode="rb")
    raise RuntimeError("This storage cannot open files in write mode")

# def save(self, name, content, max_length=None):
# raise RuntimeError("This storage cannot save files")
def save(self, name, content, max_length=None):
    """Reject save attempts: this storage is strictly read-only."""
    message = "This storage cannot save files"
    raise RuntimeError(message)

# def listdir(self, path=""):
# return []
def listdir(self, path=""):
    """Return the listing for *path* as the ``(directories, files)`` 2-tuple
    required by the Django ``Storage.listdir`` contract.

    This read-only storage deliberately exposes no listing, so both lists
    are always empty.

    :param path: directory path to list (ignored; kept for API compatibility)
    :return: tuple of two empty lists ``([], [])``
    """
    # NOTE(review): previously returned a bare ``[]``, which breaks callers
    # that unpack per the Storage API: ``dirs, files = storage.listdir(path)``.
    return [], []
13 changes: 8 additions & 5 deletions tests/faces/test_celery_tasks.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
from unittest.mock import patch

from celery import states
from faces_const import FILENAME, FILENAMES

from hope_dedup_engine.apps.faces.celery_tasks import deduplicate
Expand All @@ -20,12 +21,12 @@ def test_deduplicate_task_already_running(mock_redis_client, mock_duplication_de
def test_deduplicate_task_success(dd, mock_redis_client, mock_duplication_detector, celery_app, celery_worker):
    """Happy path: the task acquires the Redis lock, returns the detector's
    duplicate set, and releases the lock afterwards."""
    # Unpack the mocked Redis set/delete callables used for the task lock.
    mock_set, mock_delete = mock_redis_client
    mock_find = mock_duplication_detector

    mock_set.return_value = True  # Lock is acquired
    mock_find.return_value = set(FILENAMES[:2])  # Assuming the first two are duplicates based on mock data

    # Run the task eagerly with the detector replaced by the `dd` fixture.
    with patch("hope_dedup_engine.apps.faces.celery_tasks.DuplicationDetector", return_value=dd):
        task_result = deduplicate.apply(args=[FILENAME]).get()

    assert task_result == set(FILENAMES[:2])  # Assuming the first two are duplicates based on mock data
    # Lock key is derived from the filename; nx=True makes acquisition atomic,
    # ex=3600 gives the lock a 1-hour expiry.
    mock_set.assert_called_once_with(f"Deduplicate_{FILENAME}", "true", nx=True, ex=3600)
    mock_delete.assert_called_once_with(f"Deduplicate_{FILENAME}")  # Lock is released
Expand All @@ -41,9 +42,11 @@ def test_deduplicate_task_exception_handling(
with patch("hope_dedup_engine.apps.faces.celery_tasks.DuplicationDetector", return_value=dd):
task = deduplicate.apply(args=[FILENAME])

assert task.result is None # Task is not executed
mock_duplication_detector.assert_called_once() # DeduplicationDetector is called
assert task.state == states.FAILURE
assert isinstance(task.result, Exception)
assert str(task.result) == "Simulated task failure"
assert task.traceback is not None

# Check that the Redis lock was acquired and then released
mock_find.assert_called_once()
mock_set.assert_called_once_with(f"Deduplicate_{FILENAME}", "true", nx=True, ex=3600)
mock_delete.assert_called_once_with(f"Deduplicate_{FILENAME}")
mock_delete.assert_called_once_with(f"Deduplicate_{FILENAME}") # Lock is released
2 changes: 1 addition & 1 deletion tests/faces/test_duplication_detector.py
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,6 @@ def test_load_encodings_all_exception_handling(dd):
try:
dd._load_encodings_all()
except Exception:
print(f"\n{dd.logger.exception.assert_called_once()=}")
...
dd.logger.reset_mock()

Expand Down Expand Up @@ -149,6 +148,7 @@ def test_encode_face_invalid_region(dd, image_bytes_io):

# Check that the error was logged with the correct message
mock_error_logger.assert_called_once_with(f"Invalid face region {(0, 0, 10)}")
dd.logger.reset_mock()


def test_encode_face_exception_handling(dd):
Expand Down

0 comments on commit 91445ff

Please sign in to comment.