diff --git a/generated/docs/ImageQueriesApi.md b/generated/docs/ImageQueriesApi.md
index 4ed42ba9..248933f8 100644
--- a/generated/docs/ImageQueriesApi.md
+++ b/generated/docs/ImageQueriesApi.md
@@ -205,7 +205,7 @@ with openapi_client.ApiClient(configuration) as api_client:
     # Create an instance of the API class
     api_instance = image_queries_api.ImageQueriesApi(api_client)
     detector_id = "detector_id_example" # str | Choose a detector by its ID.
-    human_review = True # bool | Allow image queries to be marked for no human review. (optional)
+    human_review = "human_review_example" # str | If set to `DEFAULT`, use the regular escalation logic (i.e., send the image query for human review if the ML model is not confident). If set to `ALWAYS`, always send the image query for human review even if the ML model is confident. If set to `NEVER`, never send the image query for human review even if the ML model is not confident. (optional)
     patience_time = 3.14 # float | How long to wait for a confident response. (optional)
     body = open('@path/to/image.jpeg', 'rb') # file_type | (optional)

@@ -231,7 +231,7 @@ with openapi_client.ApiClient(configuration) as api_client:
 Name | Type | Description | Notes
 ------------- | ------------- | ------------- | -------------
  **detector_id** | **str**| Choose a detector by its ID. |
- **human_review** | **bool**| Allow image queries to be marked for no human review. | [optional]
+ **human_review** | **str**| If set to `DEFAULT`, use the regular escalation logic (i.e., send the image query for human review if the ML model is not confident). If set to `ALWAYS`, always send the image query for human review even if the ML model is confident. If set to `NEVER`, never send the image query for human review even if the ML model is not confident. | [optional]
  **patience_time** | **float**| How long to wait for a confident response. | [optional]
  **body** | **file_type**| | [optional]

diff --git a/generated/model.py b/generated/model.py
index 9af1c3a0..dea486ca 100644
--- a/generated/model.py
+++ b/generated/model.py
@@ -1,6 +1,6 @@
 # generated by datamodel-codegen:
 #   filename:  public-api.yaml
-#   timestamp: 2023-08-01T00:09:22+00:00
+#   timestamp: 2023-08-09T20:46:11+00:00

 from __future__ import annotations
diff --git a/generated/openapi_client/api/image_queries_api.py b/generated/openapi_client/api/image_queries_api.py
index 76a48528..dd760ba8 100644
--- a/generated/openapi_client/api/image_queries_api.py
+++ b/generated/openapi_client/api/image_queries_api.py
@@ -147,7 +147,7 @@ def __init__(self, api_client=None):
                 "allowed_values": {},
                 "openapi_types": {
                     "detector_id": (str,),
-                    "human_review": (bool,),
+                    "human_review": (str,),
                     "patience_time": (float,),
                     "body": (file_type,),
                 },
@@ -297,7 +297,7 @@ def submit_image_query(self, detector_id, **kwargs):
            detector_id (str): Choose a detector by its ID.

        Keyword Args:
-            human_review (bool): If set to `False` then unconfident ML predictions will not be escalated to human review. [optional, defaults `True`]
+            human_review (str): If set to `DEFAULT`, use the regular escalation logic (i.e., send the image query for human review if the ML model is not confident). If set to `ALWAYS`, always send the image query for human review even if the ML model is confident. If set to `NEVER`, never send the image query for human review even if the ML model is not confident. . [optional]
            patience_time (float): How long to wait for a confident response.. [optional]
            body (file_type): [optional]
            _return_http_data_only (bool): response data without head status
diff --git a/spec/public-api.yaml b/spec/public-api.yaml
index d3540df1..871f1d65 100644
--- a/spec/public-api.yaml
+++ b/spec/public-api.yaml
@@ -135,8 +135,11 @@ paths:
         - in: query
           name: human_review
           schema:
-            type: boolean
-          description: If set to `False` then unconfident ML predictions will not be escalated to human review. (Defaults `True`)
+            type: string
+          description: >
+            If set to `DEFAULT`, use the regular escalation logic (i.e., send the image query for human review if the ML model is not confident).
+            If set to `ALWAYS`, always send the image query for human review even if the ML model is confident.
+            If set to `NEVER`, never send the image query for human review even if the ML model is not confident.
           required: false
         - in: query
           name: patience_time
diff --git a/src/groundlight/client.py b/src/groundlight/client.py
index 60d84eaa..72a662fb 100644
--- a/src/groundlight/client.py
+++ b/src/groundlight/client.py
@@ -170,7 +170,7 @@ def submit_image_query(
         detector: Union[Detector, str],
         image: Union[str, bytes, Image.Image, BytesIO, BufferedReader, np.ndarray],
         wait: Optional[float] = None,
-        human_review: Optional[bool] = True,
+        human_review: Optional[str] = None,
     ) -> ImageQuery:
         """Evaluates an image with Groundlight.
         :param detector: the Detector object, or string id of a detector like `det_12345`
@@ -183,7 +183,10 @@
             Any binary format must be JPEG-encoded already. Any pixel format will get
             converted to JPEG at high quality before sending to service.
         :param wait: How long to wait (in seconds) for a confident answer.
-        :param human_review: If set to False, do not escalate for human review
+        :param human_review: If `None` or `DEFAULT`, send the image query for human review
+            only if the ML prediction is not confident.
+            If set to `ALWAYS`, always send the image query for human review.
+            If set to `NEVER`, never send the image query for human review.
         """
         if wait is None:
             wait = self.DEFAULT_WAIT
@@ -191,9 +194,16 @@

         image_bytesio: ByteStreamWrapper = parse_supported_image_types(image)

-        raw_image_query = self.image_queries_api.submit_image_query(
-            detector_id=detector_id, patience_time=wait, human_review=human_review, body=image_bytesio
-        )
+        params = {"detector_id": detector_id, "body": image_bytesio}
+        if wait == 0:
+            params["patience_time"] = self.DEFAULT_WAIT
+        else:
+            params["patience_time"] = wait
+
+        if human_review is not None:
+            params["human_review"] = human_review
+
+        raw_image_query = self.image_queries_api.submit_image_query(**params)
         image_query = ImageQuery.parse_obj(raw_image_query.to_dict())
         if wait:
             threshold = self.get_detector(detector).confidence_threshold
diff --git a/test/integration/test_groundlight.py b/test/integration/test_groundlight.py
index 27b7505a..85a49653 100644
--- a/test/integration/test_groundlight.py
+++ b/test/integration/test_groundlight.py
@@ -35,7 +35,7 @@ def is_valid_display_label(label: str) -> bool:
 def fixture_gl() -> Groundlight:
     """Creates a Groundlight client object for testing."""
     _gl = Groundlight()
-    _gl.DEFAULT_WAIT = 0.1
+    _gl.DEFAULT_WAIT = 10
     return _gl


@@ -163,8 +163,7 @@ def test_get_detector_by_name(gl: Groundlight, detector: Detector):


 def test_submit_image_query_blocking(gl: Groundlight, detector: Detector):
-    # Ask for a trivially small wait so it never has time to update, but uses the code path
-    _image_query = gl.submit_image_query(detector=detector.id, image="test/assets/dog.jpeg", wait=2)
+    _image_query = gl.submit_image_query(detector=detector.id, image="test/assets/dog.jpeg", wait=10)
     assert str(_image_query)
     assert isinstance(_image_query, ImageQuery)
     assert is_valid_display_result(_image_query.result)
@@ -173,7 +172,7 @@ def test_submit_image_query_blocking(gl: Groundlight, detector: Detector):
 def test_submit_image_query_returns_yes(gl: Groundlight):
     # We use the "never-review" pipeline to guarantee a confident "yes" answer.
     detector = gl.get_or_create_detector(name="Always a dog", query="Is there a dog?", pipeline_config="never-review")
-    image_query = gl.submit_image_query(detector=detector, image="test/assets/dog.jpeg", wait=2)
+    image_query = gl.submit_image_query(detector=detector, image="test/assets/dog.jpeg", wait=10)
     assert image_query.result.label == Label.YES


@@ -184,6 +183,17 @@ def test_submit_image_query_filename(gl: Groundlight, detector: Detector):
     assert is_valid_display_result(_image_query.result)


+def test_submit_image_query_with_human_review_param(gl: Groundlight, detector: Detector):
+    # For now, this just tests that the image query is submitted successfully.
+    # There should probably be a better way to check whether the image query was escalated for human review.
+
+    for human_review_value in ("DEFAULT", "ALWAYS", "NEVER"):
+        _image_query = gl.submit_image_query(
+            detector=detector.id, image="test/assets/dog.jpeg", human_review=human_review_value
+        )
+        assert is_valid_display_result(_image_query.result)
+
+
 def test_submit_image_query_jpeg_bytes(gl: Groundlight, detector: Detector):
     jpeg = open("test/assets/dog.jpeg", "rb").read()
     _image_query = gl.submit_image_query(detector=detector.id, image=jpeg)