Skip to content

Commit

Permalink
Ocr support (#255)
Browse files Browse the repository at this point in the history
* Prioritizing the OCR release; this API spec doesn't match any BE version. It skips the Actions update but contains the update for OCR

* running the generator

* Adding test with new functionality

* Automatically reformatting code

---------

Co-authored-by: Auto-format Bot <[email protected]>
  • Loading branch information
brandon-groundlight and Auto-format Bot authored Sep 25, 2024
1 parent 329d86b commit 3e377f5
Show file tree
Hide file tree
Showing 12 changed files with 64 additions and 18 deletions.
1 change: 0 additions & 1 deletion generated/.openapi-generator/FILES
Original file line number Diff line number Diff line change
Expand Up @@ -104,5 +104,4 @@ setup.cfg
setup.py
test-requirements.txt
test/__init__.py
test/test_detector_reset_api.py
tox.ini
1 change: 1 addition & 0 deletions generated/docs/ImageQuery.md
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ Name | Type | Description | Notes
**patience_time** | **float** | How long to wait for a confident response. | [readonly]
**confidence_threshold** | **float** | Min confidence needed to accept the response of the image query. | [readonly]
**rois** | [**[ROI], none_type**](ROI.md) | An array of regions of interest (bounding boxes) collected on image | [readonly]
**text** | **str, none_type** | A text field on image query. | [readonly]
**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional]

[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
Expand Down
3 changes: 2 additions & 1 deletion generated/docs/LabelValue.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,12 @@
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**confidence** | **float, none_type** | | [readonly]
**class_name** | **str, none_type** | A human-readable class name for this label (e.g. YES/NO) | [readonly]
**class_name** | **str, none_type** | Return a human-readable class name for this label (e.g. YES/NO) | [readonly]
**annotations_requested** | **[bool, date, datetime, dict, float, int, list, str, none_type]** | | [readonly]
**created_at** | **datetime** | | [readonly]
**detector_id** | **int, none_type** | | [readonly]
**source** | **bool, date, datetime, dict, float, int, list, str, none_type** | | [readonly]
**text** | **str, none_type** | Text annotations | [readonly]
**rois** | [**[ROI], none_type**](ROI.md) | | [optional]
**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional]

Expand Down
2 changes: 1 addition & 1 deletion generated/docs/ResultTypeEnum.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**value** | **str** | | must be one of ["binary_classification", "counting", ]
**value** | **str** | | must be one of ["binary_classification", "counting", "multi_classification", ]

[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)

Expand Down
9 changes: 9 additions & 0 deletions generated/groundlight_openapi_client/model/image_query.py
Original file line number Diff line number Diff line change
Expand Up @@ -151,6 +151,10 @@ def openapi_types():
[ROI],
none_type,
), # noqa: E501
"text": (
str,
none_type,
), # noqa: E501
}

@cached_property
Expand All @@ -169,6 +173,7 @@ def discriminator():
"patience_time": "patience_time", # noqa: E501
"confidence_threshold": "confidence_threshold", # noqa: E501
"rois": "rois", # noqa: E501
"text": "text", # noqa: E501
}

read_only_vars = {
Expand All @@ -183,6 +188,7 @@ def discriminator():
"patience_time", # noqa: E501
"confidence_threshold", # noqa: E501
"rois", # noqa: E501
"text", # noqa: E501
}

_composed_schemas = {}
Expand All @@ -202,6 +208,7 @@ def _from_openapi_data(
patience_time,
confidence_threshold,
rois,
text,
*args,
**kwargs,
): # noqa: E501
Expand All @@ -219,6 +226,7 @@ def _from_openapi_data(
patience_time (float): How long to wait for a confident response.
confidence_threshold (float): Min confidence needed to accept the response of the image query.
rois ([ROI], none_type): An array of regions of interest (bounding boxes) collected on image
text (str, none_type): A text field on image query.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
Expand Down Expand Up @@ -290,6 +298,7 @@ def _from_openapi_data(
self.patience_time = patience_time
self.confidence_threshold = confidence_threshold
self.rois = rois
self.text = text
for var_name, var_value in kwargs.items():
if (
var_name not in self.attribute_map
Expand Down
12 changes: 10 additions & 2 deletions generated/groundlight_openapi_client/model/label_value.py
Original file line number Diff line number Diff line change
Expand Up @@ -125,6 +125,10 @@ def openapi_types():
str,
none_type,
), # noqa: E501
"text": (
str,
none_type,
), # noqa: E501
"rois": (
[ROI],
none_type,
Expand All @@ -142,6 +146,7 @@ def discriminator():
"created_at": "created_at", # noqa: E501
"detector_id": "detector_id", # noqa: E501
"source": "source", # noqa: E501
"text": "text", # noqa: E501
"rois": "rois", # noqa: E501
}

Expand All @@ -152,24 +157,26 @@ def discriminator():
"created_at", # noqa: E501
"detector_id", # noqa: E501
"source", # noqa: E501
"text", # noqa: E501
}

_composed_schemas = {}

@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(
cls, confidence, class_name, annotations_requested, created_at, detector_id, source, *args, **kwargs
cls, confidence, class_name, annotations_requested, created_at, detector_id, source, text, *args, **kwargs
): # noqa: E501
"""LabelValue - a model defined in OpenAPI
Args:
confidence (float, none_type):
class_name (str, none_type): A human-readable class name for this label (e.g. YES/NO)
class_name (str, none_type): Return a human-readable class name for this label (e.g. YES/NO)
annotations_requested ([bool, date, datetime, dict, float, int, list, str, none_type]):
created_at (datetime):
detector_id (int, none_type):
source (bool, date, datetime, dict, float, int, list, str, none_type):
text (str, none_type): Text annotations
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
Expand Down Expand Up @@ -237,6 +244,7 @@ def _from_openapi_data(
self.created_at = created_at
self.detector_id = detector_id
self.source = source
self.text = text
for var_name, var_value in kwargs.items():
if (
var_name not in self.attribute_map
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,7 @@ class ResultTypeEnum(ModelSimple):
("value",): {
"BINARY_CLASSIFICATION": "binary_classification",
"COUNTING": "counting",
"MULTI_CLASSIFICATION": "multi_classification",
},
}

Expand Down Expand Up @@ -102,10 +103,10 @@ def __init__(self, *args, **kwargs):
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] (str):, must be one of ["binary_classification", "counting", ] # noqa: E501
args[0] (str):, must be one of ["binary_classification", "counting", "multi_classification", ] # noqa: E501
Keyword Args:
value (str):, must be one of ["binary_classification", "counting", ] # noqa: E501
value (str):, must be one of ["binary_classification", "counting", "multi_classification", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Expand Down Expand Up @@ -194,10 +195,10 @@ def _from_openapi_data(cls, *args, **kwargs):
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] (str):, must be one of ["binary_classification", "counting", ] # noqa: E501
args[0] (str):, must be one of ["binary_classification", "counting", "multi_classification", ] # noqa: E501
Keyword Args:
value (str):, must be one of ["binary_classification", "counting", ] # noqa: E501
value (str):, must be one of ["binary_classification", "counting", "multi_classification", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Expand Down
9 changes: 7 additions & 2 deletions generated/model.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# generated by datamodel-codegen:
# filename: public-api.yaml
# timestamp: 2024-08-26T21:12:31+00:00
# timestamp: 2024-09-25T21:50:15+00:00

from __future__ import annotations

Expand Down Expand Up @@ -110,6 +110,7 @@ class ROIRequest(BaseModel):
# Result-type discriminator for a detector's answers. The member values mirror
# the `ResultTypeEnum` choices in spec/public-api.yaml; the OCR release adds
# "multi_classification". Built with the functional Enum API — member names and
# values are identical strings, so lookup by value and by attribute both work.
ResultTypeEnum = Enum(
    "ResultTypeEnum",
    [
        ("binary_classification", "binary_classification"),
        ("counting", "counting"),
        ("multi_classification", "multi_classification"),
    ],
)


class SnoozeTimeUnitEnum(Enum):
Expand Down Expand Up @@ -290,16 +291,20 @@ class ImageQuery(BaseModel):
rois: Optional[List[ROI]] = Field(
..., description="An array of regions of interest (bounding boxes) collected on image"
)
text: Optional[str] = Field(..., description="A text field on image query.")


class LabelValue(BaseModel):
    # Auto-generated (datamodel-codegen) pydantic model for a label attached to
    # an image query. NOTE: in pydantic v1 syntax, `Optional[...] = Field(...)`
    # means "required but nullable" — the key must be present, the value may be null.
    confidence: Optional[float] = Field(...)
    class_name: Optional[str] = Field(
        ..., description="Return a human-readable class name for this label (e.g. YES/NO)"
    )
    # Bounding boxes; genuinely optional (defaults to None), unlike the Field(...) members.
    rois: Optional[List[ROI]] = None
    annotations_requested: List[AnnotationsRequestedEnum]
    created_at: datetime
    detector_id: Optional[int] = Field(...)
    source: SourceEnum
    # Added in the OCR release: nullable free-text annotation (required key, may be null).
    text: Optional[str] = Field(..., description="Text annotations")


class LabelValueRequest(BaseModel):
Expand Down
8 changes: 4 additions & 4 deletions package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 2 additions & 2 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
"name": "groundlight-sdk-generator",
"dependencies": {
"@openapitools/openapi-generator-cli": "^2.9.0",
"rehype-katex": "^7.0.0",
"rehype-katex": "^7.0.1",
"remark-math": "^6.0.0"
}
}
}
15 changes: 14 additions & 1 deletion spec/public-api.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -925,6 +925,11 @@ components:
nullable: true
description: An array of regions of interest (bounding boxes) collected
on image
text:
type: string
nullable: true
readOnly: true
description: A text field on image query.
required:
- confidence_threshold
- created_at
Expand All @@ -936,6 +941,7 @@ components:
- result
- result_type
- rois
- text
- type
x-internal: true
ImageQueryTypeEnum:
Expand All @@ -953,7 +959,7 @@ components:
class_name:
type: string
nullable: true
description: A human-readable class name for this label (e.g. YES/NO)
description: Return a human-readable class name for this label (e.g. YES/NO)
readOnly: true
rois:
type: array
Expand Down Expand Up @@ -983,13 +989,19 @@ components:
allOf:
- $ref: '#/components/schemas/SourceEnum'
readOnly: true
text:
type: string
readOnly: true
nullable: true
description: Text annotations
required:
- annotations_requested
- class_name
- confidence
- created_at
- detector_id
- source
- text
LabelValueRequest:
type: object
properties:
Expand Down Expand Up @@ -1145,6 +1157,7 @@ components:
enum:
- binary_classification
- counting
- multi_classification
type: string
Rule:
type: object
Expand Down
9 changes: 9 additions & 0 deletions test/integration/test_groundlight.py
Original file line number Diff line number Diff line change
Expand Up @@ -297,6 +297,15 @@ def test_submit_image_query_returns_yes(gl: Groundlight):
assert image_query.result.label == Label.YES


def test_submit_image_query_returns_text(gl: Groundlight):
    """Verify that an image query against a text pipeline populates the new `text` field."""
    # NOTE(review): the original comment here ("never-review pipeline to guarantee a
    # confident 'yes'") was copied from test_submit_image_query_returns_yes and is stale —
    # this test uses the "constant-text" pipeline, which presumably always returns a
    # fixed text answer (confirm against BE pipeline configs).
    detector = gl.get_or_create_detector(
        name="Always same text", query="Is there a dog?", pipeline_config="constant-text"
    )
    # human_review="NEVER" keeps the test hermetic; wait=10 bounds the polling time.
    image_query = gl.submit_image_query(detector=detector, image="test/assets/dog.jpeg", wait=10, human_review="NEVER")
    # The OCR release adds a nullable `text` field to ImageQuery; for this pipeline it
    # must come back as an actual string, not None.
    assert isinstance(image_query.text, str)


def test_submit_image_query_filename(gl: Groundlight, detector: Detector):
_image_query = gl.submit_image_query(detector=detector.id, image="test/assets/dog.jpeg", human_review="NEVER")
assert str(_image_query)
Expand Down

0 comments on commit 3e377f5

Please sign in to comment.