diff --git a/Makefile b/Makefile
index ae002dc8..84dcf563 100644
--- a/Makefile
+++ b/Makefile
@@ -49,10 +49,13 @@ test-4edge: install  ## Run tests against the prod API via the edge-endpoint (ne
 	${PYTEST} ${PROFILING_ARGS} ${TEST_ARGS} ${EDGE_FILTERS} test
 
 test-local: install  ## Run tests against a localhost API (needs GROUNDLIGHT_API_TOKEN and a local API server)
-	GROUNDLIGHT_ENDPOINT="http://localhost:8000/" ${PYTEST} ${TEST_ARGS} ${CLOUD_FILTERS} test
+	GROUNDLIGHT_ENDPOINT="http://localhost:8000/" $(MAKE) test
 
 test-integ: install  ## Run tests against the integ API server (needs GROUNDLIGHT_API_TOKEN)
-	GROUNDLIGHT_ENDPOINT="https://api.integ.groundlight.ai/" ${PYTEST} ${TEST_ARGS} ${CLOUD_FILTERS} test
+	GROUNDLIGHT_ENDPOINT="https://api.integ.groundlight.ai/" $(MAKE) test
+
+test-dev: install  ## Run tests against a dev API server (needs GROUNDLIGHT_API_TOKEN and properly configured dns-hostmap)
+	GROUNDLIGHT_ENDPOINT="https://api.dev.groundlight.ai/" $(MAKE) test
 
 test-docs: install  ## Run the example code and tests in our docs against the prod API (needs GROUNDLIGHT_API_TOKEN)
 	${PYTEST} --markdown-docs ${TEST_ARGS} docs README.md
diff --git a/docs/docs/building-applications/7-edge.md b/docs/docs/building-applications/7-edge.md
index 873b81d3..78329ed5 100644
--- a/docs/docs/building-applications/7-edge.md
+++ b/docs/docs/building-applications/7-edge.md
@@ -14,8 +14,8 @@ and for communicating with the Groundlight cloud service.
 To use the edge endpoint, simply configure the Groundlight SDK to use the edge endpoint's URL instead of the cloud
 endpoint. All application logic will work seamlessly and unchanged with the Groundlight Edge Endpoint, except some ML answers will
-return much faster locally. The only visible difference is that image queries answered at the edge endpoint will have the prefix `iqe_` instead of `iq_` for image queries answered in the cloud. `iqe_` stands for "image query edge". Edge-originated
-image queries will not appear in the cloud dashboard.
+return much faster locally. Image queries answered at the edge endpoint will not appear in the cloud dashboard unless
+specifically configured to do so; even then, the edge prediction will not be reflected on the image query in the cloud.
 
 ## Configuring the Edge Endpoint
diff --git a/generated/README.md b/generated/README.md
index 22aa3842..bd5fe9eb 100644
--- a/generated/README.md
+++ b/generated/README.md
@@ -3,7 +3,7 @@
 Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language.
 
 This Python package is automatically generated by the [OpenAPI Generator](https://openapi-generator.tech) project:
 
-- API version: 0.18.1
+- API version: 0.18.2
 - Package version: 1.0.0
 - Build package: org.openapitools.codegen.languages.PythonClientCodegen
diff --git a/generated/docs/BinaryClassificationResult.md b/generated/docs/BinaryClassificationResult.md
index 655da3ab..f8dc4656 100644
--- a/generated/docs/BinaryClassificationResult.md
+++ b/generated/docs/BinaryClassificationResult.md
@@ -6,7 +6,7 @@ Name | Type | Description | Notes
 ------------ | ------------- | ------------- | -------------
 **label** | **str** |  |
 **confidence** | **float** |  | [optional]
-**source** | **str** | Source is optional to support edge v0.2 | [optional]
+**source** | **str** |  | [optional]
 **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional]
 
 [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
diff --git a/generated/docs/CountingResult.md b/generated/docs/CountingResult.md
index dcf8d6da..b5f32515 100644
--- a/generated/docs/CountingResult.md
+++ b/generated/docs/CountingResult.md
@@ -6,7 +6,7 @@ Name | Type | Description | Notes
 ------------ | ------------- | ------------- | -------------
 **count** | **int** |  |
 **confidence** | **float** |  | [optional]
-**source** | **str** | Source is optional to support edge v0.2 | [optional]
+**source** | **str** |  | [optional]
 **greater_than_max** | **bool** |  | [optional]
 **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional]
diff --git a/generated/docs/ImageQueriesApi.md b/generated/docs/ImageQueriesApi.md
index 39f0eb20..3d6cd5b8 100644
--- a/generated/docs/ImageQueriesApi.md
+++ b/generated/docs/ImageQueriesApi.md
@@ -248,7 +248,7 @@ Name | Type | Description | Notes
 
- Submit an image query against a detector. You must use `\"Content-Type: image/jpeg\"` for the image data. For example: ```Bash $ curl https://api.groundlight.ai/device-api/v1/image-queries?detector_id=det_abc123 \\ --header \"Content-Type: image/jpeg\" \\ --data-binary @path/to/filename.jpeg ```
+ Submit an image query against a detector. You must use `\"Content-Type: image/jpeg\"` or similar (image/png, image/webp, etc.) for the image data. For example: ```Bash $ curl https://api.groundlight.ai/device-api/v1/image-queries?detector_id=det_abc123 \\ --header \"Content-Type: image/jpeg\" \\ --data-binary @path/to/filename.jpeg ```
 
 ### Example
 
@@ -283,6 +283,7 @@ with groundlight_openapi_client.ApiClient(configuration) as api_client:
     api_instance = image_queries_api.ImageQueriesApi(api_client)
     detector_id = "detector_id_example" # str | Choose a detector by its ID.
     human_review = "human_review_example" # str | If set to `DEFAULT`, use the regular escalation logic (i.e., send the image query for human review if the ML model is not confident). If set to `ALWAYS`, always send the image query for human review even if the ML model is confident. If set to `NEVER`, never send the image query for human review even if the ML model is not confident. (optional)
+    image_query_id = "image_query_id_example" # str | The ID to assign to the created image query.
(optional)
     inspection_id = "inspection_id_example" # str | Associate the image query with an inspection. (optional)
     metadata = "metadata_example" # str | A dictionary of custom key/value metadata to associate with the image query (limited to 1KB). (optional)
     patience_time = 3.14 # float | How long to wait for a confident response. (optional)
@@ -299,7 +300,7 @@ with groundlight_openapi_client.ApiClient(configuration) as api_client:
     # example passing only required values which don't have defaults set
     # and optional values
     try:
-        api_response = api_instance.submit_image_query(detector_id, human_review=human_review, inspection_id=inspection_id, metadata=metadata, patience_time=patience_time, want_async=want_async, body=body)
+        api_response = api_instance.submit_image_query(detector_id, human_review=human_review, image_query_id=image_query_id, inspection_id=inspection_id, metadata=metadata, patience_time=patience_time, want_async=want_async, body=body)
         pprint(api_response)
     except groundlight_openapi_client.ApiException as e:
         print("Exception when calling ImageQueriesApi->submit_image_query: %s\n" % e)
@@ -312,6 +313,7 @@ Name | Type | Description | Notes
 ------------- | ------------- | ------------- | -------------
 **detector_id** | **str**| Choose a detector by its ID. |
 **human_review** | **str**| If set to `DEFAULT`, use the regular escalation logic (i.e., send the image query for human review if the ML model is not confident). If set to `ALWAYS`, always send the image query for human review even if the ML model is confident. If set to `NEVER`, never send the image query for human review even if the ML model is not confident. | [optional]
+ **image_query_id** | **str**| The ID to assign to the created image query. | [optional]
 **inspection_id** | **str**| Associate the image query with an inspection. | [optional]
 **metadata** | **str**| A dictionary of custom key/value metadata to associate with the image query (limited to 1KB). | [optional]
 **patience_time** | **float**| How long to wait for a confident response.
| [optional] diff --git a/generated/docs/LabelValue.md b/generated/docs/LabelValue.md index 4869e48c..acbb0e6f 100644 --- a/generated/docs/LabelValue.md +++ b/generated/docs/LabelValue.md @@ -9,9 +9,9 @@ Name | Type | Description | Notes **annotations_requested** | **[bool, date, datetime, dict, float, int, list, str, none_type]** | | [readonly] **created_at** | **datetime** | | [readonly] **detector_id** | **int, none_type** | | [readonly] +**source** | **bool, date, datetime, dict, float, int, list, str, none_type** | | [readonly] **text** | **str, none_type** | Text annotations | [readonly] **rois** | [**[ROI], none_type**](ROI.md) | | [optional] -**source** | **bool, date, datetime, dict, float, int, list, str, none_type** | | [optional] [readonly] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/docs/MultiClassificationResult.md b/generated/docs/MultiClassificationResult.md index 2ab36f78..4f401a3d 100644 --- a/generated/docs/MultiClassificationResult.md +++ b/generated/docs/MultiClassificationResult.md @@ -6,7 +6,7 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **label** | **str** | | **confidence** | **float** | | [optional] -**source** | **str** | Source is optional to support edge v0.2 | [optional] +**source** | **str** | | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/groundlight_openapi_client/__init__.py b/generated/groundlight_openapi_client/__init__.py index 3f67f120..ff1e9a1d 100644 --- a/generated/groundlight_openapi_client/__init__.py +++ b/generated/groundlight_openapi_client/__init__.py @@ -5,7 +5,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/actions_api.py b/generated/groundlight_openapi_client/api/actions_api.py index 0670bad0..47ac8d0d 100644 --- a/generated/groundlight_openapi_client/api/actions_api.py +++ b/generated/groundlight_openapi_client/api/actions_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/detector_groups_api.py b/generated/groundlight_openapi_client/api/detector_groups_api.py index 89e739ff..83a2cb4f 100644 --- a/generated/groundlight_openapi_client/api/detector_groups_api.py +++ b/generated/groundlight_openapi_client/api/detector_groups_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/detector_reset_api.py b/generated/groundlight_openapi_client/api/detector_reset_api.py index 7662bbf8..c50532f8 100644 --- a/generated/groundlight_openapi_client/api/detector_reset_api.py +++ b/generated/groundlight_openapi_client/api/detector_reset_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/detectors_api.py b/generated/groundlight_openapi_client/api/detectors_api.py index e054c2c5..a46e110f 100644 --- a/generated/groundlight_openapi_client/api/detectors_api.py +++ b/generated/groundlight_openapi_client/api/detectors_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/image_queries_api.py b/generated/groundlight_openapi_client/api/image_queries_api.py index ebf133a9..d9925f9d 100644 --- a/generated/groundlight_openapi_client/api/image_queries_api.py +++ b/generated/groundlight_openapi_client/api/image_queries_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -171,6 +171,7 @@ def __init__(self, api_client=None): "all": [ "detector_id", "human_review", + "image_query_id", "inspection_id", "metadata", "patience_time", @@ -190,6 +191,7 @@ def __init__(self, api_client=None): "openapi_types": { "detector_id": (str,), "human_review": (str,), + "image_query_id": (str,), "inspection_id": (str,), "metadata": (str,), "patience_time": (float,), @@ -199,6 +201,7 @@ def __init__(self, api_client=None): "attribute_map": { "detector_id": "detector_id", "human_review": "human_review", + "image_query_id": "image_query_id", "inspection_id": "inspection_id", "metadata": "metadata", "patience_time": "patience_time", @@ -207,6 +210,7 @@ def __init__(self, api_client=None): "location_map": { "detector_id": "query", "human_review": "query", + "image_query_id": "query", "inspection_id": "query", "metadata": "query", "patience_time": "query", @@ -406,7 +410,7 @@ def list_image_queries(self, **kwargs): def submit_image_query(self, detector_id, **kwargs): """submit_image_query # noqa: E501 - Submit an image query against a detector. You must use `\"Content-Type: image/jpeg\"` for the image data. For example: ```Bash $ curl https://api.groundlight.ai/device-api/v1/image-queries?detector_id=det_abc123 \\ --header \"Content-Type: image/jpeg\" \\ --data-binary @path/to/filename.jpeg ``` # noqa: E501 + Submit an image query against a detector. You must use `\"Content-Type: image/jpeg\"` or similar (image/png, image/webp, etc) for the image data. For example: ```Bash $ curl https://api.groundlight.ai/device-api/v1/image-queries?detector_id=det_abc123 \\ --header \"Content-Type: image/jpeg\" \\ --data-binary @path/to/filename.jpeg ``` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -418,6 +422,7 @@ def submit_image_query(self, detector_id, **kwargs): Keyword Args: human_review (str): If set to `DEFAULT`, use the regular escalation logic (i.e., send the image query for human review if the ML model is not confident). If set to `ALWAYS`, always send the image query for human review even if the ML model is confident. If set to `NEVER`, never send the image query for human review even if the ML model is not confident.. [optional] + image_query_id (str): The ID to assign to the created image query.. [optional] inspection_id (str): Associate the image query with an inspection.. [optional] metadata (str): A dictionary of custom key/value metadata to associate with the image query (limited to 1KB).. [optional] patience_time (float): How long to wait for a confident response.. [optional] diff --git a/generated/groundlight_openapi_client/api/labels_api.py b/generated/groundlight_openapi_client/api/labels_api.py index 884151e4..fd948a9f 100644 --- a/generated/groundlight_openapi_client/api/labels_api.py +++ b/generated/groundlight_openapi_client/api/labels_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/notes_api.py b/generated/groundlight_openapi_client/api/notes_api.py index 6836fb2e..f6522e04 100644 --- a/generated/groundlight_openapi_client/api/notes_api.py +++ b/generated/groundlight_openapi_client/api/notes_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api/user_api.py b/generated/groundlight_openapi_client/api/user_api.py index 66937738..fa851468 100644 --- a/generated/groundlight_openapi_client/api/user_api.py +++ b/generated/groundlight_openapi_client/api/user_api.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/api_client.py b/generated/groundlight_openapi_client/api_client.py index ac14fb86..da2e348a 100644 --- a/generated/groundlight_openapi_client/api_client.py +++ b/generated/groundlight_openapi_client/api_client.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/configuration.py b/generated/groundlight_openapi_client/configuration.py index 9aab02ac..654b832f 100644 --- a/generated/groundlight_openapi_client/configuration.py +++ b/generated/groundlight_openapi_client/configuration.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -419,7 +419,7 @@ def to_debug_report(self): "Python SDK Debug Report:\n" "OS: {env}\n" "Python Version: {pyversion}\n" - "Version of the API: 0.18.1\n" + "Version of the API: 0.18.2\n" "SDK Package Version: 1.0.0".format(env=sys.platform, pyversion=sys.version) ) diff --git a/generated/groundlight_openapi_client/exceptions.py b/generated/groundlight_openapi_client/exceptions.py index 5ab58207..393dbba8 100644 --- a/generated/groundlight_openapi_client/exceptions.py +++ b/generated/groundlight_openapi_client/exceptions.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/action.py b/generated/groundlight_openapi_client/model/action.py index 1199e0d8..8308d44b 100644 --- a/generated/groundlight_openapi_client/model/action.py +++ b/generated/groundlight_openapi_client/model/action.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/action_list.py b/generated/groundlight_openapi_client/model/action_list.py index 16954eef..a38fae04 100644 --- a/generated/groundlight_openapi_client/model/action_list.py +++ b/generated/groundlight_openapi_client/model/action_list.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/all_notes.py b/generated/groundlight_openapi_client/model/all_notes.py index aa96ed47..89da5af1 100644 --- a/generated/groundlight_openapi_client/model/all_notes.py +++ b/generated/groundlight_openapi_client/model/all_notes.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/annotations_requested_enum.py b/generated/groundlight_openapi_client/model/annotations_requested_enum.py index 5a4789bd..d951c99e 100644 --- a/generated/groundlight_openapi_client/model/annotations_requested_enum.py +++ b/generated/groundlight_openapi_client/model/annotations_requested_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/b_box_geometry.py b/generated/groundlight_openapi_client/model/b_box_geometry.py index 1bfa3d7a..2282dfd7 100644 --- a/generated/groundlight_openapi_client/model/b_box_geometry.py +++ b/generated/groundlight_openapi_client/model/b_box_geometry.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/b_box_geometry_request.py b/generated/groundlight_openapi_client/model/b_box_geometry_request.py index f23ce8d2..fcac579f 100644 --- a/generated/groundlight_openapi_client/model/b_box_geometry_request.py +++ b/generated/groundlight_openapi_client/model/b_box_geometry_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/binary_classification_result.py b/generated/groundlight_openapi_client/model/binary_classification_result.py index 3abe590e..b9cd17eb 100644 --- a/generated/groundlight_openapi_client/model/binary_classification_result.py +++ b/generated/groundlight_openapi_client/model/binary_classification_result.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -165,7 +165,7 @@ def _from_openapi_data(cls, label, *args, **kwargs): # noqa: E501 through its discriminator because we passed in _visited_composed_classes = (Animal,) confidence (float): [optional] # noqa: E501 - source (str): Source is optional to support edge v0.2. [optional] # noqa: E501 + source (str): [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -255,7 +255,7 @@ def __init__(self, label, *args, **kwargs): # noqa: E501 through its discriminator because we passed in _visited_composed_classes = (Animal,) confidence (float): [optional] # noqa: E501 - source (str): Source is optional to support edge v0.2. [optional] # noqa: E501 + source (str): [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) diff --git a/generated/groundlight_openapi_client/model/blank_enum.py b/generated/groundlight_openapi_client/model/blank_enum.py index d7a16227..aa466bb8 100644 --- a/generated/groundlight_openapi_client/model/blank_enum.py +++ b/generated/groundlight_openapi_client/model/blank_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/channel_enum.py b/generated/groundlight_openapi_client/model/channel_enum.py index 6590b1cc..720dac0d 100644 --- a/generated/groundlight_openapi_client/model/channel_enum.py +++ b/generated/groundlight_openapi_client/model/channel_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/condition.py b/generated/groundlight_openapi_client/model/condition.py index 7d67f28d..6e12ea64 100644 --- a/generated/groundlight_openapi_client/model/condition.py +++ b/generated/groundlight_openapi_client/model/condition.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/condition_request.py b/generated/groundlight_openapi_client/model/condition_request.py index 9ce8cd01..3172892a 100644 --- a/generated/groundlight_openapi_client/model/condition_request.py +++ b/generated/groundlight_openapi_client/model/condition_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/count_mode_configuration.py b/generated/groundlight_openapi_client/model/count_mode_configuration.py index e28e500e..25e7539d 100644 --- a/generated/groundlight_openapi_client/model/count_mode_configuration.py +++ b/generated/groundlight_openapi_client/model/count_mode_configuration.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/counting_result.py b/generated/groundlight_openapi_client/model/counting_result.py index 5c8daaa5..1bc61441 100644 --- a/generated/groundlight_openapi_client/model/counting_result.py +++ b/generated/groundlight_openapi_client/model/counting_result.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -162,7 +162,7 @@ def _from_openapi_data(cls, count, *args, **kwargs): # noqa: E501 through its discriminator because we passed in _visited_composed_classes = (Animal,) confidence (float): [optional] # noqa: E501 - source (str): Source is optional to support edge v0.2. [optional] # noqa: E501 + source (str): [optional] # noqa: E501 greater_than_max (bool): [optional] # noqa: E501 """ @@ -253,7 +253,7 @@ def __init__(self, count, *args, **kwargs): # noqa: E501 through its discriminator because we passed in _visited_composed_classes = (Animal,) confidence (float): [optional] # noqa: E501 - source (str): Source is optional to support edge v0.2. 
[optional] # noqa: E501 + source (str): [optional] # noqa: E501 greater_than_max (bool): [optional] # noqa: E501 """ diff --git a/generated/groundlight_openapi_client/model/detector.py b/generated/groundlight_openapi_client/model/detector.py index b7095d8a..04c6beb8 100644 --- a/generated/groundlight_openapi_client/model/detector.py +++ b/generated/groundlight_openapi_client/model/detector.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/detector_creation_input_request.py b/generated/groundlight_openapi_client/model/detector_creation_input_request.py index db38f022..42ea7e7e 100644 --- a/generated/groundlight_openapi_client/model/detector_creation_input_request.py +++ b/generated/groundlight_openapi_client/model/detector_creation_input_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/detector_group.py b/generated/groundlight_openapi_client/model/detector_group.py index 60350686..5620b0c9 100644 --- a/generated/groundlight_openapi_client/model/detector_group.py +++ b/generated/groundlight_openapi_client/model/detector_group.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/detector_group_request.py b/generated/groundlight_openapi_client/model/detector_group_request.py index cb98b8fe..3302860f 100644 --- a/generated/groundlight_openapi_client/model/detector_group_request.py +++ b/generated/groundlight_openapi_client/model/detector_group_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/detector_type_enum.py b/generated/groundlight_openapi_client/model/detector_type_enum.py index c20c0f96..94d446da 100644 --- a/generated/groundlight_openapi_client/model/detector_type_enum.py +++ b/generated/groundlight_openapi_client/model/detector_type_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/escalation_type_enum.py b/generated/groundlight_openapi_client/model/escalation_type_enum.py index 99c297d5..d28cb1ed 100644 --- a/generated/groundlight_openapi_client/model/escalation_type_enum.py +++ b/generated/groundlight_openapi_client/model/escalation_type_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/image_query.py b/generated/groundlight_openapi_client/model/image_query.py index 0d844e01..951ee454 100644 --- a/generated/groundlight_openapi_client/model/image_query.py +++ b/generated/groundlight_openapi_client/model/image_query.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/image_query_type_enum.py b/generated/groundlight_openapi_client/model/image_query_type_enum.py index 5c24a7f9..42470730 100644 --- a/generated/groundlight_openapi_client/model/image_query_type_enum.py +++ b/generated/groundlight_openapi_client/model/image_query_type_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/inline_response200.py b/generated/groundlight_openapi_client/model/inline_response200.py index 78d48bf3..7e25ac59 100644 --- a/generated/groundlight_openapi_client/model/inline_response200.py +++ b/generated/groundlight_openapi_client/model/inline_response200.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/label_value.py b/generated/groundlight_openapi_client/model/label_value.py index 9bce25b9..d2981b5d 100644 --- a/generated/groundlight_openapi_client/model/label_value.py +++ b/generated/groundlight_openapi_client/model/label_value.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -114,14 +114,6 @@ def openapi_types(): int, none_type, ), # noqa: E501 - "text": ( - str, - none_type, - ), # noqa: E501 - "rois": ( - [ROI], - none_type, - ), # noqa: E501 "source": ( bool, date, @@ -133,6 +125,14 @@ def openapi_types(): str, none_type, ), # noqa: E501 + "text": ( + str, + none_type, + ), # noqa: E501 + "rois": ( + [ROI], + none_type, + ), # noqa: E501 } @cached_property @@ -145,9 +145,9 @@ def discriminator(): "annotations_requested": "annotations_requested", # noqa: E501 "created_at": "created_at", # noqa: E501 "detector_id": "detector_id", # noqa: E501 + "source": "source", # noqa: E501 "text": "text", # noqa: E501 "rois": "rois", # noqa: E501 - "source": "source", # noqa: E501 } read_only_vars = { @@ -156,8 +156,8 @@ def discriminator(): "annotations_requested", # noqa: E501 "created_at", # noqa: E501 "detector_id", # noqa: E501 - "text", # noqa: E501 "source", # noqa: E501 + "text", # noqa: E501 } _composed_schemas = {} @@ -165,7 +165,7 @@ def discriminator(): @classmethod @convert_js_args_to_python_args def _from_openapi_data( - cls, confidence, class_name, annotations_requested, created_at, detector_id, text, *args, **kwargs + cls, confidence, class_name, annotations_requested, created_at, detector_id, source, text, *args, **kwargs ): # noqa: E501 """LabelValue - a model defined in OpenAPI @@ -175,6 +175,7 @@ def _from_openapi_data( annotations_requested ([bool, date, datetime, dict, float, int, list, str, none_type]): created_at (datetime): detector_id (int, none_type): + source (bool, date, datetime, dict, float, int, list, str, none_type): text (str, none_type): Text annotations Keyword Args: @@ -209,7 +210,6 @@ def _from_openapi_data( through its discriminator because we passed in _visited_composed_classes = (Animal,) rois ([ROI], none_type): [optional] # noqa: E501 - source (bool, date, datetime, dict, float, int, list, str, none_type): [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -243,6 +243,7 @@ def _from_openapi_data( self.annotations_requested = annotations_requested self.created_at = created_at self.detector_id = detector_id + self.source = source self.text = text for var_name, var_value in kwargs.items(): if ( @@ -301,7 +302,6 @@ def __init__(self, *args, **kwargs): # noqa: E501 through its discriminator because we passed in _visited_composed_classes = (Animal,) rois ([ROI], none_type): [optional] # noqa: E501 - source (bool, date, datetime, dict, float, int, list, str, none_type): [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) diff --git a/generated/groundlight_openapi_client/model/label_value_request.py b/generated/groundlight_openapi_client/model/label_value_request.py index a388191d..362d870c 100644 --- a/generated/groundlight_openapi_client/model/label_value_request.py +++ b/generated/groundlight_openapi_client/model/label_value_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/mode_enum.py b/generated/groundlight_openapi_client/model/mode_enum.py index 46bed042..34987371 100644 --- a/generated/groundlight_openapi_client/model/mode_enum.py +++ b/generated/groundlight_openapi_client/model/mode_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/multi_class_mode_configuration.py b/generated/groundlight_openapi_client/model/multi_class_mode_configuration.py index b14f6c71..f08ee27c 100644 --- a/generated/groundlight_openapi_client/model/multi_class_mode_configuration.py +++ b/generated/groundlight_openapi_client/model/multi_class_mode_configuration.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/multi_classification_result.py b/generated/groundlight_openapi_client/model/multi_classification_result.py index 9ee8a63f..e3e99760 100644 --- a/generated/groundlight_openapi_client/model/multi_classification_result.py +++ b/generated/groundlight_openapi_client/model/multi_classification_result.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -160,7 +160,7 @@ def _from_openapi_data(cls, label, *args, **kwargs): # noqa: E501 through its discriminator because we passed in _visited_composed_classes = (Animal,) confidence (float): [optional] # noqa: E501 - source (str): Source is optional to support edge v0.2. [optional] # noqa: E501 + source (str): [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -250,7 +250,7 @@ def __init__(self, label, *args, **kwargs): # noqa: E501 through its discriminator because we passed in _visited_composed_classes = (Animal,) confidence (float): [optional] # noqa: E501 - source (str): Source is optional to support edge v0.2. [optional] # noqa: E501 + source (str): [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) diff --git a/generated/groundlight_openapi_client/model/note.py b/generated/groundlight_openapi_client/model/note.py index 8799080f..91b565c4 100644 --- a/generated/groundlight_openapi_client/model/note.py +++ b/generated/groundlight_openapi_client/model/note.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/note_request.py b/generated/groundlight_openapi_client/model/note_request.py index 32d6b05a..09e4fe5d 100644 --- a/generated/groundlight_openapi_client/model/note_request.py +++ b/generated/groundlight_openapi_client/model/note_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/paginated_detector_list.py b/generated/groundlight_openapi_client/model/paginated_detector_list.py index 51471f0e..5972205e 100644 --- a/generated/groundlight_openapi_client/model/paginated_detector_list.py +++ b/generated/groundlight_openapi_client/model/paginated_detector_list.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/paginated_image_query_list.py b/generated/groundlight_openapi_client/model/paginated_image_query_list.py index 39a70a0e..13dccab7 100644 --- a/generated/groundlight_openapi_client/model/paginated_image_query_list.py +++ b/generated/groundlight_openapi_client/model/paginated_image_query_list.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/paginated_rule_list.py b/generated/groundlight_openapi_client/model/paginated_rule_list.py index bbd1b9bb..6d007e1d 100644 --- a/generated/groundlight_openapi_client/model/paginated_rule_list.py +++ b/generated/groundlight_openapi_client/model/paginated_rule_list.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/patched_detector_request.py b/generated/groundlight_openapi_client/model/patched_detector_request.py index a2fcd579..251cb75d 100644 --- a/generated/groundlight_openapi_client/model/patched_detector_request.py +++ b/generated/groundlight_openapi_client/model/patched_detector_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/result_type_enum.py b/generated/groundlight_openapi_client/model/result_type_enum.py index 3220f6a0..187c5861 100644 --- a/generated/groundlight_openapi_client/model/result_type_enum.py +++ b/generated/groundlight_openapi_client/model/result_type_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/roi.py b/generated/groundlight_openapi_client/model/roi.py index 07870cc5..74c4fc66 100644 --- a/generated/groundlight_openapi_client/model/roi.py +++ b/generated/groundlight_openapi_client/model/roi.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/roi_request.py b/generated/groundlight_openapi_client/model/roi_request.py index a1f63aa8..07fb54e2 100644 --- a/generated/groundlight_openapi_client/model/roi_request.py +++ b/generated/groundlight_openapi_client/model/roi_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/rule.py b/generated/groundlight_openapi_client/model/rule.py index 5884c996..78760fca 100644 --- a/generated/groundlight_openapi_client/model/rule.py +++ b/generated/groundlight_openapi_client/model/rule.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/rule_request.py b/generated/groundlight_openapi_client/model/rule_request.py index 62234b6f..0cb5934f 100644 --- a/generated/groundlight_openapi_client/model/rule_request.py +++ b/generated/groundlight_openapi_client/model/rule_request.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/snooze_time_unit_enum.py b/generated/groundlight_openapi_client/model/snooze_time_unit_enum.py index 555d4076..f5586bb6 100644 --- a/generated/groundlight_openapi_client/model/snooze_time_unit_enum.py +++ b/generated/groundlight_openapi_client/model/snooze_time_unit_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/source_enum.py b/generated/groundlight_openapi_client/model/source_enum.py index 6248ac27..aee5be85 100644 --- a/generated/groundlight_openapi_client/model/source_enum.py +++ b/generated/groundlight_openapi_client/model/source_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/status_enum.py b/generated/groundlight_openapi_client/model/status_enum.py index ac77a141..b41c2871 100644 --- a/generated/groundlight_openapi_client/model/status_enum.py +++ b/generated/groundlight_openapi_client/model/status_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/verb_enum.py b/generated/groundlight_openapi_client/model/verb_enum.py index 84d601a3..8d138449 100644 --- a/generated/groundlight_openapi_client/model/verb_enum.py +++ b/generated/groundlight_openapi_client/model/verb_enum.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model_utils.py b/generated/groundlight_openapi_client/model_utils.py index e5da59e1..cf7bd6d4 100644 --- a/generated/groundlight_openapi_client/model_utils.py +++ b/generated/groundlight_openapi_client/model_utils.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/rest.py b/generated/groundlight_openapi_client/rest.py index 01876d57..16d8ca86 100644 --- a/generated/groundlight_openapi_client/rest.py +++ b/generated/groundlight_openapi_client/rest.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/model.py b/generated/model.py index 305eca8e..9c12f113 100644 --- a/generated/model.py +++ b/generated/model.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: # filename: public-api.yaml -# timestamp: 2024-10-09T22:17:52+00:00 +# timestamp: 2024-10-30T23:38:30+00:00 from __future__ import annotations @@ -178,10 +178,6 @@ class VerbEnum(str, Enum): class Source(str, Enum): - """ - Source is optional to support edge v0.2 - """ - STILL_PROCESSING = "STILL_PROCESSING" CLOUD = "CLOUD" USER = "USER" @@ -197,20 +193,20 @@ class Label(str, Enum): class BinaryClassificationResult(BaseModel): confidence: Optional[confloat(ge=0.0, le=1.0)] = None - source: Optional[Source] = Field(None, description="Source is optional to support edge v0.2") + source: Optional[Source] = None label: Label class CountingResult(BaseModel): confidence: Optional[confloat(ge=0.0, le=1.0)] = None - source: Optional[Source] = Field(None, description="Source is optional to support edge v0.2") + source: Optional[Source] = None count: int greater_than_max: Optional[bool] = None class MultiClassificationResult(BaseModel): confidence: Optional[confloat(ge=0.0, le=1.0)] = None - source: Optional[Source] = Field(None, description="Source is optional to support edge v0.2") + source: Optional[Source] = None label: str @@ -362,7 +358,7 @@ class LabelValue(BaseModel): annotations_requested: List[AnnotationsRequestedEnum] created_at: datetime detector_id: Optional[int] = Field(...) - source: Optional[SourceEnum] = None + source: SourceEnum text: Optional[str] = Field(..., description="Text annotations") diff --git a/generated/setup.py b/generated/setup.py index 9c4c9456..9c4bb572 100644 --- a/generated/setup.py +++ b/generated/setup.py @@ -3,7 +3,7 @@ Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.18.1 + The version of the OpenAPI document: 0.18.2 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/pyproject.toml b/pyproject.toml index ed154e3c..accefdbe 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,6 +30,7 @@ pytest = "^7.0.1" pytest-cov = "^3.0.0" pytest-markdown-docs = "^0.4.3" pytest-mock = "^3.10.0" +svix-ksuid = "^0.6.2" [tool.poetry.group.lint.dependencies] black = "^23.3.0" diff --git a/spec/public-api.yaml b/spec/public-api.yaml index 5b18cf7f..b227293d 100644 --- a/spec/public-api.yaml +++ b/spec/public-api.yaml @@ -1,7 +1,7 @@ openapi: 3.0.3 info: title: Groundlight API - version: 0.18.1 + version: 0.18.2 description: Groundlight makes it simple to understand images. 
You can easily create computer vision detectors just by describing what you want to know using natural language. @@ -336,18 +336,16 @@ paths: description: '' post: operationId: Submit image query - description: |2+ + description: |2 Submit an image query against a detector. - You must use `"Content-Type: image/jpeg"` for the image data. For example: - + You must use `"Content-Type: image/jpeg"` or similar (image/png, image/webp, etc) for the image data. For example: ```Bash $ curl https://api.groundlight.ai/device-api/v1/image-queries?detector_id=det_abc123 \ --header "Content-Type: image/jpeg" \ --data-binary @path/to/filename.jpeg ``` - parameters: - in: query name: detector_id @@ -363,6 +361,11 @@ paths: If set to `DEFAULT`, use the regular escalation logic (i.e., send the image query for human review if the ML model is not confident). If set to `ALWAYS`, always send the image query for human review even if the ML model is confident. If set to `NEVER`, never send the image query for human review even if the ML model is not confident. + - in: query + name: image_query_id + schema: + type: string + description: The ID to assign to the created image query. - in: query name: inspection_id schema: @@ -995,6 +998,7 @@ components: - confidence - created_at - detector_id + - source - text LabelValueRequest: type: object @@ -1330,7 +1334,6 @@ components: minimum: 0.0 maximum: 1.0 source: - description: Source is optional to support edge v0.2 type: string enum: - STILL_PROCESSING @@ -1355,7 +1358,6 @@ components: minimum: 0.0 maximum: 1.0 source: - description: Source is optional to support edge v0.2 type: string enum: - STILL_PROCESSING @@ -1380,7 +1382,6 @@ components: minimum: 0.0 maximum: 1.0 source: - description: Source is optional to support edge v0.2 type: string enum: - STILL_PROCESSING diff --git a/src/groundlight/client.py b/src/groundlight/client.py index 8f520e73..07d64804 100644 --- a/src/groundlight/client.py +++ b/src/groundlight/client.py @@ -441,6 +441,7 @@ def submit_image_query( # noqa: PLR0913 # pylint: disable=too-many-arguments, t want_async: bool = False, inspection_id: Optional[str] = None, metadata: Union[dict, str, None] = None, + image_query_id: Optional[str] = None, ) -> ImageQuery: """ Evaluates an image with Groundlight. @@ -482,6 +483,9 @@ def submit_image_query( # noqa: PLR0913 # pylint: disable=too-many-arguments, t the image query (limited to 1KB). You can retrieve this metadata later by calling `get_image_query()`. + :param image_query_id: The ID for the image query. This is to enable specific functionality and is not intended + for general external use. If not set, a random ID will be generated. + :return: ImageQuery """ if wait is None: @@ -517,6 +521,9 @@ def submit_image_query( # noqa: PLR0913 # pylint: disable=too-many-arguments, t # url- and base64-encode the metadata. 
params["metadata"] = url_encode_dict(metadata, name="metadata", size_limit_bytes=1024) + if image_query_id is not None: + params["image_query_id"] = image_query_id + raw_image_query = self.image_queries_api.submit_image_query(**params) image_query = ImageQuery.parse_obj(raw_image_query.to_dict()) diff --git a/test/integration/test_groundlight.py b/test/integration/test_groundlight.py index 85a472c2..a0dc44ba 100644 --- a/test/integration/test_groundlight.py +++ b/test/integration/test_groundlight.py @@ -8,13 +8,13 @@ from datetime import datetime from typing import Any, Dict, Optional, Union -import groundlight_openapi_client import pytest from groundlight import Groundlight from groundlight.binary_labels import VALID_DISPLAY_LABELS, DeprecatedLabel, Label, convert_internal_label_to_display -from groundlight.internalapi import InternalApiError, NotFoundError +from groundlight.internalapi import ApiException, InternalApiError, NotFoundError from groundlight.optional_imports import * from groundlight.status_codes import is_user_error +from ksuid import KsuidMs from model import ( BinaryClassificationResult, CountingResult, @@ -320,6 +320,20 @@ def test_submit_image_query_png(gl: Groundlight, detector: Detector): assert is_valid_display_result(_image_query.result) +def test_submit_image_query_with_id(gl: Groundlight, detector: Detector): + # submit_image_query + id = f"iq_{KsuidMs()}" + _image_query = gl.submit_image_query( + detector=detector.id, image="test/assets/dog.jpeg", wait=10, human_review="NEVER", image_query_id=id + ) + assert str(_image_query) + assert isinstance(_image_query, ImageQuery) + assert is_valid_display_result(_image_query.result) + assert _image_query.id == id + assert _image_query.metadata is not None + assert _image_query.metadata.get("is_from_edge") + + def test_submit_image_query_with_human_review_param(gl: Groundlight, detector: Detector): # For now, this just tests that the image query is submitted successfully. # There should probably be a better way to check whether the image query was escalated for human review. @@ -425,7 +439,7 @@ def test_submit_image_query_with_metadata_too_large(gl: Groundlight, detector: D @pytest.mark.run_only_for_edge_endpoint def test_submit_image_query_with_metadata_returns_user_error(gl: Groundlight, detector: Detector, image: str): """On the edge-endpoint, we raise an exception if the user passes metadata.""" - with pytest.raises(groundlight_openapi_client.exceptions.ApiException) as exc_info: + with pytest.raises(ApiException) as exc_info: gl.submit_image_query(detector=detector.id, image=image, human_review="NEVER", metadata={"a": 1}) assert is_user_error(exc_info.value.status) @@ -443,7 +457,7 @@ def test_submit_image_query_jpeg_truncated(gl: Groundlight, detector: Detector): jpeg_truncated = jpeg[:-500] # Cut off the last 500 bytes # This is an extra difficult test because the header is valid. # So a casual check of the image will appear valid. - with pytest.raises(groundlight_openapi_client.exceptions.ApiException) as exc_info: + with pytest.raises(ApiException) as exc_info: _image_query = gl.submit_image_query(detector=detector.id, image=jpeg_truncated, human_review="NEVER") exc_value = exc_info.value assert is_user_error(exc_value.status)