From d07f2869d2807fc87c1e0b1f5c9d2999e7a941ab Mon Sep 17 00:00:00 2001 From: Brandon Date: Tue, 27 Aug 2024 20:21:08 +0000 Subject: [PATCH 1/5] regenerating from spec --- generated/.openapi-generator/FILES | 14 + generated/README.md | 9 +- generated/docs/Detector.md | 4 +- generated/docs/DetectorRequest.md | 17 + generated/docs/DetectorResetApi.md | 84 +++++ generated/docs/DetectorsApi.md | 146 +++----- generated/docs/EscalationTypeEnum.md | 12 + generated/docs/LabelValue.md | 2 +- generated/docs/ModeEnum.md | 3 +- generated/docs/NoteRequest.md | 1 + generated/docs/NotesApi.md | 23 +- generated/docs/PatchedDetectorRequest.md | 17 + generated/docs/StatusEnum.md | 12 + .../api/detector_reset_api.py | 134 +++++++ .../api/detectors_api.py | 196 ++++------ .../api/notes_api.py | 45 +-- .../apis/__init__.py | 1 + .../model/blank_enum.py | 274 ++++++++++++++ .../model/detector.py | 50 ++- .../model/detector_request.py | 335 ++++++++++++++++++ .../model/escalation_type_enum.py | 283 +++++++++++++++ .../model/label_value.py | 2 +- .../model/mode_enum.py | 8 +- .../model/note_request.py | 7 + .../model/patched_detector_request.py | 329 +++++++++++++++++ .../model/status_enum.py | 283 +++++++++++++++ .../models/__init__.py | 5 + generated/model.py | 73 +++- generated/test/test_blank_enum.py | 35 ++ generated/test/test_detector_reset_api.py | 32 ++ generated/test/test_escalation_type_enum.py | 35 ++ .../test/test_patched_detector_request.py | 42 +++ generated/test/test_status_enum.py | 35 ++ package-lock.json | 10 +- package.json | 4 +- spec/public-api.yaml | 98 +++++ src/groundlight/experimental_api.py | 73 ++++ 37 files changed, 2424 insertions(+), 309 deletions(-) create mode 100644 generated/docs/DetectorRequest.md create mode 100644 generated/docs/DetectorResetApi.md create mode 100644 generated/docs/EscalationTypeEnum.md create mode 100644 generated/docs/PatchedDetectorRequest.md create mode 100644 generated/docs/StatusEnum.md create mode 100644 generated/groundlight_openapi_client/api/detector_reset_api.py create mode 100644 generated/groundlight_openapi_client/model/blank_enum.py create mode 100644 generated/groundlight_openapi_client/model/detector_request.py create mode 100644 generated/groundlight_openapi_client/model/escalation_type_enum.py create mode 100644 generated/groundlight_openapi_client/model/patched_detector_request.py create mode 100644 generated/groundlight_openapi_client/model/status_enum.py create mode 100644 generated/test/test_blank_enum.py create mode 100644 generated/test/test_detector_reset_api.py create mode 100644 generated/test/test_escalation_type_enum.py create mode 100644 generated/test/test_patched_detector_request.py create mode 100644 generated/test/test_status_enum.py diff --git a/generated/.openapi-generator/FILES b/generated/.openapi-generator/FILES index fd6931f8..be61decc 100644 --- a/generated/.openapi-generator/FILES +++ b/generated/.openapi-generator/FILES @@ -10,6 +10,7 @@ docs/AnnotationsRequestedEnum.md docs/BBoxGeometry.md docs/BBoxGeometryRequest.md docs/BinaryClassificationResult.md +docs/BlankEnum.md docs/ChannelEnum.md docs/Condition.md docs/ConditionRequest.md @@ -19,8 +20,10 @@ docs/DetectorCreationInputRequest.md docs/DetectorGroup.md docs/DetectorGroupRequest.md docs/DetectorGroupsApi.md +docs/DetectorResetApi.md docs/DetectorTypeEnum.md docs/DetectorsApi.md +docs/EscalationTypeEnum.md docs/ImageQueriesApi.md docs/ImageQuery.md docs/ImageQueryTypeEnum.md @@ -30,10 +33,12 @@ docs/LabelValueRequest.md docs/LabelsApi.md docs/ModeEnum.md 
docs/Note.md +docs/NoteRequest.md docs/NotesApi.md docs/PaginatedDetectorList.md docs/PaginatedImageQueryList.md docs/PaginatedRuleList.md +docs/PatchedDetectorRequest.md docs/ROI.md docs/ROIRequest.md docs/ResultTypeEnum.md @@ -41,6 +46,7 @@ docs/Rule.md docs/RuleRequest.md docs/SnoozeTimeUnitEnum.md docs/SourceEnum.md +docs/StatusEnum.md docs/UserApi.md docs/VerbEnum.md git_push.sh @@ -48,6 +54,7 @@ groundlight_openapi_client/__init__.py groundlight_openapi_client/api/__init__.py groundlight_openapi_client/api/actions_api.py groundlight_openapi_client/api/detector_groups_api.py +groundlight_openapi_client/api/detector_reset_api.py groundlight_openapi_client/api/detectors_api.py groundlight_openapi_client/api/image_queries_api.py groundlight_openapi_client/api/labels_api.py @@ -65,6 +72,7 @@ groundlight_openapi_client/model/annotations_requested_enum.py groundlight_openapi_client/model/b_box_geometry.py groundlight_openapi_client/model/b_box_geometry_request.py groundlight_openapi_client/model/binary_classification_result.py +groundlight_openapi_client/model/blank_enum.py groundlight_openapi_client/model/channel_enum.py groundlight_openapi_client/model/condition.py groundlight_openapi_client/model/condition_request.py @@ -74,6 +82,7 @@ groundlight_openapi_client/model/detector_creation_input_request.py groundlight_openapi_client/model/detector_group.py groundlight_openapi_client/model/detector_group_request.py groundlight_openapi_client/model/detector_type_enum.py +groundlight_openapi_client/model/escalation_type_enum.py groundlight_openapi_client/model/image_query.py groundlight_openapi_client/model/image_query_type_enum.py groundlight_openapi_client/model/inline_response200.py @@ -81,9 +90,11 @@ groundlight_openapi_client/model/label_value.py groundlight_openapi_client/model/label_value_request.py groundlight_openapi_client/model/mode_enum.py groundlight_openapi_client/model/note.py +groundlight_openapi_client/model/note_request.py groundlight_openapi_client/model/paginated_detector_list.py groundlight_openapi_client/model/paginated_image_query_list.py groundlight_openapi_client/model/paginated_rule_list.py +groundlight_openapi_client/model/patched_detector_request.py groundlight_openapi_client/model/result_type_enum.py groundlight_openapi_client/model/roi.py groundlight_openapi_client/model/roi_request.py @@ -91,6 +102,7 @@ groundlight_openapi_client/model/rule.py groundlight_openapi_client/model/rule_request.py groundlight_openapi_client/model/snooze_time_unit_enum.py groundlight_openapi_client/model/source_enum.py +groundlight_openapi_client/model/status_enum.py groundlight_openapi_client/model/verb_enum.py groundlight_openapi_client/model_utils.py groundlight_openapi_client/models/__init__.py @@ -100,4 +112,6 @@ setup.cfg setup.py test-requirements.txt test/__init__.py +test/test_detector_reset_api.py +test/test_patched_detector_request.py tox.ini diff --git a/generated/README.md b/generated/README.md index 33bdaaf2..da7217ab 100644 --- a/generated/README.md +++ b/generated/README.md @@ -116,12 +116,12 @@ Class | Method | HTTP request | Description *ActionsApi* | [**list_rules**](docs/ActionsApi.md#list_rules) | **GET** /v1/actions/rules | *DetectorGroupsApi* | [**create_detector_group**](docs/DetectorGroupsApi.md#create_detector_group) | **POST** /v1/detector-groups | *DetectorGroupsApi* | [**get_detector_groups**](docs/DetectorGroupsApi.md#get_detector_groups) | **GET** /v1/detector-groups | +*DetectorResetApi* | [**reset_detector**](docs/DetectorResetApi.md#reset_detector) | 
**DELETE** /v1/detector-reset/{id} | *DetectorsApi* | [**create_detector**](docs/DetectorsApi.md#create_detector) | **POST** /v1/detectors | -*DetectorsApi* | [**create_detector_group2**](docs/DetectorsApi.md#create_detector_group2) | **POST** /v1/detectors/detector-groups | *DetectorsApi* | [**delete_detector**](docs/DetectorsApi.md#delete_detector) | **DELETE** /v1/detectors/{id} | *DetectorsApi* | [**get_detector**](docs/DetectorsApi.md#get_detector) | **GET** /v1/detectors/{id} | -*DetectorsApi* | [**get_detector_groups2**](docs/DetectorsApi.md#get_detector_groups2) | **GET** /v1/detectors/detector-groups | *DetectorsApi* | [**list_detectors**](docs/DetectorsApi.md#list_detectors) | **GET** /v1/detectors | +*DetectorsApi* | [**update_detector**](docs/DetectorsApi.md#update_detector) | **PATCH** /v1/detectors/{id} | *ImageQueriesApi* | [**get_image**](docs/ImageQueriesApi.md#get_image) | **GET** /v1/image-queries/{id}/image | *ImageQueriesApi* | [**get_image_query**](docs/ImageQueriesApi.md#get_image_query) | **GET** /v1/image-queries/{id} | *ImageQueriesApi* | [**list_image_queries**](docs/ImageQueriesApi.md#list_image_queries) | **GET** /v1/image-queries | @@ -141,6 +141,7 @@ Class | Method | HTTP request | Description - [BBoxGeometry](docs/BBoxGeometry.md) - [BBoxGeometryRequest](docs/BBoxGeometryRequest.md) - [BinaryClassificationResult](docs/BinaryClassificationResult.md) + - [BlankEnum](docs/BlankEnum.md) - [ChannelEnum](docs/ChannelEnum.md) - [Condition](docs/Condition.md) - [ConditionRequest](docs/ConditionRequest.md) @@ -150,6 +151,7 @@ Class | Method | HTTP request | Description - [DetectorGroup](docs/DetectorGroup.md) - [DetectorGroupRequest](docs/DetectorGroupRequest.md) - [DetectorTypeEnum](docs/DetectorTypeEnum.md) + - [EscalationTypeEnum](docs/EscalationTypeEnum.md) - [ImageQuery](docs/ImageQuery.md) - [ImageQueryTypeEnum](docs/ImageQueryTypeEnum.md) - [InlineResponse200](docs/InlineResponse200.md) @@ -157,9 +159,11 @@ Class | Method | HTTP request | Description - [LabelValueRequest](docs/LabelValueRequest.md) - [ModeEnum](docs/ModeEnum.md) - [Note](docs/Note.md) + - [NoteRequest](docs/NoteRequest.md) - [PaginatedDetectorList](docs/PaginatedDetectorList.md) - [PaginatedImageQueryList](docs/PaginatedImageQueryList.md) - [PaginatedRuleList](docs/PaginatedRuleList.md) + - [PatchedDetectorRequest](docs/PatchedDetectorRequest.md) - [ROI](docs/ROI.md) - [ROIRequest](docs/ROIRequest.md) - [ResultTypeEnum](docs/ResultTypeEnum.md) @@ -167,6 +171,7 @@ Class | Method | HTTP request | Description - [RuleRequest](docs/RuleRequest.md) - [SnoozeTimeUnitEnum](docs/SnoozeTimeUnitEnum.md) - [SourceEnum](docs/SourceEnum.md) + - [StatusEnum](docs/StatusEnum.md) - [VerbEnum](docs/VerbEnum.md) diff --git a/generated/docs/Detector.md b/generated/docs/Detector.md index 0902c379..97d08359 100644 --- a/generated/docs/Detector.md +++ b/generated/docs/Detector.md @@ -12,10 +12,12 @@ Name | Type | Description | Notes **query** | **str** | A question about the image. | [readonly] **group_name** | **str** | Which group should this detector be part of? | [readonly] **metadata** | **{str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type** | Metadata about the detector. 
| [readonly] -**mode** | **str** | | [readonly] +**mode** | **bool, date, datetime, dict, float, int, list, str, none_type** | | [readonly] **mode_configuration** | **{str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type** | | [readonly] **confidence_threshold** | **float** | If the detector's prediction is below this confidence threshold, send the image query for human review. | [optional] if omitted the server will use the default value of 0.9 **patience_time** | **float** | How long Groundlight will attempt to generate a confident prediction | [optional] if omitted the server will use the default value of 30.0 +**status** | **bool, date, datetime, dict, float, int, list, str, none_type** | | [optional] +**escalation_type** | **bool, date, datetime, dict, float, int, list, str, none_type** | Category that define internal proccess for labeling image queries * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/docs/DetectorRequest.md b/generated/docs/DetectorRequest.md new file mode 100644 index 00000000..4ab13a90 --- /dev/null +++ b/generated/docs/DetectorRequest.md @@ -0,0 +1,17 @@ +# DetectorRequest + +Spec for serializing a detector object in the public API. + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**name** | **str** | A short, descriptive name for the detector. | +**confidence_threshold** | **float** | If the detector's prediction is below this confidence threshold, send the image query for human review. 
| [optional] if omitted the server will use the default value of 0.9 +**patience_time** | **float** | How long Groundlight will attempt to generate a confident prediction | [optional] if omitted the server will use the default value of 30.0 +**status** | **bool, date, datetime, dict, float, int, list, str, none_type** | | [optional] +**escalation_type** | **bool, date, datetime, dict, float, int, list, str, none_type** | Category that define internal proccess for labeling image queries * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING | [optional] +**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/generated/docs/DetectorResetApi.md b/generated/docs/DetectorResetApi.md new file mode 100644 index 00000000..087d781c --- /dev/null +++ b/generated/docs/DetectorResetApi.md @@ -0,0 +1,84 @@ +# groundlight_openapi_client.DetectorResetApi + +All URIs are relative to *https://api.groundlight.ai/device-api* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**reset_detector**](DetectorResetApi.md#reset_detector) | **DELETE** /v1/detector-reset/{id} | + + +# **reset_detector** +> reset_detector(id) + + + +Deletes all image queries on the detector + +### Example + +* Api Key Authentication (ApiToken): + +```python +import time +import groundlight_openapi_client +from groundlight_openapi_client.api import detector_reset_api +from pprint import pprint +# Defining the host is optional and defaults to https://api.groundlight.ai/device-api +# See configuration.py for a list of all supported configuration parameters. +configuration = groundlight_openapi_client.Configuration( + host = "https://api.groundlight.ai/device-api" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure API key authorization: ApiToken +configuration.api_key['ApiToken'] = 'YOUR_API_KEY' + +# Uncomment below to setup prefix (e.g. 
Bearer) for API key, if needed +# configuration.api_key_prefix['ApiToken'] = 'Bearer' + +# Enter a context with an instance of the API client +with groundlight_openapi_client.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = detector_reset_api.DetectorResetApi(api_client) + id = "id_example" # str | + + # example passing only required values which don't have defaults set + try: + api_instance.reset_detector(id) + except groundlight_openapi_client.ApiException as e: + print("Exception when calling DetectorResetApi->reset_detector: %s\n" % e) +``` + + +### Parameters + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **id** | **str**| | + +### Return type + +void (empty response body) + +### Authorization + +[ApiToken](../README.md#ApiToken) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: Not defined + + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**204** | No response body | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/generated/docs/DetectorsApi.md b/generated/docs/DetectorsApi.md index ad39beda..b0eeb292 100644 --- a/generated/docs/DetectorsApi.md +++ b/generated/docs/DetectorsApi.md @@ -5,11 +5,10 @@ All URIs are relative to *https://api.groundlight.ai/device-api* Method | HTTP request | Description ------------- | ------------- | ------------- [**create_detector**](DetectorsApi.md#create_detector) | **POST** /v1/detectors | -[**create_detector_group2**](DetectorsApi.md#create_detector_group2) | **POST** /v1/detectors/detector-groups | [**delete_detector**](DetectorsApi.md#delete_detector) | **DELETE** /v1/detectors/{id} | [**get_detector**](DetectorsApi.md#get_detector) | **GET** /v1/detectors/{id} | -[**get_detector_groups2**](DetectorsApi.md#get_detector_groups2) | **GET** /v1/detectors/detector-groups | [**list_detectors**](DetectorsApi.md#list_detectors) | **GET** /v1/detectors | +[**update_detector**](DetectorsApi.md#update_detector) | **PATCH** /v1/detectors/{id} | # **create_detector** @@ -92,86 +91,6 @@ Name | Type | Description | Notes - **Accept**: application/json -### HTTP response details - -| Status code | Description | Response headers | -|-------------|-------------|------------------| -**201** | | - | - -[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) - -# **create_detector_group2** -> DetectorGroup create_detector_group2(detector_group_request) - - - -Create a new detector group POST data: Required: - name (str) - name of the predictor set - -### Example - -* Api Key Authentication (ApiToken): - -```python -import time -import groundlight_openapi_client -from groundlight_openapi_client.api import detectors_api -from groundlight_openapi_client.model.detector_group_request import DetectorGroupRequest -from groundlight_openapi_client.model.detector_group import DetectorGroup -from pprint import pprint -# Defining the host is optional and defaults to https://api.groundlight.ai/device-api -# See configuration.py for a list of all supported configuration parameters. 
-configuration = groundlight_openapi_client.Configuration( - host = "https://api.groundlight.ai/device-api" -) - -# The client must configure the authentication and authorization parameters -# in accordance with the API server security policy. -# Examples for each auth method are provided below, use the example that -# satisfies your auth use case. - -# Configure API key authorization: ApiToken -configuration.api_key['ApiToken'] = 'YOUR_API_KEY' - -# Uncomment below to setup prefix (e.g. Bearer) for API key, if needed -# configuration.api_key_prefix['ApiToken'] = 'Bearer' - -# Enter a context with an instance of the API client -with groundlight_openapi_client.ApiClient(configuration) as api_client: - # Create an instance of the API class - api_instance = detectors_api.DetectorsApi(api_client) - detector_group_request = DetectorGroupRequest( - name="name_example", - ) # DetectorGroupRequest | - - # example passing only required values which don't have defaults set - try: - api_response = api_instance.create_detector_group2(detector_group_request) - pprint(api_response) - except groundlight_openapi_client.ApiException as e: - print("Exception when calling DetectorsApi->create_detector_group2: %s\n" % e) -``` - - -### Parameters - -Name | Type | Description | Notes -------------- | ------------- | ------------- | ------------- - **detector_group_request** | [**DetectorGroupRequest**](DetectorGroupRequest.md)| | - -### Return type - -[**DetectorGroup**](DetectorGroup.md) - -### Authorization - -[ApiToken](../README.md#ApiToken) - -### HTTP request headers - - - **Content-Type**: application/json, application/x-www-form-urlencoded, multipart/form-data - - **Accept**: application/json - - ### HTTP response details | Status code | Description | Response headers | @@ -332,12 +251,12 @@ Name | Type | Description | Notes [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) -# **get_detector_groups2** -> [DetectorGroup] get_detector_groups2() +# **list_detectors** +> PaginatedDetectorList list_detectors() -List all detector groups +Retrieve a list of detectors. ### Example @@ -347,7 +266,7 @@ List all detector groups import time import groundlight_openapi_client from groundlight_openapi_client.api import detectors_api -from groundlight_openapi_client.model.detector_group import DetectorGroup +from groundlight_openapi_client.model.paginated_detector_list import PaginatedDetectorList from pprint import pprint # Defining the host is optional and defaults to https://api.groundlight.ai/device-api # See configuration.py for a list of all supported configuration parameters. @@ -370,22 +289,29 @@ configuration.api_key['ApiToken'] = 'YOUR_API_KEY' with groundlight_openapi_client.ApiClient(configuration) as api_client: # Create an instance of the API class api_instance = detectors_api.DetectorsApi(api_client) + page = 1 # int | A page number within the paginated result set. (optional) + page_size = 1 # int | Number of items to return per page. 
(optional) - # example, this endpoint has no required or optional parameters + # example passing only required values which don't have defaults set + # and optional values try: - api_response = api_instance.get_detector_groups2() + api_response = api_instance.list_detectors(page=page, page_size=page_size) pprint(api_response) except groundlight_openapi_client.ApiException as e: - print("Exception when calling DetectorsApi->get_detector_groups2: %s\n" % e) + print("Exception when calling DetectorsApi->list_detectors: %s\n" % e) ``` ### Parameters -This endpoint does not need any parameter. + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **page** | **int**| A page number within the paginated result set. | [optional] + **page_size** | **int**| Number of items to return per page. | [optional] ### Return type -[**[DetectorGroup]**](DetectorGroup.md) +[**PaginatedDetectorList**](PaginatedDetectorList.md) ### Authorization @@ -405,12 +331,12 @@ This endpoint does not need any parameter. [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) -# **list_detectors** -> PaginatedDetectorList list_detectors() +# **update_detector** +> Detector update_detector(id) -Retrieve a list of detectors. +Update a detector ### Example @@ -420,7 +346,8 @@ Retrieve a list of detectors. import time import groundlight_openapi_client from groundlight_openapi_client.api import detectors_api -from groundlight_openapi_client.model.paginated_detector_list import PaginatedDetectorList +from groundlight_openapi_client.model.detector import Detector +from groundlight_openapi_client.model.patched_detector_request import PatchedDetectorRequest from pprint import pprint # Defining the host is optional and defaults to https://api.groundlight.ai/device-api # See configuration.py for a list of all supported configuration parameters. @@ -443,16 +370,29 @@ configuration.api_key['ApiToken'] = 'YOUR_API_KEY' with groundlight_openapi_client.ApiClient(configuration) as api_client: # Create an instance of the API class api_instance = detectors_api.DetectorsApi(api_client) - page = 1 # int | A page number within the paginated result set. (optional) - page_size = 1 # int | Number of items to return per page. 
(optional) + id = "id_example" # str | + patched_detector_request = PatchedDetectorRequest( + name="name_example", + confidence_threshold=0.9, + patience_time=30.0, + status=None, + escalation_type=None, + ) # PatchedDetectorRequest | (optional) + + # example passing only required values which don't have defaults set + try: + api_response = api_instance.update_detector(id) + pprint(api_response) + except groundlight_openapi_client.ApiException as e: + print("Exception when calling DetectorsApi->update_detector: %s\n" % e) # example passing only required values which don't have defaults set # and optional values try: - api_response = api_instance.list_detectors(page=page, page_size=page_size) + api_response = api_instance.update_detector(id, patched_detector_request=patched_detector_request) pprint(api_response) except groundlight_openapi_client.ApiException as e: - print("Exception when calling DetectorsApi->list_detectors: %s\n" % e) + print("Exception when calling DetectorsApi->update_detector: %s\n" % e) ``` @@ -460,12 +400,12 @@ with groundlight_openapi_client.ApiClient(configuration) as api_client: Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- - **page** | **int**| A page number within the paginated result set. | [optional] - **page_size** | **int**| Number of items to return per page. | [optional] + **id** | **str**| | + **patched_detector_request** | [**PatchedDetectorRequest**](PatchedDetectorRequest.md)| | [optional] ### Return type -[**PaginatedDetectorList**](PaginatedDetectorList.md) +[**Detector**](Detector.md) ### Authorization @@ -473,7 +413,7 @@ Name | Type | Description | Notes ### HTTP request headers - - **Content-Type**: Not defined + - **Content-Type**: application/json, application/x-www-form-urlencoded, multipart/form-data - **Accept**: application/json diff --git a/generated/docs/EscalationTypeEnum.md b/generated/docs/EscalationTypeEnum.md new file mode 100644 index 00000000..36e1e46e --- /dev/null +++ b/generated/docs/EscalationTypeEnum.md @@ -0,0 +1,12 @@ +# EscalationTypeEnum + +* `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**value** | **str** | * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING | must be one of ["STANDARD", "NO_HUMAN_LABELING", ] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/generated/docs/LabelValue.md b/generated/docs/LabelValue.md index a96d9ab6..7d1d28bf 100644 --- a/generated/docs/LabelValue.md +++ b/generated/docs/LabelValue.md @@ -5,7 +5,7 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **confidence** | **float, none_type** | | [readonly] -**class_name** | **str, none_type** | A human-readable class name for this label (e.g. YES/NO) | [readonly] +**class_name** | **str, none_type** | Return a human-readable class name for this label (e.g. 
YES/NO) | [readonly] **annotations_requested** | **[bool, date, datetime, dict, float, int, list, str, none_type]** | | [readonly] **created_at** | **datetime** | | [readonly] **detector_id** | **int, none_type** | | [readonly] diff --git a/generated/docs/ModeEnum.md b/generated/docs/ModeEnum.md index 670fef49..178498d0 100644 --- a/generated/docs/ModeEnum.md +++ b/generated/docs/ModeEnum.md @@ -1,11 +1,10 @@ # ModeEnum -* `BINARY` - BINARY * `COUNT` - COUNT * `MULTI_CLASS` - MULTI_CLASS ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**value** | **str** | * `BINARY` - BINARY * `COUNT` - COUNT * `MULTI_CLASS` - MULTI_CLASS | must be one of ["BINARY", "COUNT", "MULTI_CLASS", ] +**value** | **str** | | must be one of ["BINARY", "COUNT", "MULTI_CLASS", ] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/docs/NoteRequest.md b/generated/docs/NoteRequest.md index 730546a8..4410b952 100644 --- a/generated/docs/NoteRequest.md +++ b/generated/docs/NoteRequest.md @@ -5,6 +5,7 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **content** | **str** | Text content of the note. | +**image** | **file_type, none_type** | | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/docs/NotesApi.md b/generated/docs/NotesApi.md index 5e0a4a0b..877f4f8b 100644 --- a/generated/docs/NotesApi.md +++ b/generated/docs/NotesApi.md @@ -9,7 +9,7 @@ Method | HTTP request | Description # **create_note** -> create_note(detector_id, content) +> create_note(detector_id, note_request) @@ -23,6 +23,7 @@ Create a new note import time import groundlight_openapi_client from groundlight_openapi_client.api import notes_api +from groundlight_openapi_client.model.note_request import NoteRequest from pprint import pprint # Defining the host is optional and defaults to https://api.groundlight.ai/device-api # See configuration.py for a list of all supported configuration parameters. @@ -46,19 +47,14 @@ with groundlight_openapi_client.ApiClient(configuration) as api_client: # Create an instance of the API class api_instance = notes_api.NotesApi(api_client) detector_id = "detector_id_example" # str | the detector to associate the new note with - content = "content_example" # str | Text content of the note. 
- image = open('/path/to/file', 'rb') # file_type, none_type | (optional) + note_request = NoteRequest( + content="content_example", + image=open('/path/to/file', 'rb'), + ) # NoteRequest | # example passing only required values which don't have defaults set try: - api_instance.create_note(detector_id, content) - except groundlight_openapi_client.ApiException as e: - print("Exception when calling NotesApi->create_note: %s\n" % e) - - # example passing only required values which don't have defaults set - # and optional values - try: - api_instance.create_note(detector_id, content, image=image) + api_instance.create_note(detector_id, note_request) except groundlight_openapi_client.ApiException as e: print("Exception when calling NotesApi->create_note: %s\n" % e) ``` @@ -69,8 +65,7 @@ with groundlight_openapi_client.ApiClient(configuration) as api_client: Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- **detector_id** | **str**| the detector to associate the new note with | - **content** | **str**| Text content of the note. | - **image** | **file_type, none_type**| | [optional] + **note_request** | [**NoteRequest**](NoteRequest.md)| | ### Return type @@ -82,7 +77,7 @@ void (empty response body) ### HTTP request headers - - **Content-Type**: multipart/form-data + - **Content-Type**: application/json, application/x-www-form-urlencoded, multipart/form-data - **Accept**: Not defined diff --git a/generated/docs/PatchedDetectorRequest.md b/generated/docs/PatchedDetectorRequest.md new file mode 100644 index 00000000..24dc3363 --- /dev/null +++ b/generated/docs/PatchedDetectorRequest.md @@ -0,0 +1,17 @@ +# PatchedDetectorRequest + +Spec for serializing a detector object in the public API. + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**name** | **str** | A short, descriptive name for the detector. | [optional] +**confidence_threshold** | **float** | If the detector's prediction is below this confidence threshold, send the image query for human review. 
| [optional] if omitted the server will use the default value of 0.9 +**patience_time** | **float** | How long Groundlight will attempt to generate a confident prediction | [optional] if omitted the server will use the default value of 30.0 +**status** | **bool, date, datetime, dict, float, int, list, str, none_type** | | [optional] +**escalation_type** | **bool, date, datetime, dict, float, int, list, str, none_type** | Category that define internal proccess for labeling image queries * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING | [optional] +**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/generated/docs/StatusEnum.md b/generated/docs/StatusEnum.md new file mode 100644 index 00000000..6d4c3312 --- /dev/null +++ b/generated/docs/StatusEnum.md @@ -0,0 +1,12 @@ +# StatusEnum + +* `ON` - ON * `OFF` - OFF + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**value** | **str** | * `ON` - ON * `OFF` - OFF | must be one of ["ON", "OFF", ] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/generated/groundlight_openapi_client/api/detector_reset_api.py b/generated/groundlight_openapi_client/api/detector_reset_api.py new file mode 100644 index 00000000..6940a302 --- /dev/null +++ b/generated/groundlight_openapi_client/api/detector_reset_api.py @@ -0,0 +1,134 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + +import re # noqa: F401 +import sys # noqa: F401 + +from groundlight_openapi_client.api_client import ApiClient, Endpoint as _Endpoint +from groundlight_openapi_client.model_utils import ( # noqa: F401 + check_allowed_values, + check_validations, + date, + datetime, + file_type, + none_type, + validate_and_convert_types, +) + + +class DetectorResetApi(object): + """NOTE: This class is auto generated by OpenAPI Generator + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + def __init__(self, api_client=None): + if api_client is None: + api_client = ApiClient() + self.api_client = api_client + self.reset_detector_endpoint = _Endpoint( + settings={ + "response_type": None, + "auth": ["ApiToken"], + "endpoint_path": "/v1/detector-reset/{id}", + "operation_id": "reset_detector", + "http_method": "DELETE", + "servers": None, + }, + params_map={ + "all": [ + "id", + ], + "required": [ + "id", + ], + "nullable": [], + "enum": [], + "validation": [], + }, + root_map={ + "validations": {}, + "allowed_values": {}, + "openapi_types": { + "id": (str,), + }, + "attribute_map": { + "id": "id", + }, + "location_map": { + "id": "path", + }, + "collection_format_map": {}, + }, + headers_map={ + "accept": [], + "content_type": [], + }, + api_client=api_client, + ) + + def reset_detector(self, id, **kwargs): + """reset_detector # noqa: E501 + + Deletes all image queries on the detector # noqa: E501 + This method makes a synchronous HTTP request by default. To make an + asynchronous HTTP request, please pass async_req=True + + >>> thread = api.reset_detector(id, async_req=True) + >>> result = thread.get() + + Args: + id (str): + + Keyword Args: + _return_http_data_only (bool): response data without head status + code and headers. Default is True. + _preload_content (bool): if False, the urllib3.HTTPResponse object + will be returned without reading/decoding response data. + Default is True. + _request_timeout (int/float/tuple): timeout setting for this request. If + one number provided, it will be total request timeout. It can also + be a pair (tuple) of (connection, read) timeouts. + Default is None. + _check_input_type (bool): specifies if type checking + should be done one the data sent to the server. + Default is True. + _check_return_type (bool): specifies if type checking + should be done one the data received from the server. + Default is True. + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _content_type (str/None): force body content-type. + Default is None and content-type will be predicted by allowed + content-types and body. + _host_index (int/None): specifies the index of the server + that we want to use. + Default is read from the configuration. + async_req (bool): execute request asynchronously + + Returns: + None + If the method is called asynchronously, returns the request + thread. 
+ """ + kwargs["async_req"] = kwargs.get("async_req", False) + kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True) + kwargs["_preload_content"] = kwargs.get("_preload_content", True) + kwargs["_request_timeout"] = kwargs.get("_request_timeout", None) + kwargs["_check_input_type"] = kwargs.get("_check_input_type", True) + kwargs["_check_return_type"] = kwargs.get("_check_return_type", True) + kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False) + kwargs["_content_type"] = kwargs.get("_content_type") + kwargs["_host_index"] = kwargs.get("_host_index") + kwargs["id"] = id + return self.reset_detector_endpoint.call_with_http_info(**kwargs) diff --git a/generated/groundlight_openapi_client/api/detectors_api.py b/generated/groundlight_openapi_client/api/detectors_api.py index c03b0cdc..9c197f73 100644 --- a/generated/groundlight_openapi_client/api/detectors_api.py +++ b/generated/groundlight_openapi_client/api/detectors_api.py @@ -23,9 +23,8 @@ ) from groundlight_openapi_client.model.detector import Detector from groundlight_openapi_client.model.detector_creation_input_request import DetectorCreationInputRequest -from groundlight_openapi_client.model.detector_group import DetectorGroup -from groundlight_openapi_client.model.detector_group_request import DetectorGroupRequest from groundlight_openapi_client.model.paginated_detector_list import PaginatedDetectorList +from groundlight_openapi_client.model.patched_detector_request import PatchedDetectorRequest class DetectorsApi(object): @@ -74,44 +73,6 @@ def __init__(self, api_client=None): headers_map={"accept": ["application/json"], "content_type": ["application/json"]}, api_client=api_client, ) - self.create_detector_group2_endpoint = _Endpoint( - settings={ - "response_type": (DetectorGroup,), - "auth": ["ApiToken"], - "endpoint_path": "/v1/detectors/detector-groups", - "operation_id": "create_detector_group2", - "http_method": "POST", - "servers": None, - }, - params_map={ - "all": [ - "detector_group_request", - ], - "required": [ - "detector_group_request", - ], - "nullable": [], - "enum": [], - "validation": [], - }, - root_map={ - "validations": {}, - "allowed_values": {}, - "openapi_types": { - "detector_group_request": (DetectorGroupRequest,), - }, - "attribute_map": {}, - "location_map": { - "detector_group_request": "body", - }, - "collection_format_map": {}, - }, - headers_map={ - "accept": ["application/json"], - "content_type": ["application/json", "application/x-www-form-urlencoded", "multipart/form-data"], - }, - api_client=api_client, - ) self.delete_detector_endpoint = _Endpoint( settings={ "response_type": None, @@ -192,22 +153,40 @@ def __init__(self, api_client=None): }, api_client=api_client, ) - self.get_detector_groups2_endpoint = _Endpoint( + self.list_detectors_endpoint = _Endpoint( settings={ - "response_type": ([DetectorGroup],), + "response_type": (PaginatedDetectorList,), "auth": ["ApiToken"], - "endpoint_path": "/v1/detectors/detector-groups", - "operation_id": "get_detector_groups2", + "endpoint_path": "/v1/detectors", + "operation_id": "list_detectors", "http_method": "GET", "servers": None, }, - params_map={"all": [], "required": [], "nullable": [], "enum": [], "validation": []}, + params_map={ + "all": [ + "page", + "page_size", + ], + "required": [], + "nullable": [], + "enum": [], + "validation": [], + }, root_map={ "validations": {}, "allowed_values": {}, - "openapi_types": {}, - "attribute_map": {}, - "location_map": {}, + "openapi_types": { + "page": (int,), 
+ "page_size": (int,), + }, + "attribute_map": { + "page": "page", + "page_size": "page_size", + }, + "location_map": { + "page": "query", + "page_size": "query", + }, "collection_format_map": {}, }, headers_map={ @@ -216,21 +195,23 @@ def __init__(self, api_client=None): }, api_client=api_client, ) - self.list_detectors_endpoint = _Endpoint( + self.update_detector_endpoint = _Endpoint( settings={ - "response_type": (PaginatedDetectorList,), + "response_type": (Detector,), "auth": ["ApiToken"], - "endpoint_path": "/v1/detectors", - "operation_id": "list_detectors", - "http_method": "GET", + "endpoint_path": "/v1/detectors/{id}", + "operation_id": "update_detector", + "http_method": "PATCH", "servers": None, }, params_map={ "all": [ - "page", - "page_size", + "id", + "patched_detector_request", + ], + "required": [ + "id", ], - "required": [], "nullable": [], "enum": [], "validation": [], @@ -239,22 +220,21 @@ def __init__(self, api_client=None): "validations": {}, "allowed_values": {}, "openapi_types": { - "page": (int,), - "page_size": (int,), + "id": (str,), + "patched_detector_request": (PatchedDetectorRequest,), }, "attribute_map": { - "page": "page", - "page_size": "page_size", + "id": "id", }, "location_map": { - "page": "query", - "page_size": "query", + "id": "path", + "patched_detector_request": "body", }, "collection_format_map": {}, }, headers_map={ "accept": ["application/json"], - "content_type": [], + "content_type": ["application/json", "application/x-www-form-urlencoded", "multipart/form-data"], }, api_client=api_client, ) @@ -317,64 +297,6 @@ def create_detector(self, detector_creation_input_request, **kwargs): kwargs["detector_creation_input_request"] = detector_creation_input_request return self.create_detector_endpoint.call_with_http_info(**kwargs) - def create_detector_group2(self, detector_group_request, **kwargs): - """create_detector_group2 # noqa: E501 - - Create a new detector group POST data: Required: - name (str) - name of the predictor set # noqa: E501 - This method makes a synchronous HTTP request by default. To make an - asynchronous HTTP request, please pass async_req=True - - >>> thread = api.create_detector_group2(detector_group_request, async_req=True) - >>> result = thread.get() - - Args: - detector_group_request (DetectorGroupRequest): - - Keyword Args: - _return_http_data_only (bool): response data without head status - code and headers. Default is True. - _preload_content (bool): if False, the urllib3.HTTPResponse object - will be returned without reading/decoding response data. - Default is True. - _request_timeout (int/float/tuple): timeout setting for this request. If - one number provided, it will be total request timeout. It can also - be a pair (tuple) of (connection, read) timeouts. - Default is None. - _check_input_type (bool): specifies if type checking - should be done one the data sent to the server. - Default is True. - _check_return_type (bool): specifies if type checking - should be done one the data received from the server. - Default is True. - _spec_property_naming (bool): True if the variable names in the input data - are serialized names, as specified in the OpenAPI document. - False if the variable names in the input data - are pythonic names, e.g. snake case (default) - _content_type (str/None): force body content-type. - Default is None and content-type will be predicted by allowed - content-types and body. - _host_index (int/None): specifies the index of the server - that we want to use. 
- Default is read from the configuration. - async_req (bool): execute request asynchronously - - Returns: - DetectorGroup - If the method is called asynchronously, returns the request - thread. - """ - kwargs["async_req"] = kwargs.get("async_req", False) - kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True) - kwargs["_preload_content"] = kwargs.get("_preload_content", True) - kwargs["_request_timeout"] = kwargs.get("_request_timeout", None) - kwargs["_check_input_type"] = kwargs.get("_check_input_type", True) - kwargs["_check_return_type"] = kwargs.get("_check_return_type", True) - kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False) - kwargs["_content_type"] = kwargs.get("_content_type") - kwargs["_host_index"] = kwargs.get("_host_index") - kwargs["detector_group_request"] = detector_group_request - return self.create_detector_group2_endpoint.call_with_http_info(**kwargs) - def delete_detector(self, id, **kwargs): """delete_detector # noqa: E501 @@ -491,18 +413,20 @@ def get_detector(self, id, **kwargs): kwargs["id"] = id return self.get_detector_endpoint.call_with_http_info(**kwargs) - def get_detector_groups2(self, **kwargs): - """get_detector_groups2 # noqa: E501 + def list_detectors(self, **kwargs): + """list_detectors # noqa: E501 - List all detector groups # noqa: E501 + Retrieve a list of detectors. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True - >>> thread = api.get_detector_groups2(async_req=True) + >>> thread = api.list_detectors(async_req=True) >>> result = thread.get() Keyword Args: + page (int): A page number within the paginated result set.. [optional] + page_size (int): Number of items to return per page.. [optional] _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object @@ -531,7 +455,7 @@ def get_detector_groups2(self, **kwargs): async_req (bool): execute request asynchronously Returns: - [DetectorGroup] + PaginatedDetectorList If the method is called asynchronously, returns the request thread. """ @@ -544,22 +468,23 @@ def get_detector_groups2(self, **kwargs): kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False) kwargs["_content_type"] = kwargs.get("_content_type") kwargs["_host_index"] = kwargs.get("_host_index") - return self.get_detector_groups2_endpoint.call_with_http_info(**kwargs) + return self.list_detectors_endpoint.call_with_http_info(**kwargs) - def list_detectors(self, **kwargs): - """list_detectors # noqa: E501 + def update_detector(self, id, **kwargs): + """update_detector # noqa: E501 - Retrieve a list of detectors. # noqa: E501 + Update a detector # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True - >>> thread = api.list_detectors(async_req=True) + >>> thread = api.update_detector(id, async_req=True) >>> result = thread.get() + Args: + id (str): Keyword Args: - page (int): A page number within the paginated result set.. [optional] - page_size (int): Number of items to return per page.. [optional] + patched_detector_request (PatchedDetectorRequest): [optional] _return_http_data_only (bool): response data without head status code and headers. Default is True. 
_preload_content (bool): if False, the urllib3.HTTPResponse object @@ -588,7 +513,7 @@ def list_detectors(self, **kwargs): async_req (bool): execute request asynchronously Returns: - PaginatedDetectorList + Detector If the method is called asynchronously, returns the request thread. """ @@ -601,4 +526,5 @@ def list_detectors(self, **kwargs): kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False) kwargs["_content_type"] = kwargs.get("_content_type") kwargs["_host_index"] = kwargs.get("_host_index") - return self.list_detectors_endpoint.call_with_http_info(**kwargs) + kwargs["id"] = id + return self.update_detector_endpoint.call_with_http_info(**kwargs) diff --git a/generated/groundlight_openapi_client/api/notes_api.py b/generated/groundlight_openapi_client/api/notes_api.py index 14377076..00b937e7 100644 --- a/generated/groundlight_openapi_client/api/notes_api.py +++ b/generated/groundlight_openapi_client/api/notes_api.py @@ -22,6 +22,7 @@ validate_and_convert_types, ) from groundlight_openapi_client.model.all_notes import AllNotes +from groundlight_openapi_client.model.note_request import NoteRequest class NotesApi(object): @@ -47,49 +48,36 @@ def __init__(self, api_client=None): params_map={ "all": [ "detector_id", - "content", - "image", + "note_request", ], "required": [ "detector_id", - "content", - ], - "nullable": [ - "image", + "note_request", ], + "nullable": [], "enum": [], - "validation": [ - "content", - ], + "validation": [], }, root_map={ - "validations": { - ("content",): { - "min_length": 1, - }, - }, + "validations": {}, "allowed_values": {}, "openapi_types": { "detector_id": (str,), - "content": (str,), - "image": ( - file_type, - none_type, - ), + "note_request": (NoteRequest,), }, "attribute_map": { "detector_id": "detector_id", - "content": "content", - "image": "image", }, "location_map": { "detector_id": "query", - "content": "form", - "image": "form", + "note_request": "body", }, "collection_format_map": {}, }, - headers_map={"accept": [], "content_type": ["multipart/form-data"]}, + headers_map={ + "accept": [], + "content_type": ["application/json", "application/x-www-form-urlencoded", "multipart/form-data"], + }, api_client=api_client, ) self.get_notes_endpoint = _Endpoint( @@ -133,22 +121,21 @@ def __init__(self, api_client=None): api_client=api_client, ) - def create_note(self, detector_id, content, **kwargs): + def create_note(self, detector_id, note_request, **kwargs): """create_note # noqa: E501 Create a new note # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True - >>> thread = api.create_note(detector_id, content, async_req=True) + >>> thread = api.create_note(detector_id, note_request, async_req=True) >>> result = thread.get() Args: detector_id (str): the detector to associate the new note with - content (str): Text content of the note. + note_request (NoteRequest): Keyword Args: - image (file_type, none_type): [optional] _return_http_data_only (bool): response data without head status code and headers. Default is True. 
_preload_content (bool): if False, the urllib3.HTTPResponse object @@ -191,7 +178,7 @@ def create_note(self, detector_id, content, **kwargs): kwargs["_content_type"] = kwargs.get("_content_type") kwargs["_host_index"] = kwargs.get("_host_index") kwargs["detector_id"] = detector_id - kwargs["content"] = content + kwargs["note_request"] = note_request return self.create_note_endpoint.call_with_http_info(**kwargs) def get_notes(self, detector_id, **kwargs): diff --git a/generated/groundlight_openapi_client/apis/__init__.py b/generated/groundlight_openapi_client/apis/__init__.py index 2015077b..ef5c1236 100644 --- a/generated/groundlight_openapi_client/apis/__init__.py +++ b/generated/groundlight_openapi_client/apis/__init__.py @@ -15,6 +15,7 @@ # Import APIs into API package: from groundlight_openapi_client.api.actions_api import ActionsApi from groundlight_openapi_client.api.detector_groups_api import DetectorGroupsApi +from groundlight_openapi_client.api.detector_reset_api import DetectorResetApi from groundlight_openapi_client.api.detectors_api import DetectorsApi from groundlight_openapi_client.api.image_queries_api import ImageQueriesApi from groundlight_openapi_client.api.labels_api import LabelsApi diff --git a/generated/groundlight_openapi_client/model/blank_enum.py b/generated/groundlight_openapi_client/model/blank_enum.py new file mode 100644 index 00000000..8f634d5b --- /dev/null +++ b/generated/groundlight_openapi_client/model/blank_enum.py @@ -0,0 +1,274 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + +import re # noqa: F401 +import sys # noqa: F401 + +from groundlight_openapi_client.model_utils import ( # noqa: F401 + ApiTypeError, + ModelComposed, + ModelNormal, + ModelSimple, + cached_property, + change_keys_js_to_python, + convert_js_args_to_python_args, + date, + datetime, + file_type, + none_type, + validate_get_composed_info, + OpenApiModel, +) +from groundlight_openapi_client.exceptions import ApiAttributeError + + +class BlankEnum(ModelSimple): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + Attributes: + allowed_values (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + with a capitalized key describing the allowed value and an allowed + value. These dicts store the allowed enum values. + validations (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + that stores validations for max_length, min_length, max_items, + min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, + inclusive_minimum, and regex. + additional_properties_type (tuple): A tuple of classes accepted + as additional properties values. + """ + + allowed_values = { + ("value",): { + "EMPTY": "", + }, + } + + validations = {} + + additional_properties_type = None + + _nullable = False + + @cached_property + def openapi_types(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + + Returns + openapi_types (dict): The key is attribute name + and the value is attribute type. 
+ """ + return { + "value": (str,), + } + + @cached_property + def discriminator(): + return None + + attribute_map = {} + + read_only_vars = set() + + _composed_schemas = None + + required_properties = set([ + "_data_store", + "_check_type", + "_spec_property_naming", + "_path_to_item", + "_configuration", + "_visited_composed_classes", + ]) + + @convert_js_args_to_python_args + def __init__(self, *args, **kwargs): + """BlankEnum - a model defined in OpenAPI + + Note that value can be passed either in args or in kwargs, but not in both. + + Args: + args[0] (str): if omitted defaults to "", must be one of ["", ] # noqa: E501 + + Keyword Args: + value (str): if omitted defaults to "", must be one of ["", ] # noqa: E501 + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + # required up here when default value is not given + _path_to_item = kwargs.pop("_path_to_item", ()) + + if "value" in kwargs: + value = kwargs.pop("value") + elif args: + args = list(args) + value = args.pop(0) + else: + value = "" + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." + % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.value = value + if kwargs: + raise ApiTypeError( + "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." 
+ % ( + kwargs, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + @classmethod + @convert_js_args_to_python_args + def _from_openapi_data(cls, *args, **kwargs): + """BlankEnum - a model defined in OpenAPI + + Note that value can be passed either in args or in kwargs, but not in both. + + Args: + args[0] (str): if omitted defaults to "", must be one of ["", ] # noqa: E501 + + Keyword Args: + value (str): if omitted defaults to "", must be one of ["", ] # noqa: E501 + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + # required up here when default value is not given + _path_to_item = kwargs.pop("_path_to_item", ()) + + self = super(OpenApiModel, cls).__new__(cls) + + if "value" in kwargs: + value = kwargs.pop("value") + elif args: + args = list(args) + value = args.pop(0) + else: + value = "" + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." + % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.value = value + if kwargs: + raise ApiTypeError( + "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." 
+ % ( + kwargs, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + return self diff --git a/generated/groundlight_openapi_client/model/detector.py b/generated/groundlight_openapi_client/model/detector.py index 95248c0f..23761a8d 100644 --- a/generated/groundlight_openapi_client/model/detector.py +++ b/generated/groundlight_openapi_client/model/detector.py @@ -30,9 +30,17 @@ def lazy_import(): + from groundlight_openapi_client.model.blank_enum import BlankEnum from groundlight_openapi_client.model.detector_type_enum import DetectorTypeEnum + from groundlight_openapi_client.model.escalation_type_enum import EscalationTypeEnum + from groundlight_openapi_client.model.mode_enum import ModeEnum + from groundlight_openapi_client.model.status_enum import StatusEnum + globals()["BlankEnum"] = BlankEnum globals()["DetectorTypeEnum"] = DetectorTypeEnum + globals()["EscalationTypeEnum"] = EscalationTypeEnum + globals()["ModeEnum"] = ModeEnum + globals()["StatusEnum"] = StatusEnum class Detector(ModelNormal): @@ -128,13 +136,45 @@ def openapi_types(): {str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type, ), # noqa: E501 - "mode": (str,), # noqa: E501 + "mode": ( + bool, + date, + datetime, + dict, + float, + int, + list, + str, + none_type, + ), # noqa: E501 "mode_configuration": ( {str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type, ), # noqa: E501 "confidence_threshold": (float,), # noqa: E501 "patience_time": (float,), # noqa: E501 + "status": ( + bool, + date, + datetime, + dict, + float, + int, + list, + str, + none_type, + ), # noqa: E501 + "escalation_type": ( + bool, + date, + datetime, + dict, + float, + int, + list, + str, + none_type, + ), # noqa: E501 } @cached_property @@ -153,6 +193,8 @@ def discriminator(): "mode_configuration": "mode_configuration", # noqa: E501 "confidence_threshold": "confidence_threshold", # noqa: E501 "patience_time": "patience_time", # noqa: E501 + "status": "status", # noqa: E501 + "escalation_type": "escalation_type", # noqa: E501 } read_only_vars = { @@ -183,7 +225,7 @@ def _from_openapi_data( query (str): A question about the image. group_name (str): Which group should this detector be part of? metadata ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): Metadata about the detector. - mode (str): + mode (bool, date, datetime, dict, float, int, list, str, none_type): mode_configuration ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): Keyword Args: @@ -219,6 +261,8 @@ def _from_openapi_data( _visited_composed_classes = (Animal,) confidence_threshold (float): If the detector's prediction is below this confidence threshold, send the image query for human review.. [optional] if omitted the server will use the default value of 0.9 # noqa: E501 patience_time (float): How long Groundlight will attempt to generate a confident prediction. [optional] if omitted the server will use the default value of 30.0 # noqa: E501 + status (bool, date, datetime, dict, float, int, list, str, none_type): [optional] # noqa: E501 + escalation_type (bool, date, datetime, dict, float, int, list, str, none_type): Category that define internal proccess for labeling image queries * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING. 
[optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -315,6 +359,8 @@ def __init__(self, name, *args, **kwargs): # noqa: E501 _visited_composed_classes = (Animal,) confidence_threshold (float): If the detector's prediction is below this confidence threshold, send the image query for human review.. [optional] if omitted the server will use the default value of 0.9 # noqa: E501 patience_time (float): How long Groundlight will attempt to generate a confident prediction. [optional] if omitted the server will use the default value of 30.0 # noqa: E501 + status (bool, date, datetime, dict, float, int, list, str, none_type): [optional] # noqa: E501 + escalation_type (bool, date, datetime, dict, float, int, list, str, none_type): Category that define internal proccess for labeling image queries * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING. [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) diff --git a/generated/groundlight_openapi_client/model/detector_request.py b/generated/groundlight_openapi_client/model/detector_request.py new file mode 100644 index 00000000..c7f708f1 --- /dev/null +++ b/generated/groundlight_openapi_client/model/detector_request.py @@ -0,0 +1,335 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + +import re # noqa: F401 +import sys # noqa: F401 + +from groundlight_openapi_client.model_utils import ( # noqa: F401 + ApiTypeError, + ModelComposed, + ModelNormal, + ModelSimple, + cached_property, + change_keys_js_to_python, + convert_js_args_to_python_args, + date, + datetime, + file_type, + none_type, + validate_get_composed_info, + OpenApiModel, +) +from groundlight_openapi_client.exceptions import ApiAttributeError + + +def lazy_import(): + from groundlight_openapi_client.model.blank_enum import BlankEnum + from groundlight_openapi_client.model.escalation_type_enum import EscalationTypeEnum + from groundlight_openapi_client.model.status_enum import StatusEnum + + globals()["BlankEnum"] = BlankEnum + globals()["EscalationTypeEnum"] = EscalationTypeEnum + globals()["StatusEnum"] = StatusEnum + + +class DetectorRequest(ModelNormal): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + Attributes: + allowed_values (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + with a capitalized key describing the allowed value and an allowed + value. These dicts store the allowed enum values. + attribute_map (dict): The key is attribute name + and the value is json key in definition. + discriminator_value_class_map (dict): A dict to go from the discriminator + variable value to the discriminator class name. + validations (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + that stores validations for max_length, min_length, max_items, + min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, + inclusive_minimum, and regex. + additional_properties_type (tuple): A tuple of classes accepted + as additional properties values. 
+ """ + + allowed_values = {} + + validations = { + ("name",): { + "max_length": 200, + "min_length": 1, + }, + ("confidence_threshold",): { + "inclusive_maximum": 1.0, + "inclusive_minimum": 0.0, + }, + ("patience_time",): { + "inclusive_maximum": 3600, + "inclusive_minimum": 0, + }, + } + + @cached_property + def additional_properties_type(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + """ + lazy_import() + return ( + bool, + date, + datetime, + dict, + float, + int, + list, + str, + none_type, + ) # noqa: E501 + + _nullable = False + + @cached_property + def openapi_types(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + + Returns + openapi_types (dict): The key is attribute name + and the value is attribute type. + """ + lazy_import() + return { + "name": (str,), # noqa: E501 + "confidence_threshold": (float,), # noqa: E501 + "patience_time": (float,), # noqa: E501 + "status": ( + bool, + date, + datetime, + dict, + float, + int, + list, + str, + none_type, + ), # noqa: E501 + "escalation_type": ( + bool, + date, + datetime, + dict, + float, + int, + list, + str, + none_type, + ), # noqa: E501 + } + + @cached_property + def discriminator(): + return None + + attribute_map = { + "name": "name", # noqa: E501 + "confidence_threshold": "confidence_threshold", # noqa: E501 + "patience_time": "patience_time", # noqa: E501 + "status": "status", # noqa: E501 + "escalation_type": "escalation_type", # noqa: E501 + } + + read_only_vars = {} + + _composed_schemas = {} + + @classmethod + @convert_js_args_to_python_args + def _from_openapi_data(cls, name, *args, **kwargs): # noqa: E501 + """DetectorRequest - a model defined in OpenAPI + + Args: + name (str): A short, descriptive name for the detector. + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + confidence_threshold (float): If the detector's prediction is below this confidence threshold, send the image query for human review.. 
[optional] if omitted the server will use the default value of 0.9 # noqa: E501 + patience_time (float): How long Groundlight will attempt to generate a confident prediction. [optional] if omitted the server will use the default value of 30.0 # noqa: E501 + status (bool, date, datetime, dict, float, int, list, str, none_type): [optional] # noqa: E501 + escalation_type (bool, date, datetime, dict, float, int, list, str, none_type): Category that define internal proccess for labeling image queries * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING. [optional] # noqa: E501 + """ + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _path_to_item = kwargs.pop("_path_to_item", ()) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + self = super(OpenApiModel, cls).__new__(cls) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." + % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + self.name = name + for var_name, var_value in kwargs.items(): + if ( + var_name not in self.attribute_map + and self._configuration is not None + and self._configuration.discard_unknown_keys + and self.additional_properties_type is None + ): + # discard variable. + continue + setattr(self, var_name, var_value) + return self + + required_properties = set([ + "_data_store", + "_check_type", + "_spec_property_naming", + "_path_to_item", + "_configuration", + "_visited_composed_classes", + ]) + + @convert_js_args_to_python_args + def __init__(self, name, *args, **kwargs): # noqa: E501 + """DetectorRequest - a model defined in OpenAPI + + Args: + name (str): A short, descriptive name for the detector. + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. 
+ Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + confidence_threshold (float): If the detector's prediction is below this confidence threshold, send the image query for human review.. [optional] if omitted the server will use the default value of 0.9 # noqa: E501 + patience_time (float): How long Groundlight will attempt to generate a confident prediction. [optional] if omitted the server will use the default value of 30.0 # noqa: E501 + status (bool, date, datetime, dict, float, int, list, str, none_type): [optional] # noqa: E501 + escalation_type (bool, date, datetime, dict, float, int, list, str, none_type): Category that define internal proccess for labeling image queries * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING. [optional] # noqa: E501 + """ + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _path_to_item = kwargs.pop("_path_to_item", ()) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." + % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + self.name = name + for var_name, var_value in kwargs.items(): + if ( + var_name not in self.attribute_map + and self._configuration is not None + and self._configuration.discard_unknown_keys + and self.additional_properties_type is None + ): + # discard variable. + continue + setattr(self, var_name, var_value) + if var_name in self.read_only_vars: + raise ApiAttributeError( + f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " + "class with read only attributes." + ) diff --git a/generated/groundlight_openapi_client/model/escalation_type_enum.py b/generated/groundlight_openapi_client/model/escalation_type_enum.py new file mode 100644 index 00000000..291e25a5 --- /dev/null +++ b/generated/groundlight_openapi_client/model/escalation_type_enum.py @@ -0,0 +1,283 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + +import re # noqa: F401 +import sys # noqa: F401 + +from groundlight_openapi_client.model_utils import ( # noqa: F401 + ApiTypeError, + ModelComposed, + ModelNormal, + ModelSimple, + cached_property, + change_keys_js_to_python, + convert_js_args_to_python_args, + date, + datetime, + file_type, + none_type, + validate_get_composed_info, + OpenApiModel, +) +from groundlight_openapi_client.exceptions import ApiAttributeError + + +class EscalationTypeEnum(ModelSimple): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ + Attributes: + allowed_values (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + with a capitalized key describing the allowed value and an allowed + value. These dicts store the allowed enum values. + validations (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + that stores validations for max_length, min_length, max_items, + min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, + inclusive_minimum, and regex. + additional_properties_type (tuple): A tuple of classes accepted + as additional properties values. + """ + + allowed_values = { + ("value",): { + "STANDARD": "STANDARD", + "NO_HUMAN_LABELING": "NO_HUMAN_LABELING", + }, + } + + validations = {} + + additional_properties_type = None + + _nullable = False + + @cached_property + def openapi_types(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + + Returns + openapi_types (dict): The key is attribute name + and the value is attribute type. + """ + return { + "value": (str,), + } + + @cached_property + def discriminator(): + return None + + attribute_map = {} + + read_only_vars = set() + + _composed_schemas = None + + required_properties = set([ + "_data_store", + "_check_type", + "_spec_property_naming", + "_path_to_item", + "_configuration", + "_visited_composed_classes", + ]) + + @convert_js_args_to_python_args + def __init__(self, *args, **kwargs): + """EscalationTypeEnum - a model defined in OpenAPI + + Note that value can be passed either in args or in kwargs, but not in both. + + Args: + args[0] (str): * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING., must be one of ["STANDARD", "NO_HUMAN_LABELING", ] # noqa: E501 + + Keyword Args: + value (str): * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING., must be one of ["STANDARD", "NO_HUMAN_LABELING", ] # noqa: E501 + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. 
+ Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + # required up here when default value is not given + _path_to_item = kwargs.pop("_path_to_item", ()) + + if "value" in kwargs: + value = kwargs.pop("value") + elif args: + args = list(args) + value = args.pop(0) + else: + raise ApiTypeError( + "value is required, but not passed in args or kwargs and doesn't have default", + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." + % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.value = value + if kwargs: + raise ApiTypeError( + "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." + % ( + kwargs, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + @classmethod + @convert_js_args_to_python_args + def _from_openapi_data(cls, *args, **kwargs): + """EscalationTypeEnum - a model defined in OpenAPI + + Note that value can be passed either in args or in kwargs, but not in both. + + Args: + args[0] (str): * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING., must be one of ["STANDARD", "NO_HUMAN_LABELING", ] # noqa: E501 + + Keyword Args: + value (str): * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING., must be one of ["STANDARD", "NO_HUMAN_LABELING", ] # noqa: E501 + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. 
+ Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + # required up here when default value is not given + _path_to_item = kwargs.pop("_path_to_item", ()) + + self = super(OpenApiModel, cls).__new__(cls) + + if "value" in kwargs: + value = kwargs.pop("value") + elif args: + args = list(args) + value = args.pop(0) + else: + raise ApiTypeError( + "value is required, but not passed in args or kwargs and doesn't have default", + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." + % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.value = value + if kwargs: + raise ApiTypeError( + "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." + % ( + kwargs, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + return self diff --git a/generated/groundlight_openapi_client/model/label_value.py b/generated/groundlight_openapi_client/model/label_value.py index b6ac9148..91e2606f 100644 --- a/generated/groundlight_openapi_client/model/label_value.py +++ b/generated/groundlight_openapi_client/model/label_value.py @@ -165,7 +165,7 @@ def _from_openapi_data( Args: confidence (float, none_type): - class_name (str, none_type): A human-readable class name for this label (e.g. YES/NO) + class_name (str, none_type): Return a human-readable class name for this label (e.g. YES/NO) annotations_requested ([bool, date, datetime, dict, float, int, list, str, none_type]): created_at (datetime): detector_id (int, none_type): diff --git a/generated/groundlight_openapi_client/model/mode_enum.py b/generated/groundlight_openapi_client/model/mode_enum.py index 41f460b2..1ba41227 100644 --- a/generated/groundlight_openapi_client/model/mode_enum.py +++ b/generated/groundlight_openapi_client/model/mode_enum.py @@ -103,10 +103,10 @@ def __init__(self, *args, **kwargs): Note that value can be passed either in args or in kwargs, but not in both. Args: - args[0] (str): * `BINARY` - BINARY * `COUNT` - COUNT * `MULTI_CLASS` - MULTI_CLASS., must be one of ["BINARY", "COUNT", "MULTI_CLASS", ] # noqa: E501 + args[0] (str):, must be one of ["BINARY", "COUNT", "MULTI_CLASS", ] # noqa: E501 Keyword Args: - value (str): * `BINARY` - BINARY * `COUNT` - COUNT * `MULTI_CLASS` - MULTI_CLASS., must be one of ["BINARY", "COUNT", "MULTI_CLASS", ] # noqa: E501 + value (str):, must be one of ["BINARY", "COUNT", "MULTI_CLASS", ] # noqa: E501 _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. @@ -195,10 +195,10 @@ def _from_openapi_data(cls, *args, **kwargs): Note that value can be passed either in args or in kwargs, but not in both. 
Args: - args[0] (str): * `BINARY` - BINARY * `COUNT` - COUNT * `MULTI_CLASS` - MULTI_CLASS., must be one of ["BINARY", "COUNT", "MULTI_CLASS", ] # noqa: E501 + args[0] (str):, must be one of ["BINARY", "COUNT", "MULTI_CLASS", ] # noqa: E501 Keyword Args: - value (str): * `BINARY` - BINARY * `COUNT` - COUNT * `MULTI_CLASS` - MULTI_CLASS., must be one of ["BINARY", "COUNT", "MULTI_CLASS", ] # noqa: E501 + value (str):, must be one of ["BINARY", "COUNT", "MULTI_CLASS", ] # noqa: E501 _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. diff --git a/generated/groundlight_openapi_client/model/note_request.py b/generated/groundlight_openapi_client/model/note_request.py index bc4961a6..545cb56c 100644 --- a/generated/groundlight_openapi_client/model/note_request.py +++ b/generated/groundlight_openapi_client/model/note_request.py @@ -93,6 +93,10 @@ def openapi_types(): """ return { "content": (str,), # noqa: E501 + "image": ( + file_type, + none_type, + ), # noqa: E501 } @cached_property @@ -101,6 +105,7 @@ def discriminator(): attribute_map = { "content": "content", # noqa: E501 + "image": "image", # noqa: E501 } read_only_vars = {} @@ -146,6 +151,7 @@ def _from_openapi_data(cls, content, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) + image (file_type, none_type): [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -234,6 +240,7 @@ def __init__(self, content, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) + image (file_type, none_type): [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) diff --git a/generated/groundlight_openapi_client/model/patched_detector_request.py b/generated/groundlight_openapi_client/model/patched_detector_request.py new file mode 100644 index 00000000..3572396d --- /dev/null +++ b/generated/groundlight_openapi_client/model/patched_detector_request.py @@ -0,0 +1,329 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + +import re # noqa: F401 +import sys # noqa: F401 + +from groundlight_openapi_client.model_utils import ( # noqa: F401 + ApiTypeError, + ModelComposed, + ModelNormal, + ModelSimple, + cached_property, + change_keys_js_to_python, + convert_js_args_to_python_args, + date, + datetime, + file_type, + none_type, + validate_get_composed_info, + OpenApiModel, +) +from groundlight_openapi_client.exceptions import ApiAttributeError + + +def lazy_import(): + from groundlight_openapi_client.model.blank_enum import BlankEnum + from groundlight_openapi_client.model.escalation_type_enum import EscalationTypeEnum + from groundlight_openapi_client.model.status_enum import StatusEnum + + globals()["BlankEnum"] = BlankEnum + globals()["EscalationTypeEnum"] = EscalationTypeEnum + globals()["StatusEnum"] = StatusEnum + + +class PatchedDetectorRequest(ModelNormal): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ + Attributes: + allowed_values (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + with a capitalized key describing the allowed value and an allowed + value. These dicts store the allowed enum values. + attribute_map (dict): The key is attribute name + and the value is json key in definition. + discriminator_value_class_map (dict): A dict to go from the discriminator + variable value to the discriminator class name. + validations (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + that stores validations for max_length, min_length, max_items, + min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, + inclusive_minimum, and regex. + additional_properties_type (tuple): A tuple of classes accepted + as additional properties values. + """ + + allowed_values = {} + + validations = { + ("name",): { + "max_length": 200, + "min_length": 1, + }, + ("confidence_threshold",): { + "inclusive_maximum": 1.0, + "inclusive_minimum": 0.0, + }, + ("patience_time",): { + "inclusive_maximum": 3600, + "inclusive_minimum": 0, + }, + } + + @cached_property + def additional_properties_type(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + """ + lazy_import() + return ( + bool, + date, + datetime, + dict, + float, + int, + list, + str, + none_type, + ) # noqa: E501 + + _nullable = False + + @cached_property + def openapi_types(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + + Returns + openapi_types (dict): The key is attribute name + and the value is attribute type. + """ + lazy_import() + return { + "name": (str,), # noqa: E501 + "confidence_threshold": (float,), # noqa: E501 + "patience_time": (float,), # noqa: E501 + "status": ( + bool, + date, + datetime, + dict, + float, + int, + list, + str, + none_type, + ), # noqa: E501 + "escalation_type": ( + bool, + date, + datetime, + dict, + float, + int, + list, + str, + none_type, + ), # noqa: E501 + } + + @cached_property + def discriminator(): + return None + + attribute_map = { + "name": "name", # noqa: E501 + "confidence_threshold": "confidence_threshold", # noqa: E501 + "patience_time": "patience_time", # noqa: E501 + "status": "status", # noqa: E501 + "escalation_type": "escalation_type", # noqa: E501 + } + + read_only_vars = {} + + _composed_schemas = {} + + @classmethod + @convert_js_args_to_python_args + def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 + """PatchedDetectorRequest - a model defined in OpenAPI + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. 
+ _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + name (str): A short, descriptive name for the detector.. [optional] # noqa: E501 + confidence_threshold (float): If the detector's prediction is below this confidence threshold, send the image query for human review.. [optional] if omitted the server will use the default value of 0.9 # noqa: E501 + patience_time (float): How long Groundlight will attempt to generate a confident prediction. [optional] if omitted the server will use the default value of 30.0 # noqa: E501 + status (bool, date, datetime, dict, float, int, list, str, none_type): [optional] # noqa: E501 + escalation_type (bool, date, datetime, dict, float, int, list, str, none_type): Category that define internal proccess for labeling image queries * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING. [optional] # noqa: E501 + """ + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _path_to_item = kwargs.pop("_path_to_item", ()) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + self = super(OpenApiModel, cls).__new__(cls) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." + % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + for var_name, var_value in kwargs.items(): + if ( + var_name not in self.attribute_map + and self._configuration is not None + and self._configuration.discard_unknown_keys + and self.additional_properties_type is None + ): + # discard variable. + continue + setattr(self, var_name, var_value) + return self + + required_properties = set([ + "_data_store", + "_check_type", + "_spec_property_naming", + "_path_to_item", + "_configuration", + "_visited_composed_classes", + ]) + + @convert_js_args_to_python_args + def __init__(self, *args, **kwargs): # noqa: E501 + """PatchedDetectorRequest - a model defined in OpenAPI + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. 
snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + name (str): A short, descriptive name for the detector.. [optional] # noqa: E501 + confidence_threshold (float): If the detector's prediction is below this confidence threshold, send the image query for human review.. [optional] if omitted the server will use the default value of 0.9 # noqa: E501 + patience_time (float): How long Groundlight will attempt to generate a confident prediction. [optional] if omitted the server will use the default value of 30.0 # noqa: E501 + status (bool, date, datetime, dict, float, int, list, str, none_type): [optional] # noqa: E501 + escalation_type (bool, date, datetime, dict, float, int, list, str, none_type): Category that define internal proccess for labeling image queries * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING. [optional] # noqa: E501 + """ + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _path_to_item = kwargs.pop("_path_to_item", ()) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." + % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + for var_name, var_value in kwargs.items(): + if ( + var_name not in self.attribute_map + and self._configuration is not None + and self._configuration.discard_unknown_keys + and self.additional_properties_type is None + ): + # discard variable. + continue + setattr(self, var_name, var_value) + if var_name in self.read_only_vars: + raise ApiAttributeError( + f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " + "class with read only attributes." + ) diff --git a/generated/groundlight_openapi_client/model/status_enum.py b/generated/groundlight_openapi_client/model/status_enum.py new file mode 100644 index 00000000..fe8df1e7 --- /dev/null +++ b/generated/groundlight_openapi_client/model/status_enum.py @@ -0,0 +1,283 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + +import re # noqa: F401 +import sys # noqa: F401 + +from groundlight_openapi_client.model_utils import ( # noqa: F401 + ApiTypeError, + ModelComposed, + ModelNormal, + ModelSimple, + cached_property, + change_keys_js_to_python, + convert_js_args_to_python_args, + date, + datetime, + file_type, + none_type, + validate_get_composed_info, + OpenApiModel, +) +from groundlight_openapi_client.exceptions import ApiAttributeError + + +class StatusEnum(ModelSimple): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + Attributes: + allowed_values (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + with a capitalized key describing the allowed value and an allowed + value. These dicts store the allowed enum values. + validations (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + that stores validations for max_length, min_length, max_items, + min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, + inclusive_minimum, and regex. + additional_properties_type (tuple): A tuple of classes accepted + as additional properties values. + """ + + allowed_values = { + ("value",): { + "ON": "ON", + "OFF": "OFF", + }, + } + + validations = {} + + additional_properties_type = None + + _nullable = False + + @cached_property + def openapi_types(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + + Returns + openapi_types (dict): The key is attribute name + and the value is attribute type. + """ + return { + "value": (str,), + } + + @cached_property + def discriminator(): + return None + + attribute_map = {} + + read_only_vars = set() + + _composed_schemas = None + + required_properties = set([ + "_data_store", + "_check_type", + "_spec_property_naming", + "_path_to_item", + "_configuration", + "_visited_composed_classes", + ]) + + @convert_js_args_to_python_args + def __init__(self, *args, **kwargs): + """StatusEnum - a model defined in OpenAPI + + Note that value can be passed either in args or in kwargs, but not in both. + + Args: + args[0] (str): * `ON` - ON * `OFF` - OFF., must be one of ["ON", "OFF", ] # noqa: E501 + + Keyword Args: + value (str): * `ON` - ON * `OFF` - OFF., must be one of ["ON", "OFF", ] # noqa: E501 + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. 
+ When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + # required up here when default value is not given + _path_to_item = kwargs.pop("_path_to_item", ()) + + if "value" in kwargs: + value = kwargs.pop("value") + elif args: + args = list(args) + value = args.pop(0) + else: + raise ApiTypeError( + "value is required, but not passed in args or kwargs and doesn't have default", + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." + % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.value = value + if kwargs: + raise ApiTypeError( + "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." + % ( + kwargs, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + @classmethod + @convert_js_args_to_python_args + def _from_openapi_data(cls, *args, **kwargs): + """StatusEnum - a model defined in OpenAPI + + Note that value can be passed either in args or in kwargs, but not in both. + + Args: + args[0] (str): * `ON` - ON * `OFF` - OFF., must be one of ["ON", "OFF", ] # noqa: E501 + + Keyword Args: + value (str): * `ON` - ON * `OFF` - OFF., must be one of ["ON", "OFF", ] # noqa: E501 + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. 
+ Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + # required up here when default value is not given + _path_to_item = kwargs.pop("_path_to_item", ()) + + self = super(OpenApiModel, cls).__new__(cls) + + if "value" in kwargs: + value = kwargs.pop("value") + elif args: + args = list(args) + value = args.pop(0) + else: + raise ApiTypeError( + "value is required, but not passed in args or kwargs and doesn't have default", + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." + % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.value = value + if kwargs: + raise ApiTypeError( + "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." + % ( + kwargs, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + return self diff --git a/generated/groundlight_openapi_client/models/__init__.py b/generated/groundlight_openapi_client/models/__init__.py index 95e2b851..272c5e80 100644 --- a/generated/groundlight_openapi_client/models/__init__.py +++ b/generated/groundlight_openapi_client/models/__init__.py @@ -16,6 +16,7 @@ from groundlight_openapi_client.model.b_box_geometry import BBoxGeometry from groundlight_openapi_client.model.b_box_geometry_request import BBoxGeometryRequest from groundlight_openapi_client.model.binary_classification_result import BinaryClassificationResult +from groundlight_openapi_client.model.blank_enum import BlankEnum from groundlight_openapi_client.model.channel_enum import ChannelEnum from groundlight_openapi_client.model.condition import Condition from groundlight_openapi_client.model.condition_request import ConditionRequest @@ -25,6 +26,7 @@ from groundlight_openapi_client.model.detector_group import DetectorGroup from groundlight_openapi_client.model.detector_group_request import DetectorGroupRequest from groundlight_openapi_client.model.detector_type_enum import DetectorTypeEnum +from groundlight_openapi_client.model.escalation_type_enum import EscalationTypeEnum from groundlight_openapi_client.model.image_query import ImageQuery from groundlight_openapi_client.model.image_query_type_enum import ImageQueryTypeEnum from groundlight_openapi_client.model.inline_response200 import InlineResponse200 @@ -32,9 +34,11 @@ from groundlight_openapi_client.model.label_value_request import LabelValueRequest from groundlight_openapi_client.model.mode_enum import ModeEnum from groundlight_openapi_client.model.note import Note +from groundlight_openapi_client.model.note_request import NoteRequest from groundlight_openapi_client.model.paginated_detector_list import PaginatedDetectorList from groundlight_openapi_client.model.paginated_image_query_list import 
PaginatedImageQueryList from groundlight_openapi_client.model.paginated_rule_list import PaginatedRuleList +from groundlight_openapi_client.model.patched_detector_request import PatchedDetectorRequest from groundlight_openapi_client.model.roi import ROI from groundlight_openapi_client.model.roi_request import ROIRequest from groundlight_openapi_client.model.result_type_enum import ResultTypeEnum @@ -42,4 +46,5 @@ from groundlight_openapi_client.model.rule_request import RuleRequest from groundlight_openapi_client.model.snooze_time_unit_enum import SnoozeTimeUnitEnum from groundlight_openapi_client.model.source_enum import SourceEnum +from groundlight_openapi_client.model.status_enum import StatusEnum from groundlight_openapi_client.model.verb_enum import VerbEnum diff --git a/generated/model.py b/generated/model.py index 114ded38..b5113a5c 100644 --- a/generated/model.py +++ b/generated/model.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: # filename: public-api.yaml -# timestamp: 2024-08-14T20:35:47+00:00 +# timestamp: 2024-08-27T20:38:52+00:00 from __future__ import annotations @@ -45,6 +45,10 @@ class BBoxGeometryRequest(BaseModel): bottom: float +class BlankEnum(Enum): + field_ = "" + + class ChannelEnum(Enum): """ * `EMAIL` - EMAIL @@ -68,17 +72,21 @@ class DetectorTypeEnum(Enum): detector = "detector" +class EscalationTypeEnum(Enum): + """ + * `STANDARD` - STANDARD + * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING + """ + + STANDARD = "STANDARD" + NO_HUMAN_LABELING = "NO_HUMAN_LABELING" + + class ImageQueryTypeEnum(Enum): image_query = "image_query" class ModeEnum(Enum): - """ - * `BINARY` - BINARY - * `COUNT` - COUNT - * `MULTI_CLASS` - MULTI_CLASS - """ - BINARY = "BINARY" COUNT = "COUNT" MULTI_CLASS = "MULTI_CLASS" @@ -152,6 +160,16 @@ class SourceEnum(Enum): ALG_UNCLEAR = "ALG_UNCLEAR" +class StatusEnum(Enum): + """ + * `ON` - ON + * `OFF` - OFF + """ + + ON = "ON" + OFF = "OFF" + + class VerbEnum(Enum): """ * `ANSWERED_CONSECUTIVELY` - ANSWERED_CONSECUTIVELY @@ -231,8 +249,16 @@ class Detector(BaseModel): 30.0, description="How long Groundlight will attempt to generate a confident prediction" ) metadata: Optional[Dict[str, Any]] = Field(..., description="Metadata about the detector.") - mode: str + mode: ModeEnum mode_configuration: Optional[Dict[str, Any]] = Field(...) + status: Optional[Union[StatusEnum, BlankEnum]] = None + escalation_type: Optional[EscalationTypeEnum] = Field( + None, + description=( + "Category that define internal proccess for labeling image queries\n\n* `STANDARD` - STANDARD\n*" + " `NO_HUMAN_LABELING` - NO_HUMAN_LABELING" + ), + ) class DetectorCreationInputRequest(BaseModel): @@ -300,7 +326,9 @@ class ImageQuery(BaseModel): class LabelValue(BaseModel): confidence: Optional[float] = Field(...) - class_name: Optional[str] = Field(..., description="A human-readable class name for this label (e.g. YES/NO)") + class_name: Optional[str] = Field( + ..., description="Return a human-readable class name for this label (e.g. YES/NO)" + ) rois: Optional[List[ROI]] = None annotations_requested: List[AnnotationsRequestedEnum] created_at: datetime @@ -328,6 +356,33 @@ class PaginatedImageQueryList(BaseModel): results: List[ImageQuery] +class PatchedDetectorRequest(BaseModel): + """ + Spec for serializing a detector object in the public API. + """ + + name: Optional[constr(min_length=1, max_length=200)] = Field( + None, description="A short, descriptive name for the detector." 
+ ) + confidence_threshold: confloat(ge=0.0, le=1.0) = Field( + 0.9, + description=( + "If the detector's prediction is below this confidence threshold, send the image query for human review." + ), + ) + patience_time: confloat(ge=0.0, le=3600.0) = Field( + 30.0, description="How long Groundlight will attempt to generate a confident prediction" + ) + status: Optional[Union[StatusEnum, BlankEnum]] = None + escalation_type: Optional[EscalationTypeEnum] = Field( + None, + description=( + "Category that define internal proccess for labeling image queries\n\n* `STANDARD` - STANDARD\n*" + " `NO_HUMAN_LABELING` - NO_HUMAN_LABELING" + ), + ) + + class Rule(BaseModel): id: int detector_id: str diff --git a/generated/test/test_blank_enum.py b/generated/test/test_blank_enum.py new file mode 100644 index 00000000..14707bf4 --- /dev/null +++ b/generated/test/test_blank_enum.py @@ -0,0 +1,35 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + +import sys +import unittest + +import groundlight_openapi_client +from groundlight_openapi_client.model.blank_enum import BlankEnum + + +class TestBlankEnum(unittest.TestCase): + """BlankEnum unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testBlankEnum(self): + """Test BlankEnum""" + # FIXME: construct object with mandatory attributes with example values + # model = BlankEnum() # noqa: E501 + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/generated/test/test_detector_reset_api.py b/generated/test/test_detector_reset_api.py new file mode 100644 index 00000000..6473da80 --- /dev/null +++ b/generated/test/test_detector_reset_api.py @@ -0,0 +1,32 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + +import unittest + +import groundlight_openapi_client +from groundlight_openapi_client.api.detector_reset_api import DetectorResetApi # noqa: E501 + + +class TestDetectorResetApi(unittest.TestCase): + """DetectorResetApi unit test stubs""" + + def setUp(self): + self.api = DetectorResetApi() # noqa: E501 + + def tearDown(self): + pass + + def test_reset_detector(self): + """Test case for reset_detector""" + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/generated/test/test_escalation_type_enum.py b/generated/test/test_escalation_type_enum.py new file mode 100644 index 00000000..cd462ebd --- /dev/null +++ b/generated/test/test_escalation_type_enum.py @@ -0,0 +1,35 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + +import sys +import unittest + +import groundlight_openapi_client +from groundlight_openapi_client.model.escalation_type_enum import EscalationTypeEnum + + +class TestEscalationTypeEnum(unittest.TestCase): + """EscalationTypeEnum unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testEscalationTypeEnum(self): + """Test EscalationTypeEnum""" + # FIXME: construct object with mandatory attributes with example values + # model = EscalationTypeEnum() # noqa: E501 + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/generated/test/test_patched_detector_request.py b/generated/test/test_patched_detector_request.py new file mode 100644 index 00000000..cc89a0dc --- /dev/null +++ b/generated/test/test_patched_detector_request.py @@ -0,0 +1,42 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + +import sys +import unittest + +import groundlight_openapi_client +from groundlight_openapi_client.model.blank_enum import BlankEnum +from groundlight_openapi_client.model.escalation_type_enum import EscalationTypeEnum +from groundlight_openapi_client.model.status_enum import StatusEnum + +globals()["BlankEnum"] = BlankEnum +globals()["EscalationTypeEnum"] = EscalationTypeEnum +globals()["StatusEnum"] = StatusEnum +from groundlight_openapi_client.model.patched_detector_request import PatchedDetectorRequest + + +class TestPatchedDetectorRequest(unittest.TestCase): + """PatchedDetectorRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testPatchedDetectorRequest(self): + """Test PatchedDetectorRequest""" + # FIXME: construct object with mandatory attributes with example values + # model = PatchedDetectorRequest() # noqa: E501 + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/generated/test/test_status_enum.py b/generated/test/test_status_enum.py new file mode 100644 index 00000000..254c5b0c --- /dev/null +++ b/generated/test/test_status_enum.py @@ -0,0 +1,35 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + +import sys +import unittest + +import groundlight_openapi_client +from groundlight_openapi_client.model.status_enum import StatusEnum + + +class TestStatusEnum(unittest.TestCase): + """StatusEnum unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testStatusEnum(self): + """Test StatusEnum""" + # FIXME: construct object with mandatory attributes with example values + # model = StatusEnum() # noqa: E501 + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/package-lock.json b/package-lock.json index 36adb0f7..e0bc6737 100644 --- a/package-lock.json +++ b/package-lock.json @@ -7,7 +7,7 @@ "name": "groundlight-sdk-generator", "dependencies": { "@openapitools/openapi-generator-cli": "^2.9.0", - "rehype-katex": "^7.0.0", + "rehype-katex": "^7.0.1", "remark-math": "^6.0.0" } }, @@ -1621,9 +1621,10 @@ "license": "Apache-2.0" }, "node_modules/rehype-katex": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/rehype-katex/-/rehype-katex-7.0.0.tgz", - "integrity": "sha512-h8FPkGE00r2XKU+/acgqwWUlyzve1IiOKwsEkg4pDL3k48PiE0Pt+/uLtVHDVkN1yA4iurZN6UES8ivHVEQV6Q==", + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/rehype-katex/-/rehype-katex-7.0.1.tgz", + "integrity": "sha512-OiM2wrZ/wuhKkigASodFoo8wimG3H12LWQaH8qSPVJn9apWKFSH3YOCtbKpBorTVw/eI7cuT21XBbvwEswbIOA==", + "license": "MIT", "dependencies": { "@types/hast": "^3.0.0", "@types/katex": "^0.16.0", @@ -1642,6 +1643,7 @@ "version": "6.0.0", "resolved": "https://registry.npmjs.org/remark-math/-/remark-math-6.0.0.tgz", "integrity": "sha512-MMqgnP74Igy+S3WwnhQ7kqGlEerTETXMvJhrUzDikVZ2/uogJCb+WHUg97hK9/jcfc0dkD73s3LN8zU49cTEtA==", + "license": "MIT", "dependencies": { "@types/mdast": "^4.0.0", "mdast-util-math": "^3.0.0", diff --git a/package.json b/package.json index c279bd3e..baebe0b9 100644 --- a/package.json +++ b/package.json @@ -2,7 +2,7 @@ "name": "groundlight-sdk-generator", "dependencies": { "@openapitools/openapi-generator-cli": "^2.9.0", - "rehype-katex": "^7.0.0", + "rehype-katex": "^7.0.1", "remark-math": "^6.0.0" } -} \ No newline at end of file +} diff --git a/spec/public-api.yaml b/spec/public-api.yaml index d775fe86..d3ca4129 100644 --- a/spec/public-api.yaml +++ b/spec/public-api.yaml @@ -175,6 +175,23 @@ paths: schema: $ref: '#/components/schemas/DetectorGroup' description: '' + /v1/detector-reset/{id}: + delete: + operationId: Reset detector + description: Deletes all image queries on the detector + parameters: + - in: path + name: id + schema: + type: string + required: true + tags: + - detector-reset + security: + - ApiToken: [] + responses: + '204': + description: No response body /v1/detectors: get: operationId: List detectors @@ -243,14 +260,21 @@ paths: schema: $ref: '#/components/schemas/Detector' description: '' +<<<<<<< Updated upstream delete: operationId: Delete detector description: Delete a detector by its ID. +======= + patch: + operationId: Update detector + description: Update a detector +>>>>>>> Stashed changes parameters: - in: path name: id schema: type: string +<<<<<<< Updated upstream description: Choose a detector by its ID. 
required: true tags: @@ -285,6 +309,9 @@ paths: POST data: Required: - name (str) - name of the predictor set +======= + required: true +>>>>>>> Stashed changes tags: - detectors requestBody: @@ -649,6 +676,9 @@ components: - left - right - top + BlankEnum: + enum: + - '' ChannelEnum: enum: - EMAIL @@ -738,6 +768,18 @@ components: additionalProperties: {} nullable: true readOnly: true + status: + oneOf: + - $ref: '#/components/schemas/StatusEnum' + - $ref: '#/components/schemas/BlankEnum' + escalation_type: + allOf: + - $ref: '#/components/schemas/EscalationTypeEnum' + description: |- + Category that define internal proccess for labeling image queries + + * `STANDARD` - STANDARD + * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING required: - created_at - group_name @@ -838,6 +880,14 @@ components: enum: - detector type: string + EscalationTypeEnum: + enum: + - STANDARD + - NO_HUMAN_LABELING + type: string + description: |- + * `STANDARD` - STANDARD + * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING ImageQuery: type: object description: Spec for serializing a image-query object in the public API. @@ -1090,6 +1140,46 @@ components: type: array items: $ref: '#/components/schemas/Rule' +<<<<<<< Updated upstream +======= + PatchedDetectorRequest: + type: object + description: Spec for serializing a detector object in the public API. + properties: + name: + type: string + minLength: 1 + description: A short, descriptive name for the detector. + maxLength: 200 + confidence_threshold: + type: number + format: double + maximum: 1.0 + minimum: 0.0 + default: 0.9 + description: If the detector's prediction is below this confidence threshold, + send the image query for human review. + patience_time: + type: number + format: double + maximum: 3600 + minimum: 0 + default: 30.0 + description: How long Groundlight will attempt to generate a confident prediction + status: + oneOf: + - $ref: '#/components/schemas/StatusEnum' + - $ref: '#/components/schemas/BlankEnum' + escalation_type: + allOf: + - $ref: '#/components/schemas/EscalationTypeEnum' + description: |- + Category that define internal proccess for labeling image queries + + * `STANDARD` - STANDARD + * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING + x-internal: true +>>>>>>> Stashed changes ROI: type: object description: Mixin for serializers to handle data in the StrictBaseModel format @@ -1231,6 +1321,14 @@ components: * `ALG` - Algorithm * `ALG_REC` - AlgorithmReconciled * `ALG_UNCLEAR` - AlgorithmUnclear + StatusEnum: + enum: + - 'ON' + - 'OFF' + type: string + description: |- + * `ON` - ON + * `OFF` - OFF VerbEnum: enum: - ANSWERED_CONSECUTIVELY diff --git a/src/groundlight/experimental_api.py b/src/groundlight/experimental_api.py index 2386dbe7..e6c3014e 100644 --- a/src/groundlight/experimental_api.py +++ b/src/groundlight/experimental_api.py @@ -20,10 +20,13 @@ from groundlight_openapi_client.model.channel_enum import ChannelEnum from groundlight_openapi_client.model.condition_request import ConditionRequest from groundlight_openapi_client.model.detector_group_request import DetectorGroupRequest +from groundlight_openapi_client.model.patched_detector_request import PatchedDetectorRequest from groundlight_openapi_client.model.label_value_request import LabelValueRequest from groundlight_openapi_client.model.roi_request import ROIRequest from groundlight_openapi_client.model.rule_request import RuleRequest from groundlight_openapi_client.model.verb_enum import VerbEnum +from groundlight_openapi_client.model.status_enum import StatusEnum +from 
groundlight_openapi_client.model.escalation_type_enum import EscalationTypeEnum from model import ROI, BBoxGeometry, Detector, DetectorGroup, ImageQuery, PaginatedRuleList, Rule from groundlight.binary_labels import Label, convert_display_label_to_internal @@ -293,3 +296,73 @@ def add_label( ) request_params = LabelValueRequest(label=api_label, image_query_id=image_query_id, rois=roi_requests) self.labels_api.create_label(request_params) + + def update_detector_confidence(self, detector: Union[str, Detector], confidence: float) -> None: + """ + Updates the confidence threshold for the given detector + + :param detector: the detector to update + :param confidence: the new confidence threshold + + :return: None + """ + if isinstance(detector, Detector): + detector = detector.id + self.detector_group_api.update_detector(detector, PatchedDetectorRequest(confidence_threshold=confidence)) + + def update_detector_name(self, detector: Union[str, Detector], name: str) -> None: + """ + Updates the name of the given detector + + :param detector: the detector to update + :param name: the new name + + :return: None + """ + if isinstance(detector, Detector): + detector = detector.id + self.detector_group_api.update_detector(detector, PatchedDetectorRequest(name=name)) + + def update_detector_status(self, detector: Union[str, Detector], enabled: bool) -> None: + """ + Updates the status of the given detector. If the detector is disabled, it will not receive new image queries + + :param detector: the detector to update + :param enabled: whether the detector is enabled, can be either True or False + + :return: None + """ + if isinstance(detector, Detector): + detector = detector.id + self.detector_group_api.update_detector(detector, PatchedDetectorRequest(status=StatusEnum("ON") if enabled else StatusEnum("OFF"))) + + def update_detector_escalation_type(self, detector: Union[str, Detector], escalation_type: str) -> None: + """ + Updates the escalation type of the given detector + + This is particularly useful for turning off human labeling for billing or security purposes. By setting a detector to "NO_HUMAN_LABELING", + no image queries sent to this detector will be sent to human labelers. 
+ + :param detector: the detector to update + :param escalation_type: the new escalation type, can be "STANDARD" or "NO_HUMAN_LABELING" + + :return: None + """ + if isinstance(detector, Detector): + detector = detector.id + escalation_type = escalation_type.upper() + if escalation_type not in ["STANDARD", "NO_HUMAN_LABELING"]: + raise ValueError("escalation_type must be either 'STANDARD' or 'NO_HUMAN_LABELING'") + self.detector_group_api.update_detector(detector, PatchedDetectorRequest(escalation_type=EscalationTypeEnum(escalation_type))) + + def reset_detector(self, detector: Union[str, Detector]) -> None: + """ + Removes all image queries for the given detector + + :param detector_id: the id of the detector to reset + + :return: None + """ + if isinstance(detector, Detector): + detector = detector.id + self.detector_reset_api.reset_detector(detector) From facd26f0388f1e9a08fcd7c5cedc6341a9da3d74 Mon Sep 17 00:00:00 2001 From: Brandon Date: Tue, 27 Aug 2024 20:54:30 +0000 Subject: [PATCH 2/5] Add update detector functions --- spec/public-api.yaml | 89 ++++++++++------------------- src/groundlight/client.py | 19 +++--- src/groundlight/experimental_api.py | 35 +++--------- src/groundlight/internalapi.py | 24 -------- test/unit/test_experimental.py | 50 +++++++++++++++- 5 files changed, 98 insertions(+), 119 deletions(-) diff --git a/spec/public-api.yaml b/spec/public-api.yaml index d3ca4129..227104f0 100644 --- a/spec/public-api.yaml +++ b/spec/public-api.yaml @@ -260,81 +260,54 @@ paths: schema: $ref: '#/components/schemas/Detector' description: '' -<<<<<<< Updated upstream - delete: - operationId: Delete detector - description: Delete a detector by its ID. -======= patch: operationId: Update detector description: Update a detector ->>>>>>> Stashed changes parameters: - in: path name: id schema: type: string -<<<<<<< Updated upstream - description: Choose a detector by its ID. required: true tags: - detectors - security: - - ApiToken: [] - responses: - '204': - description: No response body - /v1/detectors/detector-groups: - get: - operationId: Get Detector Groups_2 - description: List all detector groups - tags: - - detectors - security: - - ApiToken: [] - responses: - '200': - content: - application/json: - schema: - type: array - items: - $ref: '#/components/schemas/DetectorGroup' - description: '' - post: - operationId: Create Detector Group_2 - description: |- - Create a new detector group - - POST data: - Required: - - name (str) - name of the predictor set -======= - required: true ->>>>>>> Stashed changes - tags: - - detectors requestBody: content: application/json: schema: - $ref: '#/components/schemas/DetectorGroupRequest' + $ref: '#/components/schemas/PatchedDetectorRequest' application/x-www-form-urlencoded: schema: - $ref: '#/components/schemas/DetectorGroupRequest' + $ref: '#/components/schemas/PatchedDetectorRequest' multipart/form-data: schema: - $ref: '#/components/schemas/DetectorGroupRequest' - required: true + $ref: '#/components/schemas/PatchedDetectorRequest' security: - ApiToken: [] responses: - '201': + '200': content: application/json: schema: - $ref: '#/components/schemas/DetectorGroup' + $ref: '#/components/schemas/Detector' description: '' + delete: + operationId: Delete detector + description: Delete a detector by its ID. + parameters: + - in: path + name: id + schema: + type: string + description: Choose a detector by its ID. 
+ required: true + tags: + - detectors + security: + - ApiToken: [] + responses: + '204': + description: No response body /v1/image-queries: get: operationId: List image queries @@ -561,6 +534,12 @@ paths: - notes requestBody: content: + application/json: + schema: + $ref: '#/components/schemas/NoteRequest' + application/x-www-form-urlencoded: + schema: + $ref: '#/components/schemas/NoteRequest' multipart/form-data: schema: $ref: '#/components/schemas/NoteRequest' @@ -761,7 +740,8 @@ components: readOnly: true description: Metadata about the detector. mode: - type: string + allOf: + - $ref: '#/components/schemas/ModeEnum' readOnly: true mode_configuration: type: object @@ -979,7 +959,7 @@ components: class_name: type: string nullable: true - description: A human-readable class name for this label (e.g. YES/NO) + description: Return a human-readable class name for this label (e.g. YES/NO) readOnly: true rois: type: array @@ -1041,10 +1021,6 @@ components: - COUNT - MULTI_CLASS type: string - description: |- - * `BINARY` - BINARY - * `COUNT` - COUNT - * `MULTI_CLASS` - MULTI_CLASS Note: type: object properties: @@ -1140,8 +1116,6 @@ components: type: array items: $ref: '#/components/schemas/Rule' -<<<<<<< Updated upstream -======= PatchedDetectorRequest: type: object description: Spec for serializing a detector object in the public API. @@ -1179,7 +1153,6 @@ components: * `STANDARD` - STANDARD * `NO_HUMAN_LABELING` - NO_HUMAN_LABELING x-internal: true ->>>>>>> Stashed changes ROI: type: object description: Mixin for serializers to handle data in the StrictBaseModel format diff --git a/src/groundlight/client.py b/src/groundlight/client.py index 35eca62b..d3150429 100644 --- a/src/groundlight/client.py +++ b/src/groundlight/client.py @@ -14,6 +14,7 @@ from groundlight_openapi_client.exceptions import NotFoundException, UnauthorizedException from groundlight_openapi_client.model.detector_creation_input_request import DetectorCreationInputRequest from groundlight_openapi_client.model.label_value_request import LabelValueRequest +from groundlight_openapi_client.model.patched_detector_request import PatchedDetectorRequest from model import ( ROI, Detector, @@ -817,15 +818,17 @@ def stop_inspection(self, inspection_id: str) -> str: """ return self.api_client.stop_inspection(inspection_id) - def update_detector_confidence_threshold(self, detector_id: str, confidence_threshold: float) -> None: + def update_detector_confidence_threshold(self, detector: Union[str, Detector], confidence_threshold: float) -> None: """ - Updates the confidence threshold of a detector given a detector_id. + Updates the confidence threshold for the given detector - :param detector_id: The id of the detector to update. + :param detector: the detector to update + :param confidence_threshold: the new confidence threshold - :param confidence_threshold: The new confidence threshold for the detector. 
- - :return None - :rtype None + :return: None """ - self.api_client.update_detector_confidence_threshold(detector_id, confidence_threshold) + if isinstance(detector, Detector): + detector = detector.id + if confidence_threshold < 0 or confidence_threshold > 1: + raise ValueError("confidence must be between 0 and 1") + self.detectors_api.update_detector(detector, patched_detector_request=PatchedDetectorRequest(confidence_threshold=confidence_threshold)) diff --git a/src/groundlight/experimental_api.py b/src/groundlight/experimental_api.py index e6c3014e..7a6743b8 100644 --- a/src/groundlight/experimental_api.py +++ b/src/groundlight/experimental_api.py @@ -297,19 +297,6 @@ def add_label( request_params = LabelValueRequest(label=api_label, image_query_id=image_query_id, rois=roi_requests) self.labels_api.create_label(request_params) - def update_detector_confidence(self, detector: Union[str, Detector], confidence: float) -> None: - """ - Updates the confidence threshold for the given detector - - :param detector: the detector to update - :param confidence: the new confidence threshold - - :return: None - """ - if isinstance(detector, Detector): - detector = detector.id - self.detector_group_api.update_detector(detector, PatchedDetectorRequest(confidence_threshold=confidence)) - def update_detector_name(self, detector: Union[str, Detector], name: str) -> None: """ Updates the name of the given detector @@ -321,7 +308,7 @@ def update_detector_name(self, detector: Union[str, Detector], name: str) -> Non """ if isinstance(detector, Detector): detector = detector.id - self.detector_group_api.update_detector(detector, PatchedDetectorRequest(name=name)) + self.detectors_api.update_detector(detector, patched_detector_request=PatchedDetectorRequest(name=name)) def update_detector_status(self, detector: Union[str, Detector], enabled: bool) -> None: """ @@ -334,7 +321,9 @@ def update_detector_status(self, detector: Union[str, Detector], enabled: bool) """ if isinstance(detector, Detector): detector = detector.id - self.detector_group_api.update_detector(detector, PatchedDetectorRequest(status=StatusEnum("ON") if enabled else StatusEnum("OFF"))) + self.detectors_api.update_detector( + detector, patched_detector_request=PatchedDetectorRequest(status=StatusEnum("ON") if enabled else StatusEnum("OFF")) + ) def update_detector_escalation_type(self, detector: Union[str, Detector], escalation_type: str) -> None: """ @@ -353,16 +342,6 @@ def update_detector_escalation_type(self, detector: Union[str, Detector], escala escalation_type = escalation_type.upper() if escalation_type not in ["STANDARD", "NO_HUMAN_LABELING"]: raise ValueError("escalation_type must be either 'STANDARD' or 'NO_HUMAN_LABELING'") - self.detector_group_api.update_detector(detector, PatchedDetectorRequest(escalation_type=EscalationTypeEnum(escalation_type))) - - def reset_detector(self, detector: Union[str, Detector]) -> None: - """ - Removes all image queries for the given detector - - :param detector_id: the id of the detector to reset - - :return: None - """ - if isinstance(detector, Detector): - detector = detector.id - self.detector_reset_api.reset_detector(detector) + self.detectors_api.update_detector( + detector, patched_detector_request=PatchedDetectorRequest(escalation_type=EscalationTypeEnum(escalation_type)) + ) diff --git a/src/groundlight/internalapi.py b/src/groundlight/internalapi.py index f4984810..d680bd1b 100644 --- a/src/groundlight/internalapi.py +++ b/src/groundlight/internalapi.py @@ -359,27 +359,3 @@ def 
stop_inspection(self, inspection_id: str) -> str: ) return response.json()["result"] - - @RequestsRetryDecorator() - def update_detector_confidence_threshold(self, detector_id: str, confidence_threshold: float) -> None: - """Updates the confidence threshold of a detector.""" - - # The API does not validate the confidence threshold, - # so we will validate it here and raise an exception if necessary. - if confidence_threshold < 0 or confidence_threshold > 1: - raise ValueError(f"Confidence threshold must be between 0 and 1. Got {confidence_threshold}.") - - url = f"{self.configuration.host}/predictors/{detector_id}" - - headers = self._headers() - - payload = {"confidence_threshold": confidence_threshold} - - response = requests.request("PATCH", url, headers=headers, json=payload, verify=self.configuration.verify_ssl) - - if not is_ok(response.status_code): - raise InternalApiError( - status=response.status_code, - reason=f"Error updating detector: {detector_id}.", - http_resp=response, - ) diff --git a/test/unit/test_experimental.py b/test/unit/test_experimental.py index 5a94a8a9..cf222087 100644 --- a/test/unit/test_experimental.py +++ b/test/unit/test_experimental.py @@ -2,7 +2,7 @@ import pytest from groundlight import ExperimentalApi -from model import ImageQuery +from model import Detector, ImageQuery def test_detector_groups(gl_experimental: ExperimentalApi): @@ -15,6 +15,54 @@ def test_detector_groups(gl_experimental: ExperimentalApi): assert created_group in all_groups +def test_update_detector_confidence_threshold(gl_experimental: ExperimentalApi, detector: Detector): + """ + verify that we can update the confidence of a detector + """ + gl_experimental.update_detector_confidence_threshold(detector.id, 0.5) + updated_detector = gl_experimental.get_detector(detector.id) + assert updated_detector.confidence_threshold == 0.5 + gl_experimental.update_detector_confidence_threshold(detector.id, 0.9) + updated_detector = gl_experimental.get_detector(detector.id) + assert updated_detector.confidence_threshold == 0.9 + + +def test_update_detector_name(gl_experimental: ExperimentalApi, detector: Detector): + """ + verify that we can update the name of a detector + """ + new_name = f"Test {datetime.utcnow()}" + gl_experimental.update_detector_name(detector.id, new_name) + updated_detector = gl_experimental.get_detector(detector.id) + assert updated_detector.name == new_name + + +def test_update_detector_status(gl_experimental: ExperimentalApi): + """ + verify that we can update the status of a detector + """ + detector = gl_experimental.get_or_create_detector(f"test {datetime.utcnow()}", "Is there a dog?") + gl_experimental.update_detector_status(detector.id, False) + updated_detector = gl_experimental.get_detector(detector.id) + assert updated_detector.status.value == "OFF" + gl_experimental.update_detector_status(detector.id, True) + updated_detector = gl_experimental.get_detector(detector.id) + assert updated_detector.status.value == "ON" + + +def test_update_detector_escalation_type(gl_experimental: ExperimentalApi): + """ + verify that we can update the escalation type of a detector + """ + detector = gl_experimental.get_or_create_detector(f"test {datetime.utcnow()}", "Is there a dog?") + gl_experimental.update_detector_escalation_type(detector.id, "NO_HUMAN_LABELING") + updated_detector = gl_experimental.get_detector(detector.id) + updated_detector.escalation_type.value == "NO_HUMAN_LABELING" + gl_experimental.update_detector_escalation_type(detector.id, "STANDARD") + updated_detector 
= gl_experimental.get_detector(detector.id) + updated_detector.escalation_type.value == "STANDARD" + + @pytest.mark.skip( reason=( "Users currently don't have permission to turn object detection on their own. If you have questions, reach out" From 6fa05f5fd36151de91eaf2cf8111a03f184c8c76 Mon Sep 17 00:00:00 2001 From: Auto-format Bot Date: Tue, 27 Aug 2024 20:55:40 +0000 Subject: [PATCH 3/5] Automatically reformatting code --- src/groundlight/client.py | 4 +++- src/groundlight/experimental_api.py | 12 +++++++----- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/src/groundlight/client.py b/src/groundlight/client.py index d3150429..3cd15825 100644 --- a/src/groundlight/client.py +++ b/src/groundlight/client.py @@ -831,4 +831,6 @@ def update_detector_confidence_threshold(self, detector: Union[str, Detector], c detector = detector.id if confidence_threshold < 0 or confidence_threshold > 1: raise ValueError("confidence must be between 0 and 1") - self.detectors_api.update_detector(detector, patched_detector_request=PatchedDetectorRequest(confidence_threshold=confidence_threshold)) + self.detectors_api.update_detector( + detector, patched_detector_request=PatchedDetectorRequest(confidence_threshold=confidence_threshold) + ) diff --git a/src/groundlight/experimental_api.py b/src/groundlight/experimental_api.py index 7a6743b8..9f6b30de 100644 --- a/src/groundlight/experimental_api.py +++ b/src/groundlight/experimental_api.py @@ -20,13 +20,13 @@ from groundlight_openapi_client.model.channel_enum import ChannelEnum from groundlight_openapi_client.model.condition_request import ConditionRequest from groundlight_openapi_client.model.detector_group_request import DetectorGroupRequest -from groundlight_openapi_client.model.patched_detector_request import PatchedDetectorRequest +from groundlight_openapi_client.model.escalation_type_enum import EscalationTypeEnum from groundlight_openapi_client.model.label_value_request import LabelValueRequest +from groundlight_openapi_client.model.patched_detector_request import PatchedDetectorRequest from groundlight_openapi_client.model.roi_request import ROIRequest from groundlight_openapi_client.model.rule_request import RuleRequest -from groundlight_openapi_client.model.verb_enum import VerbEnum from groundlight_openapi_client.model.status_enum import StatusEnum -from groundlight_openapi_client.model.escalation_type_enum import EscalationTypeEnum +from groundlight_openapi_client.model.verb_enum import VerbEnum from model import ROI, BBoxGeometry, Detector, DetectorGroup, ImageQuery, PaginatedRuleList, Rule from groundlight.binary_labels import Label, convert_display_label_to_internal @@ -322,7 +322,8 @@ def update_detector_status(self, detector: Union[str, Detector], enabled: bool) if isinstance(detector, Detector): detector = detector.id self.detectors_api.update_detector( - detector, patched_detector_request=PatchedDetectorRequest(status=StatusEnum("ON") if enabled else StatusEnum("OFF")) + detector, + patched_detector_request=PatchedDetectorRequest(status=StatusEnum("ON") if enabled else StatusEnum("OFF")), ) def update_detector_escalation_type(self, detector: Union[str, Detector], escalation_type: str) -> None: @@ -343,5 +344,6 @@ def update_detector_escalation_type(self, detector: Union[str, Detector], escala if escalation_type not in ["STANDARD", "NO_HUMAN_LABELING"]: raise ValueError("escalation_type must be either 'STANDARD' or 'NO_HUMAN_LABELING'") self.detectors_api.update_detector( - detector, 
patched_detector_request=PatchedDetectorRequest(escalation_type=EscalationTypeEnum(escalation_type)) + detector, + patched_detector_request=PatchedDetectorRequest(escalation_type=EscalationTypeEnum(escalation_type)), ) From e1e3199dd282ff76744079335915739c596a6533 Mon Sep 17 00:00:00 2001 From: Auto-format Bot Date: Thu, 21 Nov 2024 01:01:19 +0000 Subject: [PATCH 4/5] Automatically reformatting code --- src/groundlight/experimental_api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/groundlight/experimental_api.py b/src/groundlight/experimental_api.py index 5dbf568a..55a865c8 100644 --- a/src/groundlight/experimental_api.py +++ b/src/groundlight/experimental_api.py @@ -24,8 +24,8 @@ from groundlight_openapi_client.model.detector_group_request import DetectorGroupRequest from groundlight_openapi_client.model.escalation_type_enum import EscalationTypeEnum from groundlight_openapi_client.model.label_value_request import LabelValueRequest -from groundlight_openapi_client.model.patched_detector_request import PatchedDetectorRequest from groundlight_openapi_client.model.multi_class_mode_configuration import MultiClassModeConfiguration +from groundlight_openapi_client.model.patched_detector_request import PatchedDetectorRequest from groundlight_openapi_client.model.roi_request import ROIRequest from groundlight_openapi_client.model.rule_request import RuleRequest from groundlight_openapi_client.model.status_enum import StatusEnum From 4c47f0f99462bd7b6dd17249c74ff175f94004d9 Mon Sep 17 00:00:00 2001 From: brandon Date: Wed, 20 Nov 2024 17:10:17 -0800 Subject: [PATCH 5/5] appease the linters --- .pylintrc | 4 ++-- src/groundlight/experimental_api.py | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/.pylintrc b/.pylintrc index 991ed3af..0a1fba02 100644 --- a/.pylintrc +++ b/.pylintrc @@ -563,5 +563,5 @@ min-public-methods=2 # Exceptions that will emit a warning when being caught. Defaults to # "BaseException, Exception". -overgeneral-exceptions=BaseException, - Exception +overgeneral-exceptions=builtins.BaseException, + builtins.Exception diff --git a/src/groundlight/experimental_api.py b/src/groundlight/experimental_api.py index 55a865c8..69f35c47 100644 --- a/src/groundlight/experimental_api.py +++ b/src/groundlight/experimental_api.py @@ -344,8 +344,9 @@ def update_detector_escalation_type(self, detector: Union[str, Detector], escala """ Updates the escalation type of the given detector - This is particularly useful for turning off human labeling for billing or security purposes. By setting a detector to "NO_HUMAN_LABELING", - no image queries sent to this detector will be sent to human labelers. + This is particularly useful for turning off human labeling for billing or security purposes. + By setting a detector to "NO_HUMAN_LABELING", no image queries sent to this detector will be + sent to human labelers. :param detector: the detector to update :param escalation_type: the new escalation type, can be "STANDARD" or "NO_HUMAN_LABELING"
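---

For context, the detector-update methods introduced across this patch series can be exercised end-to-end roughly as follows. This is a minimal sketch, not part of the diff: it assumes a valid `GROUNDLIGHT_API_TOKEN` is available in the environment (the usual SDK convention), and the detector name, query, and example values below are illustrative placeholders rather than values taken from the patches. The calls shown mirror the usage in `test/unit/test_experimental.py` above.

```python
# Minimal sketch of the detector-update workflow added in this patch series.
# Assumes GROUNDLIGHT_API_TOKEN is set in the environment; the detector name,
# query, and thresholds are illustrative placeholders.
from groundlight import ExperimentalApi

gl = ExperimentalApi()

# Create (or fetch) a detector to work with.
detector = gl.get_or_create_detector("dog-detector-example", "Is there a dog?")

# Lower the confidence threshold below which image queries escalate to human review.
gl.update_detector_confidence_threshold(detector.id, 0.75)

# Rename the detector.
gl.update_detector_name(detector.id, "dog-detector-example-renamed")

# Disable the detector so it stops accepting new image queries, then re-enable it.
gl.update_detector_status(detector.id, enabled=False)
gl.update_detector_status(detector.id, enabled=True)

# Turn off human labeling for this detector, then restore the default behavior.
gl.update_detector_escalation_type(detector.id, "NO_HUMAN_LABELING")
gl.update_detector_escalation_type(detector.id, "STANDARD")

# Confirm the changes took effect.
updated = gl.get_detector(detector.id)
print(updated.confidence_threshold, updated.status, updated.escalation_type)
```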