diff --git a/lightly/openapi_generated/swagger_client/api/datasources_api.py b/lightly/openapi_generated/swagger_client/api/datasources_api.py index d409937b4..63d82eac1 100644 --- a/lightly/openapi_generated/swagger_client/api/datasources_api.py +++ b/lightly/openapi_generated/swagger_client/api/datasources_api.py @@ -54,10 +54,10 @@ def __init__(self, api_client=None): self.api_client = api_client @validate_arguments - def get_custom_embedding_file_read_url_from_datasource_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[constr(strict=True, min_length=4), Field(..., description="The name of the csv file within the embeddings folder to get the readUrl for")], **kwargs) -> str: # noqa: E501 + def get_custom_embedding_file_read_url_from_datasource_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[constr(strict=True, min_length=4), Field(..., description="The name of the csv file within the embeddings folder to get the GET readUrl for")], **kwargs) -> str: # noqa: E501 """get_custom_embedding_file_read_url_from_datasource_by_dataset_id # noqa: E501 - Get the ReadURL of a custom embedding csv file within the embeddings folder (e.g myCustomEmbedding.csv) # noqa: E501 + Get the GET ReadURL of a custom embedding csv file within the embeddings folder (e.g myCustomEmbedding.csv) # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -66,7 +66,7 @@ def get_custom_embedding_file_read_url_from_datasource_by_dataset_id(self, datas :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str - :param file_name: The name of the csv file within the embeddings folder to get the readUrl for (required) + :param file_name: The name of the csv file within the embeddings folder to get the GET readUrl for (required) :type file_name: str :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional @@ -85,10 +85,10 @@ def get_custom_embedding_file_read_url_from_datasource_by_dataset_id(self, datas return self.get_custom_embedding_file_read_url_from_datasource_by_dataset_id_with_http_info(dataset_id, file_name, **kwargs) # noqa: E501 @validate_arguments - def get_custom_embedding_file_read_url_from_datasource_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[constr(strict=True, min_length=4), Field(..., description="The name of the csv file within the embeddings folder to get the readUrl for")], **kwargs) -> ApiResponse: # noqa: E501 + def get_custom_embedding_file_read_url_from_datasource_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[constr(strict=True, min_length=4), Field(..., description="The name of the csv file within the embeddings folder to get the GET readUrl for")], **kwargs) -> ApiResponse: # noqa: E501 """get_custom_embedding_file_read_url_from_datasource_by_dataset_id # noqa: E501 - Get the ReadURL of a custom embedding csv file within the embeddings folder (e.g myCustomEmbedding.csv) # noqa: E501 + Get the GET ReadURL of a custom embedding csv file within the embeddings folder (e.g myCustomEmbedding.csv) # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True @@ -97,7 +97,7 @@ def get_custom_embedding_file_read_url_from_datasource_by_dataset_id_with_http_i :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str - :param file_name: The name of the csv file within the embeddings folder to get the readUrl for (required) + :param file_name: The name of the csv file within the embeddings folder to get the GET readUrl for (required) :type file_name: str :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional @@ -209,9 +209,9 @@ def get_custom_embedding_file_read_url_from_datasource_by_dataset_id_with_http_i @validate_arguments def get_datasource_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], purpose : Annotated[Optional[DatasourcePurpose], Field(description="Which datasource with which purpose we want to get. Defaults to INPUT_OUTPUT")] = None, **kwargs) -> DatasourceConfig: # noqa: E501 - """get_datasource_by_dataset_id # noqa: E501 + """(Deprecated) get_datasource_by_dataset_id # noqa: E501 - Get the datasource of a dataset # noqa: E501 + DEPRECATED - use getDatasourcesByDatasetId. Get the datasource of a dataset # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -240,9 +240,9 @@ def get_datasource_by_dataset_id(self, dataset_id : Annotated[constr(strict=True @validate_arguments def get_datasource_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], purpose : Annotated[Optional[DatasourcePurpose], Field(description="Which datasource with which purpose we want to get. Defaults to INPUT_OUTPUT")] = None, **kwargs) -> ApiResponse: # noqa: E501 - """get_datasource_by_dataset_id # noqa: E501 + """(Deprecated) get_datasource_by_dataset_id # noqa: E501 - Get the datasource of a dataset # noqa: E501 + DEPRECATED - use getDatasourcesByDatasetId. Get the datasource of a dataset # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -278,6 +278,8 @@ def get_datasource_by_dataset_id_with_http_info(self, dataset_id : Annotated[con :rtype: tuple(DatasourceConfig, status_code(int), headers(HTTPHeaderDict)) """ + warnings.warn("GET /v1/datasets/{datasetId}/datasource is deprecated.", DeprecationWarning) + _params = locals() _all_params = [ @@ -647,6 +649,160 @@ def get_datasources_by_dataset_id_with_http_info(self, dataset_id : Annotated[co collection_formats=_collection_formats, _request_auth=_params.get('_request_auth')) + @validate_arguments + def get_head_file_read_url_from_datasource_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[constr(strict=True, min_length=1), Field(..., description="The name of the file within the the datasource to get a HEAD readUrl or GET readURL")], **kwargs) -> str: # noqa: E501 + """get_head_file_read_url_from_datasource_by_dataset_id # noqa: E501 + + Get a HEAD ReadURL of a file within datasources. Can only be used for HEAD request, no GET requests. # noqa: E501 + This method makes a synchronous HTTP request by default. 
To make an + asynchronous HTTP request, please pass async_req=True + + >>> thread = api.get_head_file_read_url_from_datasource_by_dataset_id(dataset_id, file_name, async_req=True) + >>> result = thread.get() + + :param dataset_id: ObjectId of the dataset (required) + :type dataset_id: str + :param file_name: The name of the file within the the datasource to get a HEAD readUrl or GET readURL (required) + :type file_name: str + :param async_req: Whether to execute the request asynchronously. + :type async_req: bool, optional + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :return: Returns the result object. + If the method is called asynchronously, + returns the request thread. + :rtype: str + """ + kwargs['_return_http_data_only'] = True + if '_preload_content' in kwargs: + raise ValueError("Error! Please call the get_head_file_read_url_from_datasource_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data") + return self.get_head_file_read_url_from_datasource_by_dataset_id_with_http_info(dataset_id, file_name, **kwargs) # noqa: E501 + + @validate_arguments + def get_head_file_read_url_from_datasource_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[constr(strict=True, min_length=1), Field(..., description="The name of the file within the the datasource to get a HEAD readUrl or GET readURL")], **kwargs) -> ApiResponse: # noqa: E501 + """get_head_file_read_url_from_datasource_by_dataset_id # noqa: E501 + + Get a HEAD ReadURL of a file within datasources. Can only be used for HEAD request, no GET requests. # noqa: E501 + This method makes a synchronous HTTP request by default. To make an + asynchronous HTTP request, please pass async_req=True + + >>> thread = api.get_head_file_read_url_from_datasource_by_dataset_id_with_http_info(dataset_id, file_name, async_req=True) + >>> result = thread.get() + + :param dataset_id: ObjectId of the dataset (required) + :type dataset_id: str + :param file_name: The name of the file within the the datasource to get a HEAD readUrl or GET readURL (required) + :type file_name: str + :param async_req: Whether to execute the request asynchronously. + :type async_req: bool, optional + :param _preload_content: if False, the ApiResponse.data will + be set to none and raw_data will store the + HTTP response body without reading/decoding. + Default is True. + :type _preload_content: bool, optional + :param _return_http_data_only: response data instead of ApiResponse + object with status code, headers, etc + :type _return_http_data_only: bool, optional + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :param _request_auth: set to override the auth_settings for an a single + request; this effectively ignores the authentication + in the spec for a single request. + :type _request_auth: dict, optional + :type _content_type: string, optional: force content-type for the request + :return: Returns the result object. + If the method is called asynchronously, + returns the request thread. 
+ :rtype: tuple(str, status_code(int), headers(HTTPHeaderDict)) + """ + + _params = locals() + + _all_params = [ + 'dataset_id', + 'file_name' + ] + _all_params.extend( + [ + 'async_req', + '_return_http_data_only', + '_preload_content', + '_request_timeout', + '_request_auth', + '_content_type', + '_headers' + ] + ) + + # validate the arguments + for _key, _val in _params['kwargs'].items(): + if _key not in _all_params: + raise ApiTypeError( + "Got an unexpected keyword argument '%s'" + " to method get_head_file_read_url_from_datasource_by_dataset_id" % _key + ) + _params[_key] = _val + del _params['kwargs'] + + _collection_formats = {} + + # process the path parameters + _path_params = {} + if _params['dataset_id']: + _path_params['datasetId'] = _params['dataset_id'] + + + # process the query parameters + _query_params = [] + if _params.get('file_name') is not None: # noqa: E501 + _query_params.append(( + 'fileName', + _params['file_name'].value if hasattr(_params['file_name'], 'value') else _params['file_name'] + )) + + # process the header parameters + _header_params = dict(_params.get('_headers', {})) + # process the form parameters + _form_params = [] + _files = {} + # process the body parameter + _body_params = None + # set the HTTP header `Accept` + _header_params['Accept'] = self.api_client.select_header_accept( + ['application/json']) # noqa: E501 + + # authentication setting + _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501 + + _response_types_map = { + '200': "str", + '400': "ApiErrorResponse", + '401': "ApiErrorResponse", + '403': "ApiErrorResponse", + '404': "ApiErrorResponse", + } + + return self.api_client.call_api( + '/v1/datasets/{datasetId}/datasource/fileHEAD', 'GET', + _path_params, + _query_params, + _header_params, + body=_body_params, + post_params=_form_params, + files=_files, + response_types_map=_response_types_map, + auth_settings=_auth_settings, + async_req=_params.get('async_req'), + _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501 + _preload_content=_params.get('_preload_content', True), + _request_timeout=_params.get('_request_timeout'), + collection_formats=_collection_formats, + _request_auth=_params.get('_request_auth')) + @validate_arguments def get_list_of_raw_samples_from_datasource_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], var_from : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Unix timestamp, only samples with a creation date after `from` will be returned. This parameter is ignored if `cursor` is specified. ")] = None, to : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Unix timestamp, only samples with a creation date before `to` will be returned. This parameter is ignored if `cursor` is specified. ")] = None, cursor : Annotated[Optional[StrictStr], Field(description="Cursor from previous request, encodes `from` and `to` parameters. Specify to continue reading samples from the list. ")] = None, use_redirected_read_url : Annotated[Optional[StrictBool], Field(description="By default this is set to false unless a S3DelegatedAccess is configured in which case its always true and this param has no effect. 
When true this will return RedirectedReadUrls instead of ReadUrls meaning that returned URLs allow for unlimited access to the file ")] = None, relevant_filenames_file_name : Annotated[Optional[constr(strict=True, min_length=4)], Field(description="The name of the file within your datasource which contains a list of relevant filenames to list. See https://docs.lightly.ai/docker/getting_started/first_steps.html#specify-relevant-files for more details ")] = None, **kwargs) -> DatasourceRawSamplesData: # noqa: E501 """get_list_of_raw_samples_from_datasource_by_dataset_id # noqa: E501 @@ -1297,10 +1453,10 @@ def get_list_of_raw_samples_predictions_from_datasource_by_dataset_id_with_http_ _request_auth=_params.get('_request_auth')) @validate_arguments - def get_metadata_file_read_url_from_datasource_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[constr(strict=True, min_length=5), Field(..., description="The name of the file within the metadata folder to get the readUrl for")], **kwargs) -> str: # noqa: E501 + def get_metadata_file_read_url_from_datasource_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[constr(strict=True, min_length=5), Field(..., description="The name of the file within the metadata folder to get the GET readUrl for")], **kwargs) -> str: # noqa: E501 """get_metadata_file_read_url_from_datasource_by_dataset_id # noqa: E501 - Get the ReadURL of a file within the metadata folder (e.g. my_image.json or my_video-099-mp4.json) # noqa: E501 + Get the GET ReadURL of a file within the metadata folder (e.g. my_image.json or my_video-099-mp4.json) # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -1309,7 +1465,7 @@ def get_metadata_file_read_url_from_datasource_by_dataset_id(self, dataset_id : :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str - :param file_name: The name of the file within the metadata folder to get the readUrl for (required) + :param file_name: The name of the file within the metadata folder to get the GET readUrl for (required) :type file_name: str :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional @@ -1328,10 +1484,10 @@ def get_metadata_file_read_url_from_datasource_by_dataset_id(self, dataset_id : return self.get_metadata_file_read_url_from_datasource_by_dataset_id_with_http_info(dataset_id, file_name, **kwargs) # noqa: E501 @validate_arguments - def get_metadata_file_read_url_from_datasource_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[constr(strict=True, min_length=5), Field(..., description="The name of the file within the metadata folder to get the readUrl for")], **kwargs) -> ApiResponse: # noqa: E501 + def get_metadata_file_read_url_from_datasource_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[constr(strict=True, min_length=5), Field(..., description="The name of the file within the metadata folder to get the GET readUrl for")], **kwargs) -> ApiResponse: # noqa: E501 """get_metadata_file_read_url_from_datasource_by_dataset_id # noqa: E501 - Get the ReadURL of a file within the metadata folder (e.g. 
my_image.json or my_video-099-mp4.json) # noqa: E501 + Get the GET ReadURL of a file within the metadata folder (e.g. my_image.json or my_video-099-mp4.json) # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -1340,7 +1496,7 @@ def get_metadata_file_read_url_from_datasource_by_dataset_id_with_http_info(self :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str - :param file_name: The name of the file within the metadata folder to get the readUrl for (required) + :param file_name: The name of the file within the metadata folder to get the GET readUrl for (required) :type file_name: str :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional @@ -1451,10 +1607,10 @@ def get_metadata_file_read_url_from_datasource_by_dataset_id_with_http_info(self _request_auth=_params.get('_request_auth')) @validate_arguments - def get_prediction_file_read_url_from_datasource_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[constr(strict=True, min_length=4), Field(..., description="The name of the file within the prediction folder to get the readUrl for")], **kwargs) -> str: # noqa: E501 + def get_prediction_file_read_url_from_datasource_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[constr(strict=True, min_length=4), Field(..., description="The name of the file within the prediction folder to get the GET readUrl for")], **kwargs) -> str: # noqa: E501 """get_prediction_file_read_url_from_datasource_by_dataset_id # noqa: E501 - Get the ReadURL of a file within the predictions folder (e.g tasks.json or my_classification_task/schema.json) # noqa: E501 + Get the GET ReadURL of a file within the predictions folder (e.g tasks.json or my_classification_task/schema.json) # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -1463,7 +1619,7 @@ def get_prediction_file_read_url_from_datasource_by_dataset_id(self, dataset_id :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str - :param file_name: The name of the file within the prediction folder to get the readUrl for (required) + :param file_name: The name of the file within the prediction folder to get the GET readUrl for (required) :type file_name: str :param async_req: Whether to execute the request asynchronously. 
:type async_req: bool, optional @@ -1482,10 +1638,10 @@ def get_prediction_file_read_url_from_datasource_by_dataset_id(self, dataset_id return self.get_prediction_file_read_url_from_datasource_by_dataset_id_with_http_info(dataset_id, file_name, **kwargs) # noqa: E501 @validate_arguments - def get_prediction_file_read_url_from_datasource_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[constr(strict=True, min_length=4), Field(..., description="The name of the file within the prediction folder to get the readUrl for")], **kwargs) -> ApiResponse: # noqa: E501 + def get_prediction_file_read_url_from_datasource_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[constr(strict=True, min_length=4), Field(..., description="The name of the file within the prediction folder to get the GET readUrl for")], **kwargs) -> ApiResponse: # noqa: E501 """get_prediction_file_read_url_from_datasource_by_dataset_id # noqa: E501 - Get the ReadURL of a file within the predictions folder (e.g tasks.json or my_classification_task/schema.json) # noqa: E501 + Get the GET ReadURL of a file within the predictions folder (e.g tasks.json or my_classification_task/schema.json) # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -1494,7 +1650,7 @@ def get_prediction_file_read_url_from_datasource_by_dataset_id_with_http_info(se :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str - :param file_name: The name of the file within the prediction folder to get the readUrl for (required) + :param file_name: The name of the file within the prediction folder to get the GET readUrl for (required) :type file_name: str :param async_req: Whether to execute the request asynchronously. 
:type async_req: bool, optional @@ -1605,7 +1761,7 @@ def get_prediction_file_read_url_from_datasource_by_dataset_id_with_http_info(se _request_auth=_params.get('_request_auth')) @validate_arguments - def get_prediction_file_write_url_from_datasource_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[constr(strict=True, min_length=4), Field(..., description="The name of the file within the prediction folder to get the readUrl for")], **kwargs) -> str: # noqa: E501 + def get_prediction_file_write_url_from_datasource_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[constr(strict=True, min_length=4), Field(..., description="The name of the file within the prediction folder to get the GET readUrl for")], **kwargs) -> str: # noqa: E501 """get_prediction_file_write_url_from_datasource_by_dataset_id # noqa: E501 Get the WriteURL of a file within the predictions folder (e.g tasks.json or my_classification_task/schema.json) # noqa: E501 @@ -1617,7 +1773,7 @@ def get_prediction_file_write_url_from_datasource_by_dataset_id(self, dataset_id :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str - :param file_name: The name of the file within the prediction folder to get the readUrl for (required) + :param file_name: The name of the file within the prediction folder to get the GET readUrl for (required) :type file_name: str :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional @@ -1636,7 +1792,7 @@ def get_prediction_file_write_url_from_datasource_by_dataset_id(self, dataset_id return self.get_prediction_file_write_url_from_datasource_by_dataset_id_with_http_info(dataset_id, file_name, **kwargs) # noqa: E501 @validate_arguments - def get_prediction_file_write_url_from_datasource_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[constr(strict=True, min_length=4), Field(..., description="The name of the file within the prediction folder to get the readUrl for")], **kwargs) -> ApiResponse: # noqa: E501 + def get_prediction_file_write_url_from_datasource_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[constr(strict=True, min_length=4), Field(..., description="The name of the file within the prediction folder to get the GET readUrl for")], **kwargs) -> ApiResponse: # noqa: E501 """get_prediction_file_write_url_from_datasource_by_dataset_id # noqa: E501 Get the WriteURL of a file within the predictions folder (e.g tasks.json or my_classification_task/schema.json) # noqa: E501 @@ -1648,7 +1804,7 @@ def get_prediction_file_write_url_from_datasource_by_dataset_id_with_http_info(s :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str - :param file_name: The name of the file within the prediction folder to get the readUrl for (required) + :param file_name: The name of the file within the prediction folder to get the GET readUrl for (required) :type file_name: str :param async_req: Whether to execute the request asynchronously. 
:type async_req: bool, optional diff --git a/lightly/openapi_generated/swagger_client/api/docker_api.py b/lightly/openapi_generated/swagger_client/api/docker_api.py index 633383cbc..963273743 100644 --- a/lightly/openapi_generated/swagger_client/api/docker_api.py +++ b/lightly/openapi_generated/swagger_client/api/docker_api.py @@ -2150,7 +2150,7 @@ def get_docker_run_logs_by_id_with_http_info(self, run_id : Annotated[constr(str def get_docker_run_report_read_url_by_id(self, run_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker run")], **kwargs) -> str: # noqa: E501 """(Deprecated) get_docker_run_report_read_url_by_id # noqa: E501 - Get the url of a specific docker runs report # noqa: E501 + DEPRECATED, use getDockerRunArtifactReadUrlById - Get the url of a specific docker runs report # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -2179,7 +2179,7 @@ def get_docker_run_report_read_url_by_id(self, run_id : Annotated[constr(strict= def get_docker_run_report_read_url_by_id_with_http_info(self, run_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker run")], **kwargs) -> ApiResponse: # noqa: E501 """(Deprecated) get_docker_run_report_read_url_by_id # noqa: E501 - Get the url of a specific docker runs report # noqa: E501 + DEPRECATED, use getDockerRunArtifactReadUrlById - Get the url of a specific docker runs report # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -2295,7 +2295,7 @@ def get_docker_run_report_read_url_by_id_with_http_info(self, run_id : Annotated def get_docker_run_report_write_url_by_id(self, run_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker run")], **kwargs) -> str: # noqa: E501 """(Deprecated) get_docker_run_report_write_url_by_id # noqa: E501 - Get the signed url to upload a report of a docker run # noqa: E501 + DEPRECATED, use createDockerRunArtifact - Get the signed url to upload a report of a docker run # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -2324,7 +2324,7 @@ def get_docker_run_report_write_url_by_id(self, run_id : Annotated[constr(strict def get_docker_run_report_write_url_by_id_with_http_info(self, run_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker run")], **kwargs) -> ApiResponse: # noqa: E501 """(Deprecated) get_docker_run_report_write_url_by_id # noqa: E501 - Get the signed url to upload a report of a docker run # noqa: E501 + DEPRECATED, use createDockerRunArtifact - Get the signed url to upload a report of a docker run # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True diff --git a/lightly/openapi_generated/swagger_client/api/predictions_api.py b/lightly/openapi_generated/swagger_client/api/predictions_api.py index 057cbdcb7..1d8d1b649 100644 --- a/lightly/openapi_generated/swagger_client/api/predictions_api.py +++ b/lightly/openapi_generated/swagger_client/api/predictions_api.py @@ -395,22 +395,22 @@ def create_or_update_prediction_task_schema_by_dataset_id_with_http_info(self, d _request_auth=_params.get('_request_auth')) @validate_arguments - def get_prediction_by_sample_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], **kwargs) -> List[PredictionSingleton]: # noqa: E501 - """get_prediction_by_sample_id # noqa: E501 + def get_prediction_task_schema_by_task_name(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], task_name : Annotated[constr(strict=True, min_length=1), Field(..., description="The prediction task name for which one wants to list the predictions")], **kwargs) -> PredictionTaskSchema: # noqa: E501 + """get_prediction_task_schema_by_task_name # noqa: E501 - Get all prediction singletons of a specific sample of a dataset # noqa: E501 + Get a prediction task schemas named taskName for a datasetId # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True - >>> thread = api.get_prediction_by_sample_id(dataset_id, sample_id, prediction_uuid_timestamp, async_req=True) + >>> thread = api.get_prediction_task_schema_by_task_name(dataset_id, prediction_uuid_timestamp, task_name, async_req=True) >>> result = thread.get() :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str - :param sample_id: ObjectId of the sample (required) - :type sample_id: str :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required) :type prediction_uuid_timestamp: int + :param task_name: The prediction task name for which one wants to list the predictions (required) + :type task_name: str :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _request_timeout: timeout setting for this request. If one @@ -420,30 +420,30 @@ def get_prediction_by_sample_id(self, dataset_id : Annotated[constr(strict=True) :return: Returns the result object. 
If the method is called asynchronously, returns the request thread. - :rtype: List[PredictionSingleton] + :rtype: PredictionTaskSchema """ kwargs['_return_http_data_only'] = True if '_preload_content' in kwargs: - raise ValueError("Error! Please call the get_prediction_by_sample_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data") - return self.get_prediction_by_sample_id_with_http_info(dataset_id, sample_id, prediction_uuid_timestamp, **kwargs) # noqa: E501 + raise ValueError("Error! Please call the get_prediction_task_schema_by_task_name_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data") + return self.get_prediction_task_schema_by_task_name_with_http_info(dataset_id, prediction_uuid_timestamp, task_name, **kwargs) # noqa: E501 @validate_arguments - def get_prediction_by_sample_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], **kwargs) -> ApiResponse: # noqa: E501 - """get_prediction_by_sample_id # noqa: E501 + def get_prediction_task_schema_by_task_name_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], task_name : Annotated[constr(strict=True, min_length=1), Field(..., description="The prediction task name for which one wants to list the predictions")], **kwargs) -> ApiResponse: # noqa: E501 + """get_prediction_task_schema_by_task_name # noqa: E501 - Get all prediction singletons of a specific sample of a dataset # noqa: E501 + Get a prediction task schemas named taskName for a datasetId # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True - >>> thread = api.get_prediction_by_sample_id_with_http_info(dataset_id, sample_id, prediction_uuid_timestamp, async_req=True) + >>> thread = api.get_prediction_task_schema_by_task_name_with_http_info(dataset_id, prediction_uuid_timestamp, task_name, async_req=True) >>> result = thread.get() :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str - :param sample_id: ObjectId of the sample (required) - :type sample_id: str :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. 
(required) :type prediction_uuid_timestamp: int + :param task_name: The prediction task name for which one wants to list the predictions (required) + :type task_name: str :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _preload_content: if False, the ApiResponse.data will @@ -466,15 +466,15 @@ def get_prediction_by_sample_id_with_http_info(self, dataset_id : Annotated[cons :return: Returns the result object. If the method is called asynchronously, returns the request thread. - :rtype: tuple(List[PredictionSingleton], status_code(int), headers(HTTPHeaderDict)) + :rtype: tuple(PredictionTaskSchema, status_code(int), headers(HTTPHeaderDict)) """ _params = locals() _all_params = [ 'dataset_id', - 'sample_id', - 'prediction_uuid_timestamp' + 'prediction_uuid_timestamp', + 'task_name' ] _all_params.extend( [ @@ -493,7 +493,7 @@ def get_prediction_by_sample_id_with_http_info(self, dataset_id : Annotated[cons if _key not in _all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" - " to method get_prediction_by_sample_id" % _key + " to method get_prediction_task_schema_by_task_name" % _key ) _params[_key] = _val del _params['kwargs'] @@ -505,8 +505,8 @@ def get_prediction_by_sample_id_with_http_info(self, dataset_id : Annotated[cons if _params['dataset_id']: _path_params['datasetId'] = _params['dataset_id'] - if _params['sample_id']: - _path_params['sampleId'] = _params['sample_id'] + if _params['task_name']: + _path_params['taskName'] = _params['task_name'] # process the query parameters @@ -532,7 +532,7 @@ def get_prediction_by_sample_id_with_http_info(self, dataset_id : Annotated[cons _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501 _response_types_map = { - '200': "List[PredictionSingleton]", + '200': "PredictionTaskSchema", '400': "ApiErrorResponse", '401': "ApiErrorResponse", '403': "ApiErrorResponse", @@ -540,7 +540,7 @@ def get_prediction_by_sample_id_with_http_info(self, dataset_id : Annotated[cons } return self.api_client.call_api( - '/v1/datasets/{datasetId}/predictions/samples/{sampleId}', 'GET', + '/v1/datasets/{datasetId}/predictions/tasks/{taskName}', 'GET', _path_params, _query_params, _header_params, @@ -557,22 +557,20 @@ def get_prediction_by_sample_id_with_http_info(self, dataset_id : Annotated[cons _request_auth=_params.get('_request_auth')) @validate_arguments - def get_prediction_task_schema_by_task_name(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], task_name : Annotated[constr(strict=True, min_length=1), Field(..., description="The prediction task name for which one wants to list the predictions")], **kwargs) -> PredictionTaskSchema: # noqa: E501 - """get_prediction_task_schema_by_task_name # noqa: E501 + def get_prediction_task_schemas_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. 
E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, **kwargs) -> PredictionTaskSchemas: # noqa: E501 + """get_prediction_task_schemas_by_dataset_id # noqa: E501 - Get a prediction task schemas named taskName for a datasetId # noqa: E501 + Get list of all the prediction task schemas for a datasetId at a specific predictionUUIDTimestamp. If no predictionUUIDTimestamp is set, it defaults to the newest # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True - >>> thread = api.get_prediction_task_schema_by_task_name(dataset_id, prediction_uuid_timestamp, task_name, async_req=True) + >>> thread = api.get_prediction_task_schemas_by_dataset_id(dataset_id, prediction_uuid_timestamp, async_req=True) >>> result = thread.get() :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str - :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required) + :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. :type prediction_uuid_timestamp: int - :param task_name: The prediction task name for which one wants to list the predictions (required) - :type task_name: str :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _request_timeout: timeout setting for this request. If one @@ -582,30 +580,28 @@ def get_prediction_task_schema_by_task_name(self, dataset_id : Annotated[constr( :return: Returns the result object. If the method is called asynchronously, returns the request thread. - :rtype: PredictionTaskSchema + :rtype: PredictionTaskSchemas """ kwargs['_return_http_data_only'] = True if '_preload_content' in kwargs: - raise ValueError("Error! Please call the get_prediction_task_schema_by_task_name_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data") - return self.get_prediction_task_schema_by_task_name_with_http_info(dataset_id, prediction_uuid_timestamp, task_name, **kwargs) # noqa: E501 + raise ValueError("Error! Please call the get_prediction_task_schemas_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data") + return self.get_prediction_task_schemas_by_dataset_id_with_http_info(dataset_id, prediction_uuid_timestamp, **kwargs) # noqa: E501 @validate_arguments - def get_prediction_task_schema_by_task_name_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. 
")], task_name : Annotated[constr(strict=True, min_length=1), Field(..., description="The prediction task name for which one wants to list the predictions")], **kwargs) -> ApiResponse: # noqa: E501 - """get_prediction_task_schema_by_task_name # noqa: E501 + def get_prediction_task_schemas_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, **kwargs) -> ApiResponse: # noqa: E501 + """get_prediction_task_schemas_by_dataset_id # noqa: E501 - Get a prediction task schemas named taskName for a datasetId # noqa: E501 + Get list of all the prediction task schemas for a datasetId at a specific predictionUUIDTimestamp. If no predictionUUIDTimestamp is set, it defaults to the newest # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True - >>> thread = api.get_prediction_task_schema_by_task_name_with_http_info(dataset_id, prediction_uuid_timestamp, task_name, async_req=True) + >>> thread = api.get_prediction_task_schemas_by_dataset_id_with_http_info(dataset_id, prediction_uuid_timestamp, async_req=True) >>> result = thread.get() :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str - :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required) + :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. :type prediction_uuid_timestamp: int - :param task_name: The prediction task name for which one wants to list the predictions (required) - :type task_name: str :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _preload_content: if False, the ApiResponse.data will @@ -628,15 +624,14 @@ def get_prediction_task_schema_by_task_name_with_http_info(self, dataset_id : An :return: Returns the result object. If the method is called asynchronously, returns the request thread. 
- :rtype: tuple(PredictionTaskSchema, status_code(int), headers(HTTPHeaderDict)) + :rtype: tuple(PredictionTaskSchemas, status_code(int), headers(HTTPHeaderDict)) """ _params = locals() _all_params = [ 'dataset_id', - 'prediction_uuid_timestamp', - 'task_name' + 'prediction_uuid_timestamp' ] _all_params.extend( [ @@ -655,7 +650,7 @@ def get_prediction_task_schema_by_task_name_with_http_info(self, dataset_id : An if _key not in _all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" - " to method get_prediction_task_schema_by_task_name" % _key + " to method get_prediction_task_schemas_by_dataset_id" % _key ) _params[_key] = _val del _params['kwargs'] @@ -667,9 +662,6 @@ def get_prediction_task_schema_by_task_name_with_http_info(self, dataset_id : An if _params['dataset_id']: _path_params['datasetId'] = _params['dataset_id'] - if _params['task_name']: - _path_params['taskName'] = _params['task_name'] - # process the query parameters _query_params = [] @@ -694,7 +686,7 @@ def get_prediction_task_schema_by_task_name_with_http_info(self, dataset_id : An _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501 _response_types_map = { - '200': "PredictionTaskSchema", + '200': "PredictionTaskSchemas", '400': "ApiErrorResponse", '401': "ApiErrorResponse", '403': "ApiErrorResponse", @@ -702,7 +694,7 @@ def get_prediction_task_schema_by_task_name_with_http_info(self, dataset_id : An } return self.api_client.call_api( - '/v1/datasets/{datasetId}/predictions/tasks/{taskName}', 'GET', + '/v1/datasets/{datasetId}/predictions/tasks', 'GET', _path_params, _query_params, _header_params, @@ -719,20 +711,22 @@ def get_prediction_task_schema_by_task_name_with_http_info(self, dataset_id : An _request_auth=_params.get('_request_auth')) @validate_arguments - def get_prediction_task_schemas_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, **kwargs) -> PredictionTaskSchemas: # noqa: E501 - """get_prediction_task_schemas_by_dataset_id # noqa: E501 + def get_predictions_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], task_name : Annotated[Optional[constr(strict=True, min_length=1)], Field(description="If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name")] = None, **kwargs) -> List[List]: # noqa: E501 + """get_predictions_by_dataset_id # noqa: E501 - Get list of all the prediction task schemas for a datasetId at a specific predictionUUIDTimestamp. 
If no predictionUUIDTimestamp is set, it defaults to the newest # noqa: E501 + Get all prediction singletons of all samples of a dataset ordered by the sample mapping # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True - >>> thread = api.get_prediction_task_schemas_by_dataset_id(dataset_id, prediction_uuid_timestamp, async_req=True) + >>> thread = api.get_predictions_by_dataset_id(dataset_id, prediction_uuid_timestamp, task_name, async_req=True) >>> result = thread.get() :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str - :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. + :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required) :type prediction_uuid_timestamp: int + :param task_name: If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name + :type task_name: str :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _request_timeout: timeout setting for this request. If one @@ -742,28 +736,30 @@ def get_prediction_task_schemas_by_dataset_id(self, dataset_id : Annotated[const :return: Returns the result object. If the method is called asynchronously, returns the request thread. - :rtype: PredictionTaskSchemas + :rtype: List[List] """ kwargs['_return_http_data_only'] = True if '_preload_content' in kwargs: - raise ValueError("Error! Please call the get_prediction_task_schemas_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data") - return self.get_prediction_task_schemas_by_dataset_id_with_http_info(dataset_id, prediction_uuid_timestamp, **kwargs) # noqa: E501 + raise ValueError("Error! Please call the get_predictions_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data") + return self.get_predictions_by_dataset_id_with_http_info(dataset_id, prediction_uuid_timestamp, task_name, **kwargs) # noqa: E501 @validate_arguments - def get_prediction_task_schemas_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. 
")] = None, **kwargs) -> ApiResponse: # noqa: E501 - """get_prediction_task_schemas_by_dataset_id # noqa: E501 + def get_predictions_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], task_name : Annotated[Optional[constr(strict=True, min_length=1)], Field(description="If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name")] = None, **kwargs) -> ApiResponse: # noqa: E501 + """get_predictions_by_dataset_id # noqa: E501 - Get list of all the prediction task schemas for a datasetId at a specific predictionUUIDTimestamp. If no predictionUUIDTimestamp is set, it defaults to the newest # noqa: E501 + Get all prediction singletons of all samples of a dataset ordered by the sample mapping # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True - >>> thread = api.get_prediction_task_schemas_by_dataset_id_with_http_info(dataset_id, prediction_uuid_timestamp, async_req=True) + >>> thread = api.get_predictions_by_dataset_id_with_http_info(dataset_id, prediction_uuid_timestamp, task_name, async_req=True) >>> result = thread.get() :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str - :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. + :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required) :type prediction_uuid_timestamp: int + :param task_name: If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name + :type task_name: str :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _preload_content: if False, the ApiResponse.data will @@ -786,14 +782,15 @@ def get_prediction_task_schemas_by_dataset_id_with_http_info(self, dataset_id : :return: Returns the result object. If the method is called asynchronously, returns the request thread. 
- :rtype: tuple(PredictionTaskSchemas, status_code(int), headers(HTTPHeaderDict)) + :rtype: tuple(List[List], status_code(int), headers(HTTPHeaderDict)) """ _params = locals() _all_params = [ 'dataset_id', - 'prediction_uuid_timestamp' + 'prediction_uuid_timestamp', + 'task_name' ] _all_params.extend( [ @@ -812,7 +809,7 @@ def get_prediction_task_schemas_by_dataset_id_with_http_info(self, dataset_id : if _key not in _all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" - " to method get_prediction_task_schemas_by_dataset_id" % _key + " to method get_predictions_by_dataset_id" % _key ) _params[_key] = _val del _params['kwargs'] @@ -833,6 +830,12 @@ def get_prediction_task_schemas_by_dataset_id_with_http_info(self, dataset_id : _params['prediction_uuid_timestamp'].value if hasattr(_params['prediction_uuid_timestamp'], 'value') else _params['prediction_uuid_timestamp'] )) + if _params.get('task_name') is not None: # noqa: E501 + _query_params.append(( + 'taskName', + _params['task_name'].value if hasattr(_params['task_name'], 'value') else _params['task_name'] + )) + # process the header parameters _header_params = dict(_params.get('_headers', {})) # process the form parameters @@ -848,7 +851,7 @@ def get_prediction_task_schemas_by_dataset_id_with_http_info(self, dataset_id : _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501 _response_types_map = { - '200': "PredictionTaskSchemas", + '200': "List[List]", '400': "ApiErrorResponse", '401': "ApiErrorResponse", '403': "ApiErrorResponse", @@ -856,7 +859,7 @@ def get_prediction_task_schemas_by_dataset_id_with_http_info(self, dataset_id : } return self.api_client.call_api( - '/v1/datasets/{datasetId}/predictions/tasks', 'GET', + '/v1/datasets/{datasetId}/predictions/samples', 'GET', _path_params, _query_params, _header_params, @@ -873,22 +876,22 @@ def get_prediction_task_schemas_by_dataset_id_with_http_info(self, dataset_id : _request_auth=_params.get('_request_auth')) @validate_arguments - def get_predictions_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], task_name : Annotated[Optional[constr(strict=True, min_length=1)], Field(description="If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name")] = None, **kwargs) -> List[List]: # noqa: E501 - """get_predictions_by_dataset_id # noqa: E501 + def get_predictions_by_sample_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. 
")], **kwargs) -> List[PredictionSingleton]: # noqa: E501 + """get_predictions_by_sample_id # noqa: E501 - Get all prediction singletons of all samples of a dataset ordered by the sample mapping # noqa: E501 + Get all prediction singletons of all tasks for a specific sample of a dataset # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True - >>> thread = api.get_predictions_by_dataset_id(dataset_id, prediction_uuid_timestamp, task_name, async_req=True) + >>> thread = api.get_predictions_by_sample_id(dataset_id, sample_id, prediction_uuid_timestamp, async_req=True) >>> result = thread.get() :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str + :param sample_id: ObjectId of the sample (required) + :type sample_id: str :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required) :type prediction_uuid_timestamp: int - :param task_name: If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name - :type task_name: str :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _request_timeout: timeout setting for this request. If one @@ -898,30 +901,30 @@ def get_predictions_by_dataset_id(self, dataset_id : Annotated[constr(strict=Tru :return: Returns the result object. If the method is called asynchronously, returns the request thread. - :rtype: List[List] + :rtype: List[PredictionSingleton] """ kwargs['_return_http_data_only'] = True if '_preload_content' in kwargs: - raise ValueError("Error! Please call the get_predictions_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data") - return self.get_predictions_by_dataset_id_with_http_info(dataset_id, prediction_uuid_timestamp, task_name, **kwargs) # noqa: E501 + raise ValueError("Error! Please call the get_predictions_by_sample_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data") + return self.get_predictions_by_sample_id_with_http_info(dataset_id, sample_id, prediction_uuid_timestamp, **kwargs) # noqa: E501 @validate_arguments - def get_predictions_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. 
")], task_name : Annotated[Optional[constr(strict=True, min_length=1)], Field(description="If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name")] = None, **kwargs) -> ApiResponse: # noqa: E501 - """get_predictions_by_dataset_id # noqa: E501 + def get_predictions_by_sample_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], **kwargs) -> ApiResponse: # noqa: E501 + """get_predictions_by_sample_id # noqa: E501 - Get all prediction singletons of all samples of a dataset ordered by the sample mapping # noqa: E501 + Get all prediction singletons of all tasks for a specific sample of a dataset # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True - >>> thread = api.get_predictions_by_dataset_id_with_http_info(dataset_id, prediction_uuid_timestamp, task_name, async_req=True) + >>> thread = api.get_predictions_by_sample_id_with_http_info(dataset_id, sample_id, prediction_uuid_timestamp, async_req=True) >>> result = thread.get() :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str + :param sample_id: ObjectId of the sample (required) + :type sample_id: str :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required) :type prediction_uuid_timestamp: int - :param task_name: If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name - :type task_name: str :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _preload_content: if False, the ApiResponse.data will @@ -944,15 +947,15 @@ def get_predictions_by_dataset_id_with_http_info(self, dataset_id : Annotated[co :return: Returns the result object. If the method is called asynchronously, returns the request thread. 
- :rtype: tuple(List[List], status_code(int), headers(HTTPHeaderDict)) + :rtype: tuple(List[PredictionSingleton], status_code(int), headers(HTTPHeaderDict)) """ _params = locals() _all_params = [ 'dataset_id', - 'prediction_uuid_timestamp', - 'task_name' + 'sample_id', + 'prediction_uuid_timestamp' ] _all_params.extend( [ @@ -971,7 +974,7 @@ def get_predictions_by_dataset_id_with_http_info(self, dataset_id : Annotated[co if _key not in _all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" - " to method get_predictions_by_dataset_id" % _key + " to method get_predictions_by_sample_id" % _key ) _params[_key] = _val del _params['kwargs'] @@ -983,6 +986,9 @@ def get_predictions_by_dataset_id_with_http_info(self, dataset_id : Annotated[co if _params['dataset_id']: _path_params['datasetId'] = _params['dataset_id'] + if _params['sample_id']: + _path_params['sampleId'] = _params['sample_id'] + # process the query parameters _query_params = [] @@ -992,12 +998,6 @@ def get_predictions_by_dataset_id_with_http_info(self, dataset_id : Annotated[co _params['prediction_uuid_timestamp'].value if hasattr(_params['prediction_uuid_timestamp'], 'value') else _params['prediction_uuid_timestamp'] )) - if _params.get('task_name') is not None: # noqa: E501 - _query_params.append(( - 'taskName', - _params['task_name'].value if hasattr(_params['task_name'], 'value') else _params['task_name'] - )) - # process the header parameters _header_params = dict(_params.get('_headers', {})) # process the form parameters @@ -1013,7 +1013,7 @@ def get_predictions_by_dataset_id_with_http_info(self, dataset_id : Annotated[co _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501 _response_types_map = { - '200': "List[List]", + '200': "List[PredictionSingleton]", '400': "ApiErrorResponse", '401': "ApiErrorResponse", '403': "ApiErrorResponse", @@ -1021,7 +1021,7 @@ def get_predictions_by_dataset_id_with_http_info(self, dataset_id : Annotated[co } return self.api_client.call_api( - '/v1/datasets/{datasetId}/predictions/samples', 'GET', + '/v1/datasets/{datasetId}/predictions/samples/{sampleId}', 'GET', _path_params, _query_params, _header_params, diff --git a/lightly/openapi_generated/swagger_client/api/samples_api.py b/lightly/openapi_generated/swagger_client/api/samples_api.py index 53f3ab74e..85be18c03 100644 --- a/lightly/openapi_generated/swagger_client/api/samples_api.py +++ b/lightly/openapi_generated/swagger_client/api/samples_api.py @@ -1153,7 +1153,7 @@ def get_sample_image_write_urls_by_id_with_http_info(self, dataset_id : Annotate _request_auth=_params.get('_request_auth')) @validate_arguments - def get_samples_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[Optional[StrictStr], Field(description="filter the samples by filename")] = None, sort_by : Annotated[Optional[SampleSortBy], Field(description="sort the samples")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> List[SampleData]: # noqa: E501 + def get_samples_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[Optional[StrictStr], Field(description="DEPRECATED, use without and filter yourself - Filter the samples by filename")] = None, sort_by 
: Annotated[Optional[SampleSortBy], Field(description="sort the samples")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> List[SampleData]: # noqa: E501 """get_samples_by_dataset_id # noqa: E501 Get all samples of a dataset # noqa: E501 @@ -1165,7 +1165,7 @@ def get_samples_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str - :param file_name: filter the samples by filename + :param file_name: DEPRECATED, use without and filter yourself - Filter the samples by filename :type file_name: str :param sort_by: sort the samples :type sort_by: SampleSortBy @@ -1190,7 +1190,7 @@ def get_samples_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), return self.get_samples_by_dataset_id_with_http_info(dataset_id, file_name, sort_by, page_size, page_offset, **kwargs) # noqa: E501 @validate_arguments - def get_samples_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[Optional[StrictStr], Field(description="filter the samples by filename")] = None, sort_by : Annotated[Optional[SampleSortBy], Field(description="sort the samples")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> ApiResponse: # noqa: E501 + def get_samples_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[Optional[StrictStr], Field(description="DEPRECATED, use without and filter yourself - Filter the samples by filename")] = None, sort_by : Annotated[Optional[SampleSortBy], Field(description="sort the samples")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> ApiResponse: # noqa: E501 """get_samples_by_dataset_id # noqa: E501 Get all samples of a dataset # noqa: E501 @@ -1202,7 +1202,7 @@ def get_samples_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr :param dataset_id: ObjectId of the dataset (required) :type dataset_id: str - :param file_name: filter the samples by filename + :param file_name: DEPRECATED, use without and filter yourself - Filter the samples by filename :type file_name: str :param sort_by: sort the samples :type sort_by: SampleSortBy diff --git a/lightly/openapi_generated/swagger_client/api/tags_api.py b/lightly/openapi_generated/swagger_client/api/tags_api.py index 4affcbccd..428c23c98 100644 --- a/lightly/openapi_generated/swagger_client/api/tags_api.py +++ b/lightly/openapi_generated/swagger_client/api/tags_api.py @@ -524,9 +524,9 @@ def delete_tag_by_tag_id_with_http_info(self, dataset_id : Annotated[constr(stri @validate_arguments def download_zip_of_samples_by_tag_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., 
description="ObjectId of the tag")], **kwargs) -> bytearray: # noqa: E501 - """download_zip_of_samples_by_tag_id # noqa: E501 + """(Deprecated) download_zip_of_samples_by_tag_id # noqa: E501 - Download a zip file of the samples of a tag. Limited to 1000 images # noqa: E501 + DEPRECATED - Download a zip file of the samples of a tag. Limited to 1000 images # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -555,9 +555,9 @@ def download_zip_of_samples_by_tag_id(self, dataset_id : Annotated[constr(strict @validate_arguments def download_zip_of_samples_by_tag_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], **kwargs) -> ApiResponse: # noqa: E501 - """download_zip_of_samples_by_tag_id # noqa: E501 + """(Deprecated) download_zip_of_samples_by_tag_id # noqa: E501 - Download a zip file of the samples of a tag. Limited to 1000 images # noqa: E501 + DEPRECATED - Download a zip file of the samples of a tag. Limited to 1000 images # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -593,6 +593,8 @@ def download_zip_of_samples_by_tag_id_with_http_info(self, dataset_id : Annotate :rtype: tuple(bytearray, status_code(int), headers(HTTPHeaderDict)) """ + warnings.warn("GET /v1/datasets/{datasetId}/tags/{tagId}/export/zip is deprecated.", DeprecationWarning) + _params = locals() _all_params = [ @@ -1112,7 +1114,7 @@ def export_tag_to_basic_filenames_and_read_urls_with_http_info(self, dataset_id def export_tag_to_label_box_data_rows(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], expires_in : Annotated[Optional[StrictInt], Field(description="If defined, the URLs provided will only be valid for amount of seconds from time of issuence. If not defined, the URls will be valid indefinitely. ")] = None, access_control : Annotated[Optional[StrictStr], Field(description="which access control name to be used")] = None, file_name_format : Optional[FileNameFormat] = None, include_meta_data : Annotated[Optional[StrictBool], Field(description="if true, will also include metadata")] = None, format : Optional[FileOutputFormat] = None, preview_example : Annotated[Optional[StrictBool], Field(description="if true, will generate a preview example of how the structure will look")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> List[LabelBoxDataRow]: # noqa: E501 """(Deprecated) export_tag_to_label_box_data_rows # noqa: E501 - Deprecated. Please use V4 unless there is a specific need to use the LabelBox V3 API. Export samples of a tag as a json for importing into LabelBox as outlined here; https://docs.labelbox.com/v3/reference/image ```openapi\\+warning The image URLs are special in that the resource can be accessed by anyone in possession of said URL for the time specified by the expiresIn query param ``` # noqa: E501 + DEPRECATED - Please use V4 unless there is a specific need to use the LabelBox V3 API. 
Export samples of a tag as a json for importing into LabelBox as outlined here; https://docs.labelbox.com/v3/reference/image ```openapi\\+warning The image URLs are special in that the resource can be accessed by anyone in possession of said URL for the time specified by the expiresIn query param ``` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -1159,7 +1161,7 @@ def export_tag_to_label_box_data_rows(self, dataset_id : Annotated[constr(strict def export_tag_to_label_box_data_rows_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], expires_in : Annotated[Optional[StrictInt], Field(description="If defined, the URLs provided will only be valid for amount of seconds from time of issuence. If not defined, the URls will be valid indefinitely. ")] = None, access_control : Annotated[Optional[StrictStr], Field(description="which access control name to be used")] = None, file_name_format : Optional[FileNameFormat] = None, include_meta_data : Annotated[Optional[StrictBool], Field(description="if true, will also include metadata")] = None, format : Optional[FileOutputFormat] = None, preview_example : Annotated[Optional[StrictBool], Field(description="if true, will generate a preview example of how the structure will look")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> ApiResponse: # noqa: E501 """(Deprecated) export_tag_to_label_box_data_rows # noqa: E501 - Deprecated. Please use V4 unless there is a specific need to use the LabelBox V3 API. Export samples of a tag as a json for importing into LabelBox as outlined here; https://docs.labelbox.com/v3/reference/image ```openapi\\+warning The image URLs are special in that the resource can be accessed by anyone in possession of said URL for the time specified by the expiresIn query param ``` # noqa: E501 + DEPRECATED - Please use V4 unless there is a specific need to use the LabelBox V3 API. Export samples of a tag as a json for importing into LabelBox as outlined here; https://docs.labelbox.com/v3/reference/image ```openapi\\+warning The image URLs are special in that the resource can be accessed by anyone in possession of said URL for the time specified by the expiresIn query param ``` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -2070,7 +2072,7 @@ def export_tag_to_sama_tasks_with_http_info(self, dataset_id : Annotated[constr( def get_filenames_by_tag_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], **kwargs) -> List[str]: # noqa: E501 """(Deprecated) get_filenames_by_tag_id # noqa: E501 - Get list of filenames by tag. Deprecated, please use # noqa: E501 + DEPRECATED, please use exportTagToBasicFilenames - Get list of filenames by tag. # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True @@ -2101,7 +2103,7 @@ def get_filenames_by_tag_id(self, dataset_id : Annotated[constr(strict=True), Fi def get_filenames_by_tag_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], **kwargs) -> ApiResponse: # noqa: E501 """(Deprecated) get_filenames_by_tag_id # noqa: E501 - Get list of filenames by tag. Deprecated, please use # noqa: E501 + DEPRECATED, please use exportTagToBasicFilenames - Get list of filenames by tag. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -2675,7 +2677,7 @@ def perform_tag_arithmetics_with_http_info(self, dataset_id : Annotated[constr(s def perform_tag_arithmetics_bitmask(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_arithmetics_request : TagArithmeticsRequest, **kwargs) -> TagBitMaskResponse: # noqa: E501 """(Deprecated) perform_tag_arithmetics_bitmask # noqa: E501 - Performs tag arithmetics to compute a new bitmask out of two existing tags. Does not create a new tag regardless if newTagName is provided # noqa: E501 + DEPRECATED, use performTagArithmetics - Performs tag arithmetics to compute a new bitmask out of two existing tags. Does not create a new tag regardless if newTagName is provided # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -2706,7 +2708,7 @@ def perform_tag_arithmetics_bitmask(self, dataset_id : Annotated[constr(strict=T def perform_tag_arithmetics_bitmask_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_arithmetics_request : TagArithmeticsRequest, **kwargs) -> ApiResponse: # noqa: E501 """(Deprecated) perform_tag_arithmetics_bitmask # noqa: E501 - Performs tag arithmetics to compute a new bitmask out of two existing tags. Does not create a new tag regardless if newTagName is provided # noqa: E501 + DEPRECATED, use performTagArithmetics - Performs tag arithmetics to compute a new bitmask out of two existing tags. Does not create a new tag regardless if newTagName is provided # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True diff --git a/lightly/openapi_generated/swagger_client/api/teams_api.py b/lightly/openapi_generated/swagger_client/api/teams_api.py index 811099f83..7eed9b158 100644 --- a/lightly/openapi_generated/swagger_client/api/teams_api.py +++ b/lightly/openapi_generated/swagger_client/api/teams_api.py @@ -51,7 +51,7 @@ def __init__(self, api_client=None): self.api_client = api_client @validate_arguments - def add_team_member(self, team_id : Annotated[constr(strict=True), Field(..., description="id of the team")], create_team_membership_request : CreateTeamMembershipRequest, **kwargs) -> None: # noqa: E501 + def add_team_member(self, team_id : Annotated[constr(strict=True), Field(..., description="id of the team")], create_team_membership_request : CreateTeamMembershipRequest, **kwargs) -> str: # noqa: E501 """add_team_member # noqa: E501 Add a team member. One needs to be part of the team to do so. 
# noqa: E501 @@ -74,7 +74,7 @@ def add_team_member(self, team_id : Annotated[constr(strict=True), Field(..., de :return: Returns the result object. If the method is called asynchronously, returns the request thread. - :rtype: None + :rtype: str """ kwargs['_return_http_data_only'] = True if '_preload_content' in kwargs: @@ -118,7 +118,7 @@ def add_team_member_with_http_info(self, team_id : Annotated[constr(strict=True) :return: Returns the result object. If the method is called asynchronously, returns the request thread. - :rtype: None + :rtype: tuple(str, status_code(int), headers(HTTPHeaderDict)) """ _params = locals() @@ -171,7 +171,7 @@ def add_team_member_with_http_info(self, team_id : Annotated[constr(strict=True) # set the HTTP header `Accept` _header_params['Accept'] = self.api_client.select_header_accept( - ['application/json']) # noqa: E501 + ['text/plain', 'application/json']) # noqa: E501 # set the HTTP header `Content-Type` _content_types_list = _params.get('_content_type', @@ -183,7 +183,13 @@ def add_team_member_with_http_info(self, team_id : Annotated[constr(strict=True) # authentication setting _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501 - _response_types_map = {} + _response_types_map = { + '200': "str", + '400': "ApiErrorResponse", + '401': "ApiErrorResponse", + '403': "ApiErrorResponse", + '404': "ApiErrorResponse", + } return self.api_client.call_api( '/v1/teams/{teamId}/members', 'POST', diff --git a/lightly/openapi_generated/swagger_client/models/api_error_code.py b/lightly/openapi_generated/swagger_client/models/api_error_code.py index 4876a203d..6e2b5dd56 100644 --- a/lightly/openapi_generated/swagger_client/models/api_error_code.py +++ b/lightly/openapi_generated/swagger_client/models/api_error_code.py @@ -37,6 +37,7 @@ class ApiErrorCode(str, Enum): UNAUTHORIZED = 'UNAUTHORIZED' NOT_FOUND = 'NOT_FOUND' NOT_MODIFIED = 'NOT_MODIFIED' + CONFLICT = 'CONFLICT' MALFORMED_REQUEST = 'MALFORMED_REQUEST' MALFORMED_RESPONSE = 'MALFORMED_RESPONSE' PAYLOAD_TOO_LARGE = 'PAYLOAD_TOO_LARGE' @@ -104,6 +105,7 @@ class ApiErrorCode(str, Enum): DOCKER_WORKER_SCHEDULE_UPDATE_FAILED = 'DOCKER_WORKER_SCHEDULE_UPDATE_FAILED' METADATA_CONFIGURATION_UNKNOWN = 'METADATA_CONFIGURATION_UNKNOWN' CUSTOM_METADATA_AT_MAX_SIZE = 'CUSTOM_METADATA_AT_MAX_SIZE' + ONPREM_SUBSCRIPTION_INSUFFICIENT = 'ONPREM_SUBSCRIPTION_INSUFFICIENT' ACCOUNT_SUBSCRIPTION_INSUFFICIENT = 'ACCOUNT_SUBSCRIPTION_INSUFFICIENT' TEAM_UNKNOWN = 'TEAM_UNKNOWN' diff --git a/lightly/openapi_generated/swagger_client/models/docker_worker_config_v3_docker.py b/lightly/openapi_generated/swagger_client/models/docker_worker_config_v3_docker.py index 39310739e..87322b288 100644 --- a/lightly/openapi_generated/swagger_client/models/docker_worker_config_v3_docker.py +++ b/lightly/openapi_generated/swagger_client/models/docker_worker_config_v3_docker.py @@ -20,7 +20,7 @@ from typing import Optional -from pydantic import Extra, BaseModel, Field, StrictBool, StrictStr, conint +from pydantic import Extra, BaseModel, Field, StrictBool, StrictStr, conint, constr, validator from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_docker_corruptness_check import DockerWorkerConfigV3DockerCorruptnessCheck from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_docker_datasource import DockerWorkerConfigV3DockerDatasource from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_docker_training import DockerWorkerConfigV3DockerTraining @@ -30,6 +30,7 @@ class 
DockerWorkerConfigV3Docker(BaseModel): docker run configurations, keys should match the structure of https://github.com/lightly-ai/lightly-core/blob/develop/onprem-docker/lightly_worker/src/lightly_worker/resources/docker/docker.yaml """ checkpoint: Optional[StrictStr] = None + checkpoint_run_id: Optional[constr(strict=True)] = Field(None, alias="checkpointRunId", description="MongoDB ObjectId") corruptness_check: Optional[DockerWorkerConfigV3DockerCorruptnessCheck] = Field(None, alias="corruptnessCheck") datasource: Optional[DockerWorkerConfigV3DockerDatasource] = None embeddings: Optional[StrictStr] = None @@ -44,7 +45,17 @@ class DockerWorkerConfigV3Docker(BaseModel): relevant_filenames_file: Optional[StrictStr] = Field(None, alias="relevantFilenamesFile") selected_sequence_length: Optional[conint(strict=True, ge=1)] = Field(None, alias="selectedSequenceLength") upload_report: Optional[StrictBool] = Field(None, alias="uploadReport") - __properties = ["checkpoint", "corruptnessCheck", "datasource", "embeddings", "enableTraining", "training", "normalizeEmbeddings", "numProcesses", "numThreads", "outputImageFormat", "pretagging", "pretaggingUpload", "relevantFilenamesFile", "selectedSequenceLength", "uploadReport"] + __properties = ["checkpoint", "checkpointRunId", "corruptnessCheck", "datasource", "embeddings", "enableTraining", "training", "normalizeEmbeddings", "numProcesses", "numThreads", "outputImageFormat", "pretagging", "pretaggingUpload", "relevantFilenamesFile", "selectedSequenceLength", "uploadReport"] + + @validator('checkpoint_run_id') + def checkpoint_run_id_validate_regular_expression(cls, value): + """Validates the regular expression""" + if value is None: + return value + + if not re.match(r"^[a-f0-9]{24}$", value): + raise ValueError(r"must validate the regular expression /^[a-f0-9]{24}$/") + return value class Config: """Pydantic configuration""" @@ -99,6 +110,7 @@ def from_dict(cls, obj: dict) -> DockerWorkerConfigV3Docker: _obj = DockerWorkerConfigV3Docker.parse_obj({ "checkpoint": obj.get("checkpoint"), + "checkpoint_run_id": obj.get("checkpointRunId"), "corruptness_check": DockerWorkerConfigV3DockerCorruptnessCheck.from_dict(obj.get("corruptnessCheck")) if obj.get("corruptnessCheck") is not None else None, "datasource": DockerWorkerConfigV3DockerDatasource.from_dict(obj.get("datasource")) if obj.get("datasource") is not None else None, "embeddings": obj.get("embeddings"), diff --git a/lightly/openapi_generated/swagger_client/models/sample_create_request.py b/lightly/openapi_generated/swagger_client/models/sample_create_request.py index ab3c6c34d..3c292a76a 100644 --- a/lightly/openapi_generated/swagger_client/models/sample_create_request.py +++ b/lightly/openapi_generated/swagger_client/models/sample_create_request.py @@ -78,6 +78,16 @@ def to_dict(self, by_alias: bool = False): if self.custom_meta_data is None and "custom_meta_data" in self.__fields_set__: _dict['customMetaData' if by_alias else 'custom_meta_data'] = None + # set to None if video_frame_data (nullable) is None + # and __fields_set__ contains the field + if self.video_frame_data is None and "video_frame_data" in self.__fields_set__: + _dict['videoFrameData' if by_alias else 'video_frame_data'] = None + + # set to None if crop_data (nullable) is None + # and __fields_set__ contains the field + if self.crop_data is None and "crop_data" in self.__fields_set__: + _dict['cropData' if by_alias else 'crop_data'] = None + return _dict @classmethod diff --git 
a/lightly/openapi_generated/swagger_client/models/sample_data.py b/lightly/openapi_generated/swagger_client/models/sample_data.py index 42ff509a7..84d3c39fb 100644 --- a/lightly/openapi_generated/swagger_client/models/sample_data.py +++ b/lightly/openapi_generated/swagger_client/models/sample_data.py @@ -102,11 +102,26 @@ def to_dict(self, by_alias: bool = False): if self.thumb_name is None and "thumb_name" in self.__fields_set__: _dict['thumbName' if by_alias else 'thumb_name'] = None + # set to None if exif (nullable) is None + # and __fields_set__ contains the field + if self.exif is None and "exif" in self.__fields_set__: + _dict['exif' if by_alias else 'exif'] = None + # set to None if custom_meta_data (nullable) is None # and __fields_set__ contains the field if self.custom_meta_data is None and "custom_meta_data" in self.__fields_set__: _dict['customMetaData' if by_alias else 'custom_meta_data'] = None + # set to None if video_frame_data (nullable) is None + # and __fields_set__ contains the field + if self.video_frame_data is None and "video_frame_data" in self.__fields_set__: + _dict['videoFrameData' if by_alias else 'video_frame_data'] = None + + # set to None if crop_data (nullable) is None + # and __fields_set__ contains the field + if self.crop_data is None and "crop_data" in self.__fields_set__: + _dict['cropData' if by_alias else 'crop_data'] = None + return _dict @classmethod diff --git a/lightly/openapi_generated/swagger_client/models/sample_data_modes.py b/lightly/openapi_generated/swagger_client/models/sample_data_modes.py index 426589a33..736754e2e 100644 --- a/lightly/openapi_generated/swagger_client/models/sample_data_modes.py +++ b/lightly/openapi_generated/swagger_client/models/sample_data_modes.py @@ -102,11 +102,26 @@ def to_dict(self, by_alias: bool = False): if self.thumb_name is None and "thumb_name" in self.__fields_set__: _dict['thumbName' if by_alias else 'thumb_name'] = None + # set to None if exif (nullable) is None + # and __fields_set__ contains the field + if self.exif is None and "exif" in self.__fields_set__: + _dict['exif' if by_alias else 'exif'] = None + # set to None if custom_meta_data (nullable) is None # and __fields_set__ contains the field if self.custom_meta_data is None and "custom_meta_data" in self.__fields_set__: _dict['customMetaData' if by_alias else 'custom_meta_data'] = None + # set to None if video_frame_data (nullable) is None + # and __fields_set__ contains the field + if self.video_frame_data is None and "video_frame_data" in self.__fields_set__: + _dict['videoFrameData' if by_alias else 'video_frame_data'] = None + + # set to None if crop_data (nullable) is None + # and __fields_set__ contains the field + if self.crop_data is None and "crop_data" in self.__fields_set__: + _dict['cropData' if by_alias else 'crop_data'] = None + return _dict @classmethod diff --git a/lightly/openapi_generated/swagger_client/models/sample_meta_data.py b/lightly/openapi_generated/swagger_client/models/sample_meta_data.py index 705c8b391..2153a9487 100644 --- a/lightly/openapi_generated/swagger_client/models/sample_meta_data.py +++ b/lightly/openapi_generated/swagger_client/models/sample_meta_data.py @@ -66,6 +66,16 @@ def to_dict(self, by_alias: bool = False): exclude={ }, exclude_none=True) + # set to None if custom (nullable) is None + # and __fields_set__ contains the field + if self.custom is None and "custom" in self.__fields_set__: + _dict['custom' if by_alias else 'custom'] = None + + # set to None if dynamic (nullable) is None + # and 
__fields_set__ contains the field + if self.dynamic is None and "dynamic" in self.__fields_set__: + _dict['dynamic' if by_alias else 'dynamic'] = None + return _dict @classmethod
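
The rest of this patch only touches generated client code, so a short usage sketch may help reviewers see the behavioural changes together. The snippet below is a hypothetical example and is not part of the diff: the top-level imports, the PredictionsApi/DatasourcesApi class names, the host, the "ApiKeyAuth" key name, and every ID, timestamp, and task name are assumptions or placeholders; only the method names, parameters, endpoints, and the checkpointRunId regex come from the changes above.

# Hypothetical usage sketch for the regenerated client; not part of this diff.
# Class names, host, credential, IDs, timestamp, and task name are assumptions or
# placeholders; the method signatures follow the patch above.
from lightly.openapi_generated.swagger_client import ApiClient, Configuration
from lightly.openapi_generated.swagger_client.api.datasources_api import DatasourcesApi
from lightly.openapi_generated.swagger_client.api.predictions_api import PredictionsApi
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_docker import (
    DockerWorkerConfigV3Docker,
)

configuration = Configuration(host="https://api.lightly.ai")
configuration.api_key["ApiKeyAuth"] = "MY_LIGHTLY_TOKEN"  # placeholder credential

client = ApiClient(configuration)
predictions_api = PredictionsApi(client)
datasources_api = DatasourcesApi(client)

dataset_id = "0123456789abcdef01234567"  # placeholder ObjectId
sample_id = "89abcdef0123456789abcdef"   # placeholder ObjectId
prediction_uuid_timestamp = 1684140000   # version peg of the uploaded predictions

# New endpoint (GET /v1/datasets/{datasetId}/predictions/samples/{sampleId}):
# all prediction singletons of every task for a single sample.
singletons = predictions_api.get_predictions_by_sample_id(
    dataset_id, sample_id, prediction_uuid_timestamp
)

# Dataset-wide predictions (GET /v1/datasets/{datasetId}/predictions/samples),
# optionally restricted to one task via the new taskName query parameter.
all_predictions = predictions_api.get_predictions_by_dataset_id(
    dataset_id, prediction_uuid_timestamp, task_name="object-detection"
)

# get_datasource_by_dataset_id now emits a DeprecationWarning at call time;
# its docstring points to get_datasources_by_dataset_id as the replacement.
datasources = datasources_api.get_datasources_by_dataset_id(dataset_id)

# The new checkpointRunId field is validated against ^[a-f0-9]{24}$ (a MongoDB ObjectId);
# from_dict accepts the camelCase alias, as shown in the model changes above.
worker_config = DockerWorkerConfigV3Docker.from_dict({"checkpointRunId": dataset_id})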