From 02bbb83407ff80393b58ee60afd11e72a33a8964 Mon Sep 17 00:00:00 2001 From: Heiru Wu Date: Thu, 15 Aug 2024 00:23:22 +0800 Subject: [PATCH] chore(model): revert task input/output refactor This reverts commit bedba500f036685a29fd9c1dbd93f4d76dcb70f3. --- artifact/artifact/v1alpha/file_catalog.proto | 146 +-- common/task/v1alpha/task.proto | 10 +- model/model/v1alpha/model.proto | 371 +++--- model/model/v1alpha/task_classification.proto | 34 + model/model/v1alpha/task_detection.proto | 43 + model/model/v1alpha/task_image_to_image.proto | 36 + .../v1alpha/task_instance_segmentation.proto | 48 + model/model/v1alpha/task_keypoint.proto | 54 + model/model/v1alpha/task_ocr.proto | 44 + .../v1alpha/task_semantic_segmentation.proto | 43 + .../model/v1alpha/task_text_generation.proto | 37 + .../v1alpha/task_text_generation_chat.proto | 37 + model/model/v1alpha/task_text_to_image.proto | 36 + model/model/v1alpha/task_unspecified.proto | 19 + .../task_visual_question_answering.proto | 39 + openapiv2/model/service.swagger.yaml | 1074 +++++++++++++++-- openapiv2/vdp/service.swagger.yaml | 11 +- 17 files changed, 1747 insertions(+), 335 deletions(-) create mode 100644 model/model/v1alpha/task_classification.proto create mode 100644 model/model/v1alpha/task_detection.proto create mode 100644 model/model/v1alpha/task_image_to_image.proto create mode 100644 model/model/v1alpha/task_instance_segmentation.proto create mode 100644 model/model/v1alpha/task_keypoint.proto create mode 100644 model/model/v1alpha/task_ocr.proto create mode 100644 model/model/v1alpha/task_semantic_segmentation.proto create mode 100644 model/model/v1alpha/task_text_generation.proto create mode 100644 model/model/v1alpha/task_text_generation_chat.proto create mode 100644 model/model/v1alpha/task_text_to_image.proto create mode 100644 model/model/v1alpha/task_unspecified.proto create mode 100644 model/model/v1alpha/task_visual_question_answering.proto diff --git a/artifact/artifact/v1alpha/file_catalog.proto b/artifact/artifact/v1alpha/file_catalog.proto index 92f33657..167e613d 100644 --- a/artifact/artifact/v1alpha/file_catalog.proto +++ b/artifact/artifact/v1alpha/file_catalog.proto @@ -20,77 +20,77 @@ message GetFileCatalogRequest { } // GetFileCatalogResponse -message GetFileCatalogResponse{ - // metadata - message Metadata { - // file uid - string file_uid = 1; - // file id - string file_id = 2; - // file type - FileType file_type = 3; - // file size in bytes - int64 file_size = 4; - // upload time - google.protobuf.Timestamp file_upload_time = 5; - // file process status - FileProcessStatus file_process_status = 6; - } - // text message - message Text{ - // pipelines - repeated string pipeline_ids = 1; - // transformed content - string transformed_content = 2; - // transformed content uid - string transformed_content_uid = 3; - // transformed content chunk number - int32 transformed_content_chunk_num = 4; - // transformed content token number - int32 transformed_content_token_num = 5; - // transformed content update time - google.protobuf.Timestamp transformed_content_update_time = 6; - } - // chunk type - enum ChunkType { - // unspecified - CHUNK_TYPE_UNSPECIFIED = 0; - // text - CHUNK_TYPE_TEXT = 1; - // image - CHUNK_TYPE_IMAGE = 2; - // audio - CHUNK_TYPE_AUDIO = 3; - // video - CHUNK_TYPE_VIDEO = 4; - } - // chunk message - message Chunk { - // chunk uid - string uid = 1; - // chunk type. i.e. 
text, image, audio, and video - ChunkType type = 2; - // chunk start position - int32 start_pos = 3; - // chunk end position - int32 end_pos =4; - // chunk content - string content = 5; - // chunk tokens num - int32 tokens_num = 6; - // embedding. float32 array - repeated float embedding = 7; - // chunk create time - google.protobuf.Timestamp create_time = 8; - // chunk retrievable - bool retrievable = 9; - } - // original data is encoded in base64 - string original_data = 1; - // file catalog - Metadata metadata = 2; - // text - Text text = 3; - // chunks - repeated Chunk chunks = 4; +message GetFileCatalogResponse { + // metadata + message Metadata { + // file uid + string file_uid = 1; + // file id + string file_id = 2; + // file type + FileType file_type = 3; + // file size in bytes + int64 file_size = 4; + // upload time + google.protobuf.Timestamp file_upload_time = 5; + // file process status + FileProcessStatus file_process_status = 6; + } + // text message + message Text { + // pipelines + repeated string pipeline_ids = 1; + // transformed content + string transformed_content = 2; + // transformed content uid + string transformed_content_uid = 3; + // transformed content chunk number + int32 transformed_content_chunk_num = 4; + // transformed content token number + int32 transformed_content_token_num = 5; + // transformed content update time + google.protobuf.Timestamp transformed_content_update_time = 6; + } + // chunk type + enum ChunkType { + // unspecified + CHUNK_TYPE_UNSPECIFIED = 0; + // text + CHUNK_TYPE_TEXT = 1; + // image + CHUNK_TYPE_IMAGE = 2; + // audio + CHUNK_TYPE_AUDIO = 3; + // video + CHUNK_TYPE_VIDEO = 4; + } + // chunk message + message Chunk { + // chunk uid + string uid = 1; + // chunk type. i.e. text, image, audio, and video + ChunkType type = 2; + // chunk start position + int32 start_pos = 3; + // chunk end position + int32 end_pos = 4; + // chunk content + string content = 5; + // chunk tokens num + int32 tokens_num = 6; + // embedding. float32 array + repeated float embedding = 7; + // chunk create time + google.protobuf.Timestamp create_time = 8; + // chunk retrievable + bool retrievable = 9; + } + // original data is encoded in base64 + string original_data = 1; + // file catalog + Metadata metadata = 2; + // text + Text text = 3; + // chunks + repeated Chunk chunks = 4; } diff --git a/common/task/v1alpha/task.proto b/common/task/v1alpha/task.proto index d265d23f..919e6ee5 100644 --- a/common/task/v1alpha/task.proto +++ b/common/task/v1alpha/task.proto @@ -21,19 +21,15 @@ enum Task { // Text to Image - generate images from input text prompts. TASK_TEXT_TO_IMAGE = 7; // Text Generation - generate texts from input text prompts. - reserved 8; + TASK_TEXT_GENERATION = 8; // Conversational Text Generation - generate text as responses to a dialog input. - reserved 9; + TASK_TEXT_GENERATION_CHAT = 9; // Visual Question Answering - generate text as a response to a visual prompt. - reserved 10; + TASK_VISUAL_QUESTION_ANSWERING = 10; // Image to Image - generate an image from another image. TASK_IMAGE_TO_IMAGE = 11; // Text Embeddings - generate an embedding (a representation as coordinates) from a text input. TASK_TEXT_EMBEDDINGS = 12; // Speech Recognition - transcribe the words in an audio input. TASK_SPEECH_RECOGNITION = 13; - // Conversational Text Generation - generate text as responses to a dialog input. - TASK_CHAT = 14; - // Completion Text Generation - generate text following the input prompt. 
-  TASK_COMPLETION = 15;
 }
diff --git a/model/model/v1alpha/model.proto b/model/model/v1alpha/model.proto
index e27c84ca..aaada2ce 100644
--- a/model/model/v1alpha/model.proto
+++ b/model/model/v1alpha/model.proto
@@ -11,14 +11,26 @@ import "core/mgmt/v1beta/mgmt.proto";
 import "google/api/field_behavior.proto";
 import "google/api/resource.proto";
 import "google/longrunning/operations.proto";
+import "google/protobuf/duration.proto";
 // Protobuf standard
 import "google/protobuf/field_mask.proto";
 import "google/protobuf/struct.proto";
 import "google/protobuf/timestamp.proto";
-import "google/protobuf/duration.proto";
 // Model definitions
 import "model/model/v1alpha/common.proto";
 import "model/model/v1alpha/model_definition.proto";
+import "model/model/v1alpha/task_classification.proto";
+import "model/model/v1alpha/task_detection.proto";
+import "model/model/v1alpha/task_image_to_image.proto";
+import "model/model/v1alpha/task_instance_segmentation.proto";
+import "model/model/v1alpha/task_keypoint.proto";
+import "model/model/v1alpha/task_ocr.proto";
+import "model/model/v1alpha/task_semantic_segmentation.proto";
+import "model/model/v1alpha/task_text_generation.proto";
+import "model/model/v1alpha/task_text_generation_chat.proto";
+import "model/model/v1alpha/task_text_to_image.proto";
+import "model/model/v1alpha/task_unspecified.proto";
+import "model/model/v1alpha/task_visual_question_answering.proto";
 // OpenAPI definition
 import "protoc-gen-openapiv2/options/annotations.proto";
@@ -190,10 +202,10 @@ message Model {
   optional string documentation_url = 22 [(google.api.field_behavior) = OPTIONAL];
   // License under which the model is distributed.
   optional string license = 23 [(google.api.field_behavior) = OPTIONAL];
-  // Deprecated: Sample input for this model
-  reserved 24;
-  // Deprecated: Sample output for this model
-  reserved 25;
+  // Sample input for this model
+  TaskInput sample_input = 24 [(google.api.field_behavior) = OUTPUT_ONLY];
+  // Sample output for this model
+  TaskOutput sample_output = 25 [(google.api.field_behavior) = OUTPUT_ONLY];
   // Model profile image in base64 format.
   optional string profile_image = 26 [(google.api.field_behavior) = OPTIONAL];
   // Permission defines how a model can be used.
@@ -234,11 +246,11 @@ message ListModelsRequest {
 // ListModelsResponse contains a list of models.
 message ListModelsResponse {
   // A list of model resources.
-  repeated Model models = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+  repeated Model models = 1;
   // Next page token.
-  string next_page_token = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+  string next_page_token = 2;
   // Total number of models.
-  int32 total_size = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+  int32 total_size = 3;
 }

 // LookUpModelRequest represents a request to query a model by its UID.
@@ -253,7 +265,7 @@ message LookUpModelRequest {
 // LookUpModelResponse contains the requested model.
 message LookUpModelResponse {
   // The requested model.
-  Model model = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+  Model model = 1;
 }

 // ListNamespaceModelsRequest represents a request to list namespace models.
@@ -284,11 +296,11 @@ message ListNamespaceModelsRequest {
 // ListNamespaceModelsResponse contains a list of models.
 message ListNamespaceModelsResponse {
   // A list of model resources.
-  repeated Model models = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+  repeated Model models = 1;
   // Next page token.
-  string next_page_token = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+  string next_page_token = 2;
   // Total number of models.
-  int32 total_size = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+  int32 total_size = 3;
 }

 // CreateNamespaceModelRequest represents a request from a namespace to create a model.
@@ -302,7 +314,7 @@ message CreateNamespaceModelRequest {
 // CreateNamespaceModelResponse contains the created model.
 message CreateNamespaceModelResponse {
   // The created model resource.
-  Model model = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+  Model model = 1;
 }

 // GetNamespaceModelRequest represents a request to fetch the details of a model
@@ -319,7 +331,7 @@ message GetNamespaceModelRequest {
 // GetNamespaceModelResponse contains the requested model.
 message GetNamespaceModelResponse {
   // The model resource.
-  Model model = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+  Model model = 1;
 }

 // UpdateNamespaceModelRequest represents a request to update a model owned by a
@@ -341,7 +353,7 @@ message UpdateNamespaceModelRequest {
 // UpdateNamespaceModelResponse contains the updated model.
 message UpdateNamespaceModelResponse {
   // The updated model resource.
-  Model model = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+  Model model = 1;
 }

 // DeleteNamespaceModelRequest represents a request to delete a model owned by a
@@ -370,7 +382,7 @@ message RenameNamespaceModelRequest {
 // RenameNamespaceModelResponse contains a renamed model.
 message RenameNamespaceModelResponse {
   // The renamed model resource.
-  Model model = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+  Model model = 1;
 }

 // WatchNamespaceModelRequest represents a request to fetch current state of a model
@@ -386,11 +398,11 @@ message WatchNamespaceModelRequest {
 // WatchNamespaceModelResponse contains the state of a model.
 message WatchNamespaceModelResponse {
   // State.
-  State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+  State state = 1;
   // Deprecated field `progress`
   reserved 2;
   // Detail description of the state
-  string message = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+  string message = 3;
 }

 // WatchNamespaceLatestModelRequest represents a request to fetch current state of
@@ -405,11 +417,11 @@ message WatchNamespaceLatestModelRequest {
 // WatchNamespaceLatestModelResponse contains the state of the latest model version.
 message WatchNamespaceLatestModelResponse {
   // State.
-  State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+  State state = 1;
   // Deprecated field `progress`
   reserved 2;
   // Detail description of the state
-  string message = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+  string message = 3;
 }

 // ListNamespaceModelVersionsRequest represents a request to list all the versions
@@ -458,22 +470,18 @@ message TriggerNamespaceModelRequest {
   string namespace_id = 1 [(google.api.field_behavior) = REQUIRED];
   // Model ID
   string model_id = 2 [(google.api.field_behavior) = REQUIRED];
-  // Deleteted field.
-  reserved 3;
+  // Inference input parameters.
+  repeated TaskInput task_inputs = 3 [(google.api.field_behavior) = REQUIRED];
   // Model version
   string version = 4 [(google.api.field_behavior) = REQUIRED];
-  // Model inference inputs.
-  repeated google.protobuf.Struct task_inputs = 5 [(google.api.field_behavior) = REQUIRED];
 }

 // TriggerNamespaceModelResponse contains the model inference results.
 message TriggerNamespaceModelResponse {
   // Task type.
-  common.task.v1alpha.Task task = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
-  // Deleted field.
- reserved 2; + common.task.v1alpha.Task task = 1; // Model inference outputs. - repeated google.protobuf.Struct task_outputs = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated TaskOutput task_outputs = 2; } // TriggerAsyncNamespaceModelRequest represents a request to trigger a model inference @@ -483,12 +491,10 @@ message TriggerAsyncNamespaceModelRequest { string namespace_id = 1 [(google.api.field_behavior) = REQUIRED]; // Model ID string model_id = 2 [(google.api.field_behavior) = REQUIRED]; - // Deleteted field. - reserved 3; + // Inference input parameters. + repeated TaskInput task_inputs = 3 [(google.api.field_behavior) = REQUIRED]; // Model version string version = 4 [(google.api.field_behavior) = REQUIRED]; - // Model inference inputs. - repeated google.protobuf.Struct task_inputs = 5 [(google.api.field_behavior) = REQUIRED]; } // TriggerAsyncNamespaceModelResponse contains the information to access the @@ -505,20 +511,16 @@ message TriggerNamespaceLatestModelRequest { string namespace_id = 1 [(google.api.field_behavior) = REQUIRED]; // Model ID string model_id = 2 [(google.api.field_behavior) = REQUIRED]; - // Deleteted field. - reserved 3; - // Model inference inputs. - repeated google.protobuf.Struct task_inputs = 4 [(google.api.field_behavior) = REQUIRED]; + // Inference input parameters. + repeated TaskInput task_inputs = 3 [(google.api.field_behavior) = REQUIRED]; } // TriggerNamespaceLatestModelResponse contains the model inference results. message TriggerNamespaceLatestModelResponse { // Task type. - common.task.v1alpha.Task task = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - // Deleted field. - reserved 2; + common.task.v1alpha.Task task = 1; // Model inference outputs. - repeated google.protobuf.Struct task_outputs = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated TaskOutput task_outputs = 2; } // TriggerAsyncNamespaceLatestModelRequest represents a request to trigger a model inference @@ -528,10 +530,8 @@ message TriggerAsyncNamespaceLatestModelRequest { string namespace_id = 1 [(google.api.field_behavior) = REQUIRED]; // Model ID string model_id = 2 [(google.api.field_behavior) = REQUIRED]; - // Deleteted field. - reserved 3; - // Model inference inputs. - repeated google.protobuf.Struct task_inputs = 4 [(google.api.field_behavior) = REQUIRED]; + // Inference input parameters. + repeated TaskInput task_inputs = 3 [(google.api.field_behavior) = REQUIRED]; } // TriggerAsyncNamespaceLatestModelResponse contains the information to access the @@ -548,24 +548,20 @@ message TriggerNamespaceModelBinaryFileUploadRequest { string namespace_id = 1 [(google.api.field_behavior) = REQUIRED]; // Model ID string model_id = 2 [(google.api.field_behavior) = REQUIRED]; - // Deleted field. - reserved 3; + // Inference input as a binary file. + TaskInputStream task_input = 3 [(google.api.field_behavior) = REQUIRED]; // Model version string version = 4 [(google.api.field_behavior) = REQUIRED]; - // Inference input as a binary file. - repeated google.protobuf.Struct task_input = 5 [(google.api.field_behavior) = REQUIRED]; } // TriggerNamespaceModelBinaryFileUploadResponse contains the model inference results. message TriggerNamespaceModelBinaryFileUploadResponse { // Task type. common.task.v1alpha.Task task = 1 [(google.api.field_behavior) = REQUIRED]; - // Deleteted field. - reserved 2; - // Deleteted field. - reserved 3; // Model inference outputs. 
-  repeated google.protobuf.Struct task_outputs = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
+  repeated TaskOutput task_outputs = 2 [(google.api.field_behavior) = REQUIRED];
+  // Model version
+  string version = 3 [(google.api.field_behavior) = REQUIRED];
 }

 // TriggerNamespaceLatestModelBinaryFileUploadRequest represents a request to trigger a model
@@ -575,20 +571,16 @@ message TriggerNamespaceLatestModelBinaryFileUploadRequest {
   string namespace_id = 1 [(google.api.field_behavior) = REQUIRED];
   // Model ID
   string model_id = 2 [(google.api.field_behavior) = REQUIRED];
-  // Deleted field.
-  reserved 3;
   // Inference input as a binary file.
-  repeated google.protobuf.Struct task_input = 4 [(google.api.field_behavior) = REQUIRED];
+  TaskInputStream task_input = 3 [(google.api.field_behavior) = REQUIRED];
 }

 // TriggerNamespaceLatestModelBinaryFileUploadResponse contains the model inference results.
 message TriggerNamespaceLatestModelBinaryFileUploadResponse {
   // Task type.
   common.task.v1alpha.Task task = 1 [(google.api.field_behavior) = REQUIRED];
-  reserved 2;
-  // Deleteted field.
   // Model inference outputs.
-  repeated google.protobuf.Struct task_outputs = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+  repeated TaskOutput task_outputs = 2 [(google.api.field_behavior) = REQUIRED];
 }

 // GetNamespaceLatestModelOperationRequest represents a request to fetch the latest long-running
@@ -606,7 +598,7 @@ message GetNamespaceLatestModelOperationRequest {
 // operation.
 message GetNamespaceLatestModelOperationResponse {
   // The long-running operation.
-  google.longrunning.Operation operation = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+  google.longrunning.Operation operation = 1;
 }

 // DeployNamespaceModelAdminRequest represents a request to deploy a model to online state
@@ -664,7 +656,7 @@ message CreateUserModelRequest {
 // CreateUserModelResponse contains the created model.
 message CreateUserModelResponse {
   // The created model resource.
-  Model model = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+  Model model = 1;
 }

 // ListUserModelsRequest represents a request to list the models of a user.
@@ -702,11 +694,11 @@ message ListUserModelsRequest {
 // ListUserModelsResponse contains a list of models.
 message ListUserModelsResponse {
   // A list of model resources.
-  repeated Model models = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+  repeated Model models = 1;
   // Next page token.
-  string next_page_token = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+  string next_page_token = 2;
   // Total number of models.
-  int32 total_size = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+  int32 total_size = 3;
 }

 // GetUserModelRequest represents a request to fetch the details of a model
@@ -729,7 +721,7 @@ message GetUserModelRequest {
 // GetUserModelResponse contains the requested model.
 message GetUserModelResponse {
   // The model resource.
-  Model model = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+  Model model = 1;
 }

 // UpdateUserModelRequest represents a request to update a model owned by a
@@ -747,7 +739,7 @@ message UpdateUserModelRequest {
 // UpdateUserModelResponse contains the updated model.
 message UpdateUserModelResponse {
   // The updated model resource.
-  Model model = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+  Model model = 1;
 }

 // DeleteUserModelRequest represents a request to delete a model owned by a
@@ -789,7 +781,7 @@ message RenameUserModelRequest {
 // RenameUserModelResponse contains a renamed model.
 message RenameUserModelResponse {
   // The renamed model resource.
- Model model = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + Model model = 1; } // WatchUserModelRequest represents a request to fetch current state of a model @@ -811,11 +803,11 @@ message WatchUserModelRequest { // WatchUserModelResponse contains the state of a model. message WatchUserModelResponse { // State. - State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + State state = 1; // Deprecated field `progress` reserved 2; // Detail description of the state - string message = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + string message = 3; } // WatchUserLatestModelRequest represents a request to fetch current state of @@ -836,11 +828,11 @@ message WatchUserLatestModelRequest { // WatchUserLatestModelResponse contains the state of the latest model version. message WatchUserLatestModelResponse { // State. - State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + State state = 1; // Deprecated field `progress` reserved 2; // Detail description of the state - string message = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + string message = 3; } // ListUserModelVersionsRequest represents a request to list all the versions @@ -868,13 +860,13 @@ message ListUserModelVersionsRequest { // ListUserModelVersionsResponse contains a list of models. message ListUserModelVersionsResponse { // A list of model resources. - repeated ModelVersion versions = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated ModelVersion versions = 1; // Total number of versions. - int32 total_size = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + int32 total_size = 2; // The requested page size. - int32 page_size = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + int32 page_size = 3; // The requested page offset. - int32 page = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + int32 page = 4; } // DeleteUserModelVersionRequest represents a request to delete a model version @@ -901,6 +893,99 @@ message DeleteUserModelVersionResponse {} // Trigger methods //////////////////////////////////// +// TaskInput represents a question or task for an AI model. +message TaskInput { + // Input type. + oneof input { + // Image classification input. + ClassificationInput classification = 1; + // Object detection input. + DetectionInput detection = 2; + // Keypoint detection input. + KeypointInput keypoint = 3; + // Optical Character Recognition input. + OcrInput ocr = 4; + // Instance segmentation input. + InstanceSegmentationInput instance_segmentation = 5; + // Semantic segmentation input. + SemanticSegmentationInput semantic_segmentation = 6; + // Text to image input. + TextToImageInput text_to_image = 7; + // Image to image input. + ImageToImageInput image_to_image = 8; + // Text generation input. + TextGenerationInput text_generation = 9; + // Conversational text generation input. + TextGenerationChatInput text_generation_chat = 10; + // Visual question answering input. + VisualQuestionAnsweringInput visual_question_answering = 11; + // Unspecified task input. 
+ UnspecifiedInput unspecified = 12; + } +} + +// TaskInputStream represents the input to trigger a model with stream method +message TaskInputStream { + // Input type + oneof input { + // The classification input + ClassificationInputStream classification = 1; + // The detection input + DetectionInputStream detection = 2; + // The keypoint input + KeypointInputStream keypoint = 3; + // The ocr input + OcrInputStream ocr = 4; + // The instance segmentation input + InstanceSegmentationInputStream instance_segmentation = 5; + // The semantic segmentation input + SemanticSegmentationInputStream semantic_segmentation = 6; + // The text to image input + TextToImageInput text_to_image = 7; + // The image to image input + ImageToImageInput image_to_image = 8; + // The text generation input + TextGenerationInput text_generation = 9; + // The text generation chat input + TextGenerationChatInput text_generation_chat = 10; + // The visual question answering input + VisualQuestionAnsweringInput visual_question_answering = 11; + // The unspecified task input + UnspecifiedInput unspecified = 12; + } +} + +// TaskOutput represents the result of an AI task performed by a model. +message TaskOutput { + // Inference task output. + oneof output { + // Image classification output. + ClassificationOutput classification = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + // Object detection output. + DetectionOutput detection = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + // Keypoint detection output. + KeypointOutput keypoint = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + // Optical Character Recognition output. + OcrOutput ocr = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + // Instance segmentation output. + InstanceSegmentationOutput instance_segmentation = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + // Semantic segmentation output. + SemanticSegmentationOutput semantic_segmentation = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; + // Text to image output. + TextToImageOutput text_to_image = 7 [(google.api.field_behavior) = OUTPUT_ONLY]; + // Image to image output. + ImageToImageOutput image_to_image = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; + // Text generation output. + TextGenerationOutput text_generation = 9 [(google.api.field_behavior) = OUTPUT_ONLY]; + // Conversational text generation output. + TextGenerationChatOutput text_generation_chat = 10 [(google.api.field_behavior) = OUTPUT_ONLY]; + // Visual question answering output. + VisualQuestionAnsweringOutput visual_question_answering = 11 [(google.api.field_behavior) = OUTPUT_ONLY]; + // Unspecified task output. + UnspecifiedOutput unspecified = 12 [(google.api.field_behavior) = OUTPUT_ONLY]; + } +} + // TriggerUserModelRequest represents a request to trigger a model inference. message TriggerUserModelRequest { // The resource name of the model , which allows its access by parent user @@ -913,22 +998,18 @@ message TriggerUserModelRequest { field_configuration: {path_param_name: "user_model_name"} } ]; - // Deleted field. - reserved 2; + // Inference input parameters. + repeated TaskInput task_inputs = 2 [(google.api.field_behavior) = REQUIRED]; // Model version string version = 3 [(google.api.field_behavior) = REQUIRED]; - // Model inference inputs. - repeated google.protobuf.Struct task_inputs = 4 [(google.api.field_behavior) = REQUIRED]; } // TriggerUserModelResponse contains the model inference results. message TriggerUserModelResponse { // Task type. 
- common.task.v1alpha.Task task = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - // Deleted field. - reserved 2; + common.task.v1alpha.Task task = 1; // Model inference outputs. - repeated google.protobuf.Struct task_outputs = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated TaskOutput task_outputs = 2; } // TriggerAsyncUserModelRequest represents a request to trigger a model inference @@ -944,12 +1025,10 @@ message TriggerAsyncUserModelRequest { field_configuration: {path_param_name: "user_model_name"} } ]; - // Deleted field. - reserved 2; + // Inference input parameters. + repeated TaskInput task_inputs = 2 [(google.api.field_behavior) = REQUIRED]; // Model version string version = 3 [(google.api.field_behavior) = REQUIRED]; - // Model inference inputs. - repeated google.protobuf.Struct task_inputs = 4 [(google.api.field_behavior) = REQUIRED]; } // TriggerAsyncUserModelResponse contains the information to access the @@ -972,20 +1051,16 @@ message TriggerUserLatestModelRequest { field_configuration: {path_param_name: "user_model_name"} } ]; - // Deleted field. - reserved 2; - // Model inference inputs. - repeated google.protobuf.Struct task_inputs = 3 [(google.api.field_behavior) = REQUIRED]; + // Inference input parameters. + repeated TaskInput task_inputs = 2 [(google.api.field_behavior) = REQUIRED]; } // TriggerUserLatestModelResponse contains the model inference results. message TriggerUserLatestModelResponse { // Task type. - common.task.v1alpha.Task task = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - // Deleted field. - reserved 2; + common.task.v1alpha.Task task = 1; // Model inference outputs. - repeated google.protobuf.Struct task_outputs = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated TaskOutput task_outputs = 2; } // TriggerAsyncUserLatestModelRequest represents a request to trigger a model inference @@ -1001,10 +1076,8 @@ message TriggerAsyncUserLatestModelRequest { field_configuration: {path_param_name: "user_model_name"} } ]; - // Deleted field. - reserved 2; - // Model inference inputs. - repeated google.protobuf.Struct task_inputs = 3 [(google.api.field_behavior) = REQUIRED]; + // Inference input parameters. + repeated TaskInput task_inputs = 2 [(google.api.field_behavior) = REQUIRED]; } // TriggerAsyncUserLatestModelResponse contains the information to access the @@ -1027,24 +1100,20 @@ message TriggerUserModelBinaryFileUploadRequest { field_configuration: {path_param_name: "user_model_name"} } ]; - // Deleted field. - reserved 2; + // Inference input as a binary file. + TaskInputStream task_input = 2 [(google.api.field_behavior) = REQUIRED]; // Model version string version = 3 [(google.api.field_behavior) = REQUIRED]; - // Model inference inputs. - repeated google.protobuf.Struct task_inputs = 4 [(google.api.field_behavior) = REQUIRED]; } // TriggerUserModelBinaryFileUploadResponse contains the model inference results. message TriggerUserModelBinaryFileUploadResponse { // Task type. common.task.v1alpha.Task task = 1 [(google.api.field_behavior) = REQUIRED]; - // Deleted field. - reserved 2; - // Deleted field. - reserved 3; // Model inference outputs. 
- repeated google.protobuf.Struct task_outputs = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated TaskOutput task_outputs = 2 [(google.api.field_behavior) = REQUIRED]; + // Model version + string version = 3 [(google.api.field_behavior) = REQUIRED]; } //////////////////////////////////// @@ -1069,7 +1138,7 @@ message CreateOrganizationModelRequest { // CreateOrganizationModelResponse contains the created model. message CreateOrganizationModelResponse { // The created model resource. - Model model = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + Model model = 1; } // ListOrganizationModelsRequest represents a request to list the models @@ -1108,11 +1177,11 @@ message ListOrganizationModelsRequest { // ListOrganizationModelsResponse contains a list of models. message ListOrganizationModelsResponse { // A list of model resources. - repeated Model models = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated Model models = 1; // Next page token. - string next_page_token = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + string next_page_token = 2; // Total number of models. - int32 total_size = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + int32 total_size = 3; } // GetOrganizationModelRequest represents a request to fetch the details of a model @@ -1135,7 +1204,7 @@ message GetOrganizationModelRequest { // GetOrganizationModelResponse contains the requested model. message GetOrganizationModelResponse { // The model resource. - Model model = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + Model model = 1; } // UpdateOrganizationModelRequest represents a request to update a model owned by an @@ -1153,7 +1222,7 @@ message UpdateOrganizationModelRequest { // UpdateOrganizationModelResponse contains the updated model. message UpdateOrganizationModelResponse { // The updated model resource. - Model model = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + Model model = 1; } // DeleteOrganizationModelRequest represents a request to delete a model owned by an @@ -1195,7 +1264,7 @@ message RenameOrganizationModelRequest { // RenameOrganizationModelResponse contains a renamed model. message RenameOrganizationModelResponse { // The renamed model resource. - Model model = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + Model model = 1; } // WatchOrganizationModelRequest represents a request to fetch current state of a model. @@ -1217,11 +1286,11 @@ message WatchOrganizationModelRequest { // WatchOrganizationModelResponse contains the state of a model. message WatchOrganizationModelResponse { // State. - State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + State state = 1; // Deprecated field `progress` reserved 2; // Detail description of the state - string message = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + string message = 3; } // WatchOrganizationLatestModelRequest represents a request to fetch current state of @@ -1242,11 +1311,11 @@ message WatchOrganizationLatestModelRequest { // WatchOrganizationLatestModelResponse contains the state of the latest model version. message WatchOrganizationLatestModelResponse { // State. 
- State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + State state = 1; // Deprecated field `progress` reserved 2; // Detail description of the state - string message = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + string message = 3; } // ListOrganizationModelVersionsRequest represents a request to list all the versions @@ -1274,13 +1343,13 @@ message ListOrganizationModelVersionsRequest { // ListOrganizationModelVersionsResponse contains a list of models. message ListOrganizationModelVersionsResponse { // A list of model resources. - repeated ModelVersion versions = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated ModelVersion versions = 1; // Total number of versions. - int32 total_size = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + int32 total_size = 2; // The requested page size. - int32 page_size = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + int32 page_size = 3; // The requested page offset. - int32 page = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + int32 page = 4; } // DeleteOrganizationModelVersionRequest represents a request to delete a model version @@ -1317,22 +1386,18 @@ message TriggerOrganizationModelRequest { field_configuration: {path_param_name: "organization_model_name"} } ]; - // Deleted field. - reserved 2; + // Inference input parameters. + repeated TaskInput task_inputs = 2 [(google.api.field_behavior) = REQUIRED]; // Model version string version = 3 [(google.api.field_behavior) = REQUIRED]; - // Model inference inputs. - repeated google.protobuf.Struct task_inputs = 4 [(google.api.field_behavior) = REQUIRED]; } // TriggerOrganizationModelResponse contains the model inference results. message TriggerOrganizationModelResponse { // Task type. - common.task.v1alpha.Task task = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - // Deleted field. - reserved 2; + common.task.v1alpha.Task task = 1; // Model inference outputs. - repeated google.protobuf.Struct task_outputs = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated TaskOutput task_outputs = 2; } // TriggerAsyncOrganizationModelRequest represents a request to trigger a model inference @@ -1348,12 +1413,10 @@ message TriggerAsyncOrganizationModelRequest { field_configuration: {path_param_name: "organization_model_name"} } ]; - // Deleted field. - reserved 2; + // Inference input parameters. + repeated TaskInput task_inputs = 2 [(google.api.field_behavior) = REQUIRED]; // Model version string version = 3 [(google.api.field_behavior) = REQUIRED]; - // Model inference inputs. - repeated google.protobuf.Struct task_inputs = 4 [(google.api.field_behavior) = REQUIRED]; } // TriggerAsyncOrganizationModelResponse contains the information to access the @@ -1375,20 +1438,16 @@ message TriggerOrganizationLatestModelRequest { field_configuration: {path_param_name: "organization_model_name"} } ]; - // Deleted field. - reserved 2; - // Model inference inputs. - repeated google.protobuf.Struct task_inputs = 3 [(google.api.field_behavior) = REQUIRED]; + // Inference input parameters. + repeated TaskInput task_inputs = 2 [(google.api.field_behavior) = REQUIRED]; } // TriggerOrganizationLatestModelResponse contains the model inference results. message TriggerOrganizationLatestModelResponse { // Task type. - common.task.v1alpha.Task task = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - // Deleted field. - reserved 2; + common.task.v1alpha.Task task = 1; // Model inference outputs. 
- repeated google.protobuf.Struct task_outputs = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated TaskOutput task_outputs = 2; } // TriggerAsyncOrganizationLatestModelRequest represents a request to trigger a model inference @@ -1404,10 +1463,8 @@ message TriggerAsyncOrganizationLatestModelRequest { field_configuration: {path_param_name: "organization_model_name"} } ]; - // Deleted field. - reserved 2; - // Model inference inputs. - repeated google.protobuf.Struct task_inputs = 3 [(google.api.field_behavior) = REQUIRED]; + // Inference input parameters. + repeated TaskInput task_inputs = 2 [(google.api.field_behavior) = REQUIRED]; } // TriggerAsyncOrganizationLatestModelResponse contains the information to access the @@ -1430,22 +1487,18 @@ message TriggerOrganizationModelBinaryFileUploadRequest { field_configuration: {path_param_name: "organization_model_name"} } ]; - // Deleted field. - reserved 2; + // Inference input as a binary file. + TaskInputStream task_input = 2 [(google.api.field_behavior) = REQUIRED]; // Model version string version = 3 [(google.api.field_behavior) = REQUIRED]; - // Model inference inputs. - repeated google.protobuf.Struct task_inputs = 4 [(google.api.field_behavior) = REQUIRED]; } // TriggerOrganizationModelBinaryFileUploadResponse contains the model inference results. message TriggerOrganizationModelBinaryFileUploadResponse { // Task type. common.task.v1alpha.Task task = 1 [(google.api.field_behavior) = REQUIRED]; - // Deleted field. - reserved 2; // Model inference outputs. - repeated google.protobuf.Struct task_outputs = 4; + repeated TaskOutput task_outputs = 2 [(google.api.field_behavior) = REQUIRED]; } // GetModelOperationRequest represents a request to fetch a long-running @@ -1464,7 +1517,7 @@ message GetModelOperationRequest { // operation. message GetModelOperationResponse { // The long-running operation. - google.longrunning.Operation operation = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + google.longrunning.Operation operation = 1; } // LatestOperation represents an internal message for GetLatestModelOperation Response @@ -1500,7 +1553,7 @@ message GetUserLatestModelOperationRequest { // operation. message GetUserLatestModelOperationResponse { // The long-running operation. - google.longrunning.Operation operation = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + google.longrunning.Operation operation = 1; } // GetOrganizationLatestModelOperationRequest represents a request to fetch the latest long-running @@ -1524,7 +1577,7 @@ message GetOrganizationLatestModelOperationRequest { // operation. message GetOrganizationLatestModelOperationResponse { // The long-running operation. - google.longrunning.Operation operation = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + google.longrunning.Operation operation = 1; } // ListAvailableRegionsRequest represents a request to fetch a list @@ -1535,7 +1588,7 @@ message ListAvailableRegionsRequest {} // regions and hardware types a model can be deployed on. message ListAvailableRegionsResponse { // A list of available region - repeated Region regions = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + repeated Region regions = 1; } // ========== Private endpoints @@ -1764,7 +1817,7 @@ message ListModelRunsRequest { // View allows clients to specify the desired run view in the response. // The basic view excludes input / output data. optional View view = 3 [(google.api.field_behavior) = OPTIONAL]; - // Sort the results by the given expression. + // Sort the results by the given expression. 
   // Format: `field [ASC | DESC]`, where `field` can be:
   // - `create_time`
   // - `update_time`
diff --git a/model/model/v1alpha/task_classification.proto b/model/model/v1alpha/task_classification.proto
new file mode 100644
index 00000000..34b15fc8
--- /dev/null
+++ b/model/model/v1alpha/task_classification.proto
@@ -0,0 +1,34 @@
+syntax = "proto3";
+
+package model.model.v1alpha;
+
+// Google api
+import "google/api/field_behavior.proto";
+
+// ClassificationInput is the input of an image classification task.
+message ClassificationInput {
+  // Content of the input.
+  oneof type {
+    // Image URL.
+    string image_url = 1;
+    // Base64-encoded image.
+    string image_base64 = 2;
+  }
+}
+
+// ClassificationInputStream represents the input of an image classification
+// task when the input is streamed as binary files.
+message ClassificationInputStream {
+  // File length for each uploaded binary file.
+  repeated uint32 file_lengths = 1 [(google.api.field_behavior) = REQUIRED];
+  // Byte representation of the images.
+  bytes content = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// ClassificationOutput contains the result of an image classification task.
+message ClassificationOutput {
+  // Category.
+  string category = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+  // Score.
+  float score = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
diff --git a/model/model/v1alpha/task_detection.proto b/model/model/v1alpha/task_detection.proto
new file mode 100644
index 00000000..c4c7c1c9
--- /dev/null
+++ b/model/model/v1alpha/task_detection.proto
@@ -0,0 +1,43 @@
+syntax = "proto3";
+
+package model.model.v1alpha;
+
+// Google api
+import "google/api/field_behavior.proto";
+import "model/model/v1alpha/common.proto";
+
+// DetectionObject represents an identified object in an image.
+message DetectionObject {
+  // Category.
+  string category = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+  // Score.
+  float score = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+  // Bounding box.
+  BoundingBox bounding_box = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// DetectionInput represents the input of an object detection task.
+message DetectionInput {
+  // Content of the input.
+  oneof type {
+    // Image URL.
+    string image_url = 1;
+    // Base64-encoded image.
+    string image_base64 = 2;
+  }
+}
+
+// DetectionInputStream represents the input of an object detection task when
+// the input is streamed as binary files.
+message DetectionInputStream {
+  // File length for each uploaded binary file.
+  repeated uint32 file_lengths = 1 [(google.api.field_behavior) = REQUIRED];
+  // Byte representation of the images.
+  bytes content = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// DetectionOutput contains the result of an object detection task.
+message DetectionOutput {
+  // A list of detected objects.
+  repeated DetectionObject objects = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
diff --git a/model/model/v1alpha/task_image_to_image.proto b/model/model/v1alpha/task_image_to_image.proto
new file mode 100644
index 00000000..0ae81645
--- /dev/null
+++ b/model/model/v1alpha/task_image_to_image.proto
@@ -0,0 +1,36 @@
+syntax = "proto3";
+
+package model.model.v1alpha;
+
+// Google api
+import "google/api/field_behavior.proto";
+import "google/protobuf/struct.proto";
+
+// ImageToImageInput represents the input of an image-to-image task.
+message ImageToImageInput {
+  // The prompt image, only for multimodal input.
+  oneof type {
+    // Image URL.
+ string prompt_image_url = 1; + // Base64-encoded image. + string prompt_image_base64 = 2; + } + // Input text. + optional string prompt = 3 [(google.api.field_behavior) = REQUIRED]; + // Steps, defaults to 5. + optional int32 steps = 4 [(google.api.field_behavior) = OPTIONAL]; + // Guidance scale, defaults to 7.5 + optional float cfg_scale = 5 [(google.api.field_behavior) = OPTIONAL]; + // Seed, defaults to 0. + optional int32 seed = 6 [(google.api.field_behavior) = OPTIONAL]; + // Number of generated samples, defaults to 1. + optional int32 samples = 7 [(google.api.field_behavior) = OPTIONAL]; + // Extra parameters. + google.protobuf.Struct extra_params = 9 [(google.api.field_behavior) = OPTIONAL]; +} + +// ImageToImageOutput contains the result of an image-to-image task. +message ImageToImageOutput { + // A list of generated images. + repeated string images = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/model/model/v1alpha/task_instance_segmentation.proto b/model/model/v1alpha/task_instance_segmentation.proto new file mode 100644 index 00000000..fa1a50ca --- /dev/null +++ b/model/model/v1alpha/task_instance_segmentation.proto @@ -0,0 +1,48 @@ +syntax = "proto3"; + +package model.model.v1alpha; + +// Google api +import "google/api/field_behavior.proto"; +import "model/model/v1alpha/common.proto"; + +// InstanceSegmentationObject is an object in an image, localized and +// delineated. +message InstanceSegmentationObject { + // RLE segmentation mask. + string rle = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + // Category. + string category = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + // Score. + float score = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + // Bounding box. + BoundingBox bounding_box = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// InstanceSegmentationInput represents the input of an instance segmentation +// task. +message InstanceSegmentationInput { + // Content of the input. + oneof type { + // Image URL. + string image_url = 1; + // Base64-encoded image. + string image_base64 = 2; + } +} + +// InstanceSegmentationInputStream represents the input of an instance +// segmentation task when the input is streamed as binary files. +message InstanceSegmentationInputStream { + // File length for each uploaded binary file. + repeated uint32 file_lengths = 1 [(google.api.field_behavior) = REQUIRED]; + // Byte representation of the images. + bytes content = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// InstanceSegmentationOutput contains the result of an instance segmentation +// task. +message InstanceSegmentationOutput { + // A list of instance segmentation objects. + repeated InstanceSegmentationObject objects = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/model/model/v1alpha/task_keypoint.proto b/model/model/v1alpha/task_keypoint.proto new file mode 100644 index 00000000..5fec00e6 --- /dev/null +++ b/model/model/v1alpha/task_keypoint.proto @@ -0,0 +1,54 @@ +syntax = "proto3"; + +package model.model.v1alpha; + +// Google api +import "google/api/field_behavior.proto"; +import "model/model/v1alpha/common.proto"; + +// Keypoint contains the coordinates and visibility of a keypoint in an object. +message Keypoint { + // X coordinate. + float x = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + // Y coordinate. + float y = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + // Visibility. + float v = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// KeypointObject is a detected object with its keypoints, e.g. 
a detected +// human shape with its legs, arms, core, etc. +message KeypointObject { + // Keypoints. + repeated Keypoint keypoints = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + // Score. + float score = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + // Bounding box. + BoundingBox bounding_box = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// KeypointInput represents the input of a keypoint detection task. +message KeypointInput { + // Content of the input. + oneof type { + // Image URL. + string image_url = 1; + // Base64-encoded image. + string image_base64 = 2; + } +} + +// KeypointInputStream represents the input of a keypoint detection task when +// the input is streamed as binary files. +message KeypointInputStream { + // File length for each uploaded binary file. + repeated uint32 file_lengths = 1 [(google.api.field_behavior) = REQUIRED]; + // Byte representation of the images. + bytes content = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// KeypointOutput represents the result of a keypoint detection task. +message KeypointOutput { + // A list of keypoint objects. + repeated KeypointObject objects = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/model/model/v1alpha/task_ocr.proto b/model/model/v1alpha/task_ocr.proto new file mode 100644 index 00000000..d0ddc46a --- /dev/null +++ b/model/model/v1alpha/task_ocr.proto @@ -0,0 +1,44 @@ +syntax = "proto3"; + +package model.model.v1alpha; + +// Google api +import "google/api/field_behavior.proto"; +import "model/model/v1alpha/common.proto"; + +// OcrObject is a text object within an image. OCR stands for Optical Character +// Recognition. +message OcrObject { + // Text. + string text = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + // Score. + float score = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + // Bounding box. + BoundingBox bounding_box = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// OcrInput represents the input of an OCR task. +message OcrInput { + // Content of the input. + oneof type { + // Image URL. + string image_url = 1; + // Base64-encoded image. + string image_base64 = 2; + } +} + +// OcrInputStream represents the input of an OCR task when the input is +// streamed as binary files. +message OcrInputStream { + // File length for each uploaded binary file. + repeated uint32 file_lengths = 1 [(google.api.field_behavior) = REQUIRED]; + // Byte representation of the images. + bytes content = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// OcrOutput contains the result of an OCR task. +message OcrOutput { + // A list of text objects. + repeated OcrObject objects = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/model/model/v1alpha/task_semantic_segmentation.proto b/model/model/v1alpha/task_semantic_segmentation.proto new file mode 100644 index 00000000..757ac56d --- /dev/null +++ b/model/model/v1alpha/task_semantic_segmentation.proto @@ -0,0 +1,43 @@ +syntax = "proto3"; + +package model.model.v1alpha; + +// Google api +import "google/api/field_behavior.proto"; + +// SemanticSegmentationStuff is an object detected within an image and +// classified into a predefined category. +message SemanticSegmentationStuff { + // RLE segmentation mask. + string rle = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + // Category. + string category = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// SemanticSegmentationInput represents the input of a semantic segmentation +// task. +message SemanticSegmentationInput { + // Content of the input. 
+  oneof type {
+    // Image URL.
+    string image_url = 1;
+    // Base64-encoded image.
+    string image_base64 = 2;
+  }
+}
+
+// SemanticSegmentationInputStream represents the input of a semantic
+// segmentation task when the input is streamed as binary files.
+message SemanticSegmentationInputStream {
+  // File length for each uploaded binary file.
+  repeated uint32 file_lengths = 1 [(google.api.field_behavior) = REQUIRED];
+  // Byte representation of the images.
+  bytes content = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// SemanticSegmentationOutput represents the output of a semantic segmentation
+// task.
+message SemanticSegmentationOutput {
+  // A list of detected objects classified into categories.
+  repeated SemanticSegmentationStuff stuffs = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
diff --git a/model/model/v1alpha/task_text_generation.proto b/model/model/v1alpha/task_text_generation.proto
new file mode 100644
index 00000000..87157d56
--- /dev/null
+++ b/model/model/v1alpha/task_text_generation.proto
@@ -0,0 +1,37 @@
+syntax = "proto3";
+
+package model.model.v1alpha;
+
+// Google api
+import "google/api/field_behavior.proto";
+import "google/protobuf/struct.proto";
+import "model/model/v1alpha/common.proto";
+
+// TextGenerationInput represents the input of a text generation task.
+message TextGenerationInput {
+  // Prompt text.
+  string prompt = 1 [(google.api.field_behavior) = REQUIRED];
+  // Prompt images.
+  repeated PromptImage prompt_images = 2 [(google.api.field_behavior) = OPTIONAL];
+  // Chat history.
+  repeated Message chat_history = 3 [(google.api.field_behavior) = OPTIONAL];
+  // System message, which sets the behaviour of the assistant.
+  optional string system_message = 4 [(google.api.field_behavior) = OPTIONAL];
+  // Maximum number of generation tokens.
+  optional int32 max_new_tokens = 5 [(google.api.field_behavior) = OPTIONAL];
+  // Sampling temperature.
+  optional float temperature = 6 [(google.api.field_behavior) = OPTIONAL];
+  // Sampling Top K, number of tokens at the top from which the model will
+  // sample.
+  optional int32 top_k = 7 [(google.api.field_behavior) = OPTIONAL];
+  // Seed.
+  optional int32 seed = 8 [(google.api.field_behavior) = OPTIONAL];
+  // Extra parameters.
+  google.protobuf.Struct extra_params = 9 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// TextGenerationOutput contains the result of a text generation task.
+message TextGenerationOutput {
+  // Text generated by the model.
+  string text = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
diff --git a/model/model/v1alpha/task_text_generation_chat.proto b/model/model/v1alpha/task_text_generation_chat.proto
new file mode 100644
index 00000000..25df5f5c
--- /dev/null
+++ b/model/model/v1alpha/task_text_generation_chat.proto
@@ -0,0 +1,37 @@
+syntax = "proto3";
+
+package model.model.v1alpha;
+
+// Google api
+import "google/api/field_behavior.proto";
+import "google/protobuf/struct.proto";
+import "model/model/v1alpha/common.proto";
+
+// TextGenerationChatInput represents the input of a text generation chat task.
+message TextGenerationChatInput {
+  // Prompt text.
+  string prompt = 1 [(google.api.field_behavior) = REQUIRED];
+  // Prompt images.
+  repeated PromptImage prompt_images = 2 [(google.api.field_behavior) = OPTIONAL];
+  // Chat history.
+  repeated Message chat_history = 3 [(google.api.field_behavior) = OPTIONAL];
+  // System message, which sets the behaviour of the assistant.
+ optional string system_message = 4 [(google.api.field_behavior) = OPTIONAL]; + // Maximum number of generation tokens. + optional int32 max_new_tokens = 5 [(google.api.field_behavior) = OPTIONAL]; + // Sampling temperature. + optional float temperature = 6 [(google.api.field_behavior) = OPTIONAL]; + // Sampling Top K, number of tokens at the top from which the model will + // sample. + optional int32 top_k = 7 [(google.api.field_behavior) = OPTIONAL]; + // Seed. + optional int32 seed = 8 [(google.api.field_behavior) = OPTIONAL]; + // Extra parameters. + google.protobuf.Struct extra_params = 9 [(google.api.field_behavior) = OPTIONAL]; +} + +// TextGenerationChatOutput contains the result of a text generation chat task. +message TextGenerationChatOutput { + // Text generated by the model. + string text = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/model/model/v1alpha/task_text_to_image.proto b/model/model/v1alpha/task_text_to_image.proto new file mode 100644 index 00000000..24acca89 --- /dev/null +++ b/model/model/v1alpha/task_text_to_image.proto @@ -0,0 +1,36 @@ +syntax = "proto3"; + +package model.model.v1alpha; + +// Google api +import "google/api/field_behavior.proto"; +import "google/protobuf/struct.proto"; + +// TextToImageInput represents the input of a text-to-image task. +message TextToImageInput { + // Prompt text. + string prompt = 1 [(google.api.field_behavior) = REQUIRED]; + // Prompt image, only for multimodal input. + oneof type { + // Image URL. + string prompt_image_url = 2; + // Base64-encoded image. + string prompt_image_base64 = 3; + } + // Steps, defaults to 5. + optional int32 steps = 4 [(google.api.field_behavior) = OPTIONAL]; + // Guidance scale, defaults to 7.5. + optional float cfg_scale = 5 [(google.api.field_behavior) = OPTIONAL]; + // Seed, defaults to 0. + optional int32 seed = 6 [(google.api.field_behavior) = OPTIONAL]; + // Number of generated samples, default is 1. + optional int32 samples = 7 [(google.api.field_behavior) = OPTIONAL]; + // Extra parameters. + google.protobuf.Struct extra_params = 9 [(google.api.field_behavior) = OPTIONAL]; +} + +// TextToImageOutput contains the result of a text-to-image task. +message TextToImageOutput { + // A list of generated images, encoded in base64. + repeated string images = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/model/model/v1alpha/task_unspecified.proto b/model/model/v1alpha/task_unspecified.proto new file mode 100644 index 00000000..79714b5f --- /dev/null +++ b/model/model/v1alpha/task_unspecified.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package model.model.v1alpha; + +// Google api +import "google/api/field_behavior.proto"; +import "google/protobuf/struct.proto"; + +// UnspecifiedInput represents the input of an unspecified task. +message UnspecifiedInput { + // A list of values for the task. + repeated google.protobuf.Struct raw_inputs = 1; +} + +// UnspecifiedOutput contains the result of an unspecified task. +message UnspecifiedOutput { + // The task outputs. 
+  repeated google.protobuf.Struct raw_outputs = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
diff --git a/model/model/v1alpha/task_visual_question_answering.proto b/model/model/v1alpha/task_visual_question_answering.proto
new file mode 100644
index 00000000..5bb5d89c
--- /dev/null
+++ b/model/model/v1alpha/task_visual_question_answering.proto
@@ -0,0 +1,39 @@
+syntax = "proto3";
+
+package model.model.v1alpha;
+
+// Google api
+import "google/api/field_behavior.proto";
+import "google/protobuf/struct.proto";
+import "model/model/v1alpha/common.proto";
+
+// VisualQuestionAnsweringInput represents the input of a visual
+// question-answering task.
+message VisualQuestionAnsweringInput {
+  // Prompt text.
+  string prompt = 1 [(google.api.field_behavior) = REQUIRED];
+  // Prompt images.
+  repeated PromptImage prompt_images = 2 [(google.api.field_behavior) = OPTIONAL];
+  // Chat history.
+  repeated Message chat_history = 3 [(google.api.field_behavior) = OPTIONAL];
+  // System message, which sets the behaviour of the assistant.
+  optional string system_message = 4 [(google.api.field_behavior) = OPTIONAL];
+  // Maximum number of generation tokens.
+  optional int32 max_new_tokens = 5 [(google.api.field_behavior) = OPTIONAL];
+  // Sampling temperature.
+  optional float temperature = 6 [(google.api.field_behavior) = OPTIONAL];
+  // Sampling Top K, number of tokens at the top from which the model will
+  // sample.
+  optional int32 top_k = 7 [(google.api.field_behavior) = OPTIONAL];
+  // Seed.
+  optional int32 seed = 8 [(google.api.field_behavior) = OPTIONAL];
+  // Extra parameters.
+  google.protobuf.Struct extra_params = 9 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// VisualQuestionAnsweringOutput contains the result of a visual
+// question-answering task.
+message VisualQuestionAnsweringOutput {
+  // The text generated by the model.
+  string text = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
diff --git a/openapiv2/model/service.swagger.yaml b/openapiv2/model/service.swagger.yaml
index f014d0c8..4cadd69a 100644
--- a/openapiv2/model/service.swagger.yaml
+++ b/openapiv2/model/service.swagger.yaml
@@ -1210,6 +1210,16 @@ paths:
                   license:
                     type: string
                     description: License under which the model is distributed.
+                  sampleInput:
+                    title: Sample input for this model
+                    readOnly: true
+                    allOf:
+                      - $ref: '#/definitions/v1alphaTaskInput'
+                  sampleOutput:
+                    title: Sample output for this model
+                    readOnly: true
+                    allOf:
+                      - $ref: '#/definitions/v1alphaTaskOutput'
                   profileImage:
                     type: string
                     description: Model profile image in base64 format.
@@ -1938,6 +1948,16 @@ paths:
                   license:
                     type: string
                     description: License under which the model is distributed.
+                  sampleInput:
+                    title: Sample input for this model
+                    readOnly: true
+                    allOf:
+                      - $ref: '#/definitions/v1alphaTaskInput'
+                  sampleOutput:
+                    title: Sample output for this model
+                    readOnly: true
+                    allOf:
+                      - $ref: '#/definitions/v1alphaTaskOutput'
                   profileImage:
                     type: string
                     description: Model profile image in base64 format.
@@ -2499,7 +2519,12 @@ paths:
           - VIEW_BASIC
           - VIEW_FULL
       - name: orderBy
-        description: "Sort the results by the given expression. \nFormat: `field [ASC | DESC], where `field` can be:\n- `create_time`\n- `update_time`\nBy default, results are sorted by descending creation time."
+        description: |-
+          Sort the results by the given expression.
+          Format: `field [ASC | DESC]`, where `field` can be:
+          - `create_time`
+          - `update_time`
+          By default, results are sorted by descending creation time.
in: query required: false type: string @@ -2556,7 +2581,8 @@ definitions: type: array items: type: object - description: Model inference inputs. + $ref: '#/definitions/v1alphaTaskInput' + description: Inference input parameters. description: |- TriggerAsyncNamespaceLatestModelRequest represents a request to trigger a model inference asynchronously with the latest uploaded version. @@ -2569,7 +2595,8 @@ definitions: type: array items: type: object - description: Model inference inputs. + $ref: '#/definitions/v1alphaTaskInput' + description: Inference input parameters. description: |- TriggerAsyncNamespaceModelRequest represents a request to trigger a model inference asynchronously. @@ -2582,7 +2609,8 @@ definitions: type: array items: type: object - description: Model inference inputs. + $ref: '#/definitions/v1alphaTaskInput' + description: Inference input parameters. title: |- TriggerAsyncOrganizationLatestModelRequest represents a request to trigger a model inference asynchronously @@ -2595,7 +2623,8 @@ definitions: type: array items: type: object - description: Model inference inputs. + $ref: '#/definitions/v1alphaTaskInput' + description: Inference input parameters. title: |- TriggerAsyncOrganizationModelRequest represents a request to trigger a model inference asynchronously @@ -2608,7 +2637,8 @@ definitions: type: array items: type: object - description: Model inference inputs. + $ref: '#/definitions/v1alphaTaskInput' + description: Inference input parameters. description: |- TriggerAsyncUserLatestModelRequest represents a request to trigger a model inference asynchronously with the latest uploaded version. @@ -2621,7 +2651,8 @@ definitions: type: array items: type: object - description: Model inference inputs. + $ref: '#/definitions/v1alphaTaskInput' + description: Inference input parameters. description: |- TriggerAsyncUserModelRequest represents a request to trigger a model inference asynchronously. @@ -2634,7 +2665,8 @@ definitions: type: array items: type: object - description: Model inference inputs. + $ref: '#/definitions/v1alphaTaskInput' + description: Inference input parameters. description: |- TriggerNamespaceLatestModelRequest represents a request to trigger a model inference with the latest uploaded version. @@ -2647,7 +2679,8 @@ definitions: type: array items: type: object - description: Model inference inputs. + $ref: '#/definitions/v1alphaTaskInput' + description: Inference input parameters. description: TriggerNamespaceModelRequest represents a request to trigger a model inference. required: - taskInputs @@ -2658,7 +2691,8 @@ definitions: type: array items: type: object - description: Model inference inputs. + $ref: '#/definitions/v1alphaTaskInput' + description: Inference input parameters. description: TriggerOrganizationLatestModelRequest represents a request to trigger a model inference. required: - taskInputs @@ -2669,7 +2703,8 @@ definitions: type: array items: type: object - description: Model inference inputs. + $ref: '#/definitions/v1alphaTaskInput' + description: Inference input parameters. description: TriggerOrganizationModelRequest represents a request to trigger a model inference. required: - taskInputs @@ -2680,7 +2715,8 @@ definitions: type: array items: type: object - description: Model inference inputs. + $ref: '#/definitions/v1alphaTaskInput' + description: Inference input parameters. description: |- TriggerUserLatestModelRequest represents a request to trigger a model inference with the latest uploaded version. 
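The hunks above and below replace the untyped `taskInputs` items (previously a bare `type: object`) with a `$ref` to `v1alphaTaskInput`, so trigger request bodies are validated against the task schemas defined later in this file. As a rough sketch only — the host, auth scheme, and exact trigger route below are assumptions for illustration, not taken from this diff — such a request could be issued like this:

    # Sketch: trigger a model with a typed TaskInput payload.
    # Host, token, and model path are hypothetical placeholders.
    import requests

    API = "https://api.example.com/model/v1alpha"  # assumed host
    HEADERS = {"Authorization": "Bearer <token>"}  # assumed auth scheme

    body = {
        "taskInputs": [
            # Each element is a v1alphaTaskInput with exactly one
            # task-specific field set; here, classification by image URL.
            {"classification": {"imageUrl": "https://example.com/dog.jpg"}}
        ]
    }

    resp = requests.post(
        f"{API}/users/me/models/my-model/trigger",  # assumed route shape
        json=body,
        headers=HEADERS,
        timeout=30,
    )
    resp.raise_for_status()
    print(resp.json()["taskOutputs"])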
@@ -2693,7 +2729,8 @@ definitions:
       type: array
       items:
         type: object
-      description: Model inference inputs.
+        $ref: '#/definitions/v1alphaTaskInput'
+      description: Inference input parameters.
     description: TriggerUserModelRequest represents a request to trigger a model inference.
     required:
       - taskInputs
@@ -2873,8 +2910,7 @@ definitions:

       Note: this functionality is not currently available in the official
       protobuf release, and it is not used for type URLs beginning with
-      type.googleapis.com. As of May 2023, there are no widely used type server
-      implementations and no plans to implement one.
+      type.googleapis.com.

       Schemes other than `http`, `https` (or the empty scheme) might be
       used with implementation specific semantics.
@@ -2909,7 +2945,7 @@ definitions:
         foo = any.unpack(Foo.getDefaultInstance());
       }

-      Example 3: Pack and unpack a message in Python.
+      Example 3: Pack and unpack a message in Python.

       foo = Foo(...)
       any = Any()
@@ -2919,7 +2955,7 @@ definitions:
       any.Unpack(foo)
       ...

-      Example 4: Pack and unpack a message in Go
+      Example 4: Pack and unpack a message in Go

       foo := &pb.Foo{...}
       any, err := anypb.New(foo)
@@ -2939,7 +2975,7 @@ definitions:
       name "y.z".

       JSON
-      ====
+
       The JSON representation of an `Any` value uses the regular
       representation of the deserialized, embedded message, with an
       additional field `@type` which contains the type URL. Example:
@@ -2971,13 +3007,80 @@ definitions:

       `NullValue` is a singleton enumeration to represent the null value for the
       `Value` type union.

-      The JSON representation for `NullValue` is JSON `null`.
+      The JSON representation for `NullValue` is JSON `null`.
+  v1alphaBoundingBox:
+    type: object
+    properties:
+      top:
+        type: number
+        format: float
+        description: Top Y-axis.
+        readOnly: true
+      left:
+        type: number
+        format: float
+        description: Left X-axis.
+        readOnly: true
+      width:
+        type: number
+        format: float
+        description: Width.
+        readOnly: true
+      height:
+        type: number
+        format: float
+        description: Height.
+        readOnly: true
+    description: |-
+      BoundingBox represents a frame in an image that can be drawn when detecting
+      patterns in objects.
+  v1alphaClassificationInput:
+    type: object
+    properties:
+      imageUrl:
+        type: string
+        description: Image URL.
+      imageBase64:
+        type: string
+        description: Base64-encoded image.
+    description: ClassificationInput is the input of an image classification task.
+  v1alphaClassificationInputStream:
+    type: object
+    properties:
+      fileLengths:
+        type: array
+        items:
+          type: integer
+          format: int64
+        description: File length for each uploaded binary file.
+      content:
+        type: string
+        format: byte
+        description: Byte representation of the images.
+    description: |-
+      ClassificationInputStream represents the input of an image classification
+      task when the input is streamed as binary files.
+    required:
+      - fileLengths
+      - content
+  v1alphaClassificationOutput:
+    type: object
+    properties:
+      category:
+        type: string
+        description: Category.
+        readOnly: true
+      score:
+        type: number
+        format: float
+        description: Score.
+        readOnly: true
+    description: ClassificationOutput contains the result of an image classification task.
   v1alphaCreateNamespaceModelResponse:
     type: object
     properties:
       model:
         description: The created model resource.
-        readOnly: true
         allOf:
           - $ref: '#/definitions/v1alphaModel'
     description: CreateNamespaceModelResponse contains the created model.
@@ -2986,7 +3089,6 @@
     properties:
       model:
         description: The created model resource.
- readOnly: true allOf: - $ref: '#/definitions/v1alphaModel' description: CreateOrganizationModelResponse contains the created model. @@ -2995,7 +3097,6 @@ definitions: properties: model: description: The created model resource. - readOnly: true allOf: - $ref: '#/definitions/v1alphaModel' description: CreateUserModelResponse contains the created model. @@ -3026,6 +3127,64 @@ definitions: v1alphaDeployUserModelAdminResponse: type: object title: DeployUserModelAdminResponse represents a response for a deployed model + v1alphaDetectionInput: + type: object + properties: + imageUrl: + type: string + description: Image URL. + imageBase64: + type: string + description: Base64-encoded image. + description: DetectionInput represents the input of an object detection task. + v1alphaDetectionInputStream: + type: object + properties: + fileLengths: + type: array + items: + type: integer + format: int64 + description: File length for each uploaded binary file. + content: + type: string + format: byte + description: Byte representation of the images. + description: |- + DetectionInputStream represents the input of an object detection task when + the input is streamed as binary files. + required: + - fileLengths + - content + v1alphaDetectionObject: + type: object + properties: + category: + type: string + description: Category. + readOnly: true + score: + type: number + format: float + description: Score. + readOnly: true + boundingBox: + description: Bounding box. + readOnly: true + allOf: + - $ref: '#/definitions/v1alphaBoundingBox' + description: DetectionObject represents an identified object in an image. + v1alphaDetectionOutput: + type: object + properties: + objects: + type: array + items: + type: object + $ref: '#/definitions/v1alphaDetectionObject' + description: A list of detected objects. + readOnly: true + description: DetectionOutput contains the result of an object detection task. v1alphaGetModelDefinitionResponse: type: object properties: @@ -3040,7 +3199,6 @@ definitions: properties: operation: description: The long-running operation. - readOnly: true allOf: - $ref: '#/definitions/googlelongrunningOperation' description: |- @@ -3051,7 +3209,6 @@ definitions: properties: operation: description: The long-running operation. - readOnly: true allOf: - $ref: '#/definitions/googlelongrunningOperation' description: |- @@ -3062,7 +3219,6 @@ definitions: properties: model: description: The model resource. - readOnly: true allOf: - $ref: '#/definitions/v1alphaModel' description: GetNamespaceModelResponse contains the requested model. @@ -3071,7 +3227,6 @@ definitions: properties: operation: description: The long-running operation. - readOnly: true allOf: - $ref: '#/definitions/googlelongrunningOperation' description: |- @@ -3082,7 +3237,6 @@ definitions: properties: model: description: The model resource. - readOnly: true allOf: - $ref: '#/definitions/v1alphaModel' description: GetOrganizationModelResponse contains the requested model. @@ -3091,7 +3245,6 @@ definitions: properties: operation: description: The long-running operation. - readOnly: true allOf: - $ref: '#/definitions/googlelongrunningOperation' description: |- @@ -3102,10 +3255,218 @@ definitions: properties: model: description: The model resource. - readOnly: true allOf: - $ref: '#/definitions/v1alphaModel' description: GetUserModelResponse contains the requested model. + v1alphaImageContent: + type: object + properties: + imageUrl: + description: |- + Image as URL or base64 code. 
+
+          NOTE: the `image_url` name comes from a convention from OpenAI; it doesn't
+          determine the format of the image.
+        allOf:
+          - $ref: '#/definitions/v1alphaPromptImage'
+      detail:
+        type: string
+        description: Additional information.
+    description: ImageContent holds an image with some details in plain text.
+  v1alphaImageToImageInput:
+    type: object
+    properties:
+      promptImageUrl:
+        type: string
+        description: Image URL.
+      promptImageBase64:
+        type: string
+        description: Base64-encoded image.
+      prompt:
+        type: string
+        description: Input text.
+      steps:
+        type: integer
+        format: int32
+        description: Steps, defaults to 5.
+      cfgScale:
+        type: number
+        format: float
+        title: Guidance scale, defaults to 7.5
+      seed:
+        type: integer
+        format: int32
+        description: Seed, defaults to 0.
+      samples:
+        type: integer
+        format: int32
+        description: Number of generated samples, defaults to 1.
+      extraParams:
+        type: object
+        description: Extra parameters.
+    description: ImageToImageInput represents the input of an image-to-image task.
+    required:
+      - prompt
+  v1alphaImageToImageOutput:
+    type: object
+    properties:
+      images:
+        type: array
+        items:
+          type: string
+        description: A list of generated images.
+        readOnly: true
+    description: ImageToImageOutput contains the result of an image-to-image task.
+  v1alphaInstanceSegmentationInput:
+    type: object
+    properties:
+      imageUrl:
+        type: string
+        description: Image URL.
+      imageBase64:
+        type: string
+        description: Base64-encoded image.
+    description: |-
+      InstanceSegmentationInput represents the input of an instance segmentation
+      task.
+  v1alphaInstanceSegmentationInputStream:
+    type: object
+    properties:
+      fileLengths:
+        type: array
+        items:
+          type: integer
+          format: int64
+        description: File length for each uploaded binary file.
+      content:
+        type: string
+        format: byte
+        description: Byte representation of the images.
+    description: |-
+      InstanceSegmentationInputStream represents the input of an instance
+      segmentation task when the input is streamed as binary files.
+    required:
+      - fileLengths
+      - content
+  v1alphaInstanceSegmentationObject:
+    type: object
+    properties:
+      rle:
+        type: string
+        description: RLE segmentation mask.
+        readOnly: true
+      category:
+        type: string
+        description: Category.
+        readOnly: true
+      score:
+        type: number
+        format: float
+        description: Score.
+        readOnly: true
+      boundingBox:
+        description: Bounding box.
+        readOnly: true
+        allOf:
+          - $ref: '#/definitions/v1alphaBoundingBox'
+    description: |-
+      InstanceSegmentationObject is an object in an image, localized and
+      delineated.
+  v1alphaInstanceSegmentationOutput:
+    type: object
+    properties:
+      objects:
+        type: array
+        items:
+          type: object
+          $ref: '#/definitions/v1alphaInstanceSegmentationObject'
+        description: A list of instance segmentation objects.
+        readOnly: true
+    description: |-
+      InstanceSegmentationOutput contains the result of an instance segmentation
+      task.
+  v1alphaKeypoint:
+    type: object
+    properties:
+      x:
+        type: number
+        format: float
+        description: X coordinate.
+        readOnly: true
+      "y":
+        type: number
+        format: float
+        description: Y coordinate.
+        readOnly: true
+      v:
+        type: number
+        format: float
+        description: Visibility.
+        readOnly: true
+    description: Keypoint contains the coordinates and visibility of a keypoint in an object.
+  v1alphaKeypointInput:
+    type: object
+    properties:
+      imageUrl:
+        type: string
+        description: Image URL.
+      imageBase64:
+        type: string
+        description: Base64-encoded image.
+ description: KeypointInput represents the input of a keypoint detection task. + v1alphaKeypointInputStream: + type: object + properties: + fileLengths: + type: array + items: + type: integer + format: int64 + description: File length for each uploaded binary file. + content: + type: string + format: byte + description: Byte representation of the images. + description: |- + KeypointInputStream represents the input of a keypoint detection task when + the input is streamed as binary files. + required: + - fileLengths + - content + v1alphaKeypointObject: + type: object + properties: + keypoints: + type: array + items: + type: object + $ref: '#/definitions/v1alphaKeypoint' + description: Keypoints. + readOnly: true + score: + type: number + format: float + description: Score. + readOnly: true + boundingBox: + description: Bounding box. + readOnly: true + allOf: + - $ref: '#/definitions/v1alphaBoundingBox' + description: |- + KeypointObject is a detected object with its keypoints, e.g. a detected + human shape with its legs, arms, core, etc. + v1alphaKeypointOutput: + type: object + properties: + objects: + type: array + items: + type: object + $ref: '#/definitions/v1alphaKeypointObject' + description: A list of keypoint objects. + readOnly: true + description: KeypointOutput represents the result of a keypoint detection task. v1alphaListAvailableRegionsResponse: type: object properties: @@ -3115,7 +3476,6 @@ definitions: type: object $ref: '#/definitions/v1alphaRegion' title: A list of available region - readOnly: true description: |- ListAvailableRegionsResponse contains a list of available regions and hardware types a model can be deployed on. @@ -3188,16 +3548,13 @@ definitions: type: object $ref: '#/definitions/v1alphaModel' description: A list of model resources. - readOnly: true nextPageToken: type: string description: Next page token. - readOnly: true totalSize: type: integer format: int32 description: Total number of models. - readOnly: true description: ListModelsResponse contains a list of models. v1alphaListNamespaceModelVersionsResponse: type: object @@ -3234,16 +3591,13 @@ definitions: type: object $ref: '#/definitions/v1alphaModel' description: A list of model resources. - readOnly: true nextPageToken: type: string description: Next page token. - readOnly: true totalSize: type: integer format: int32 description: Total number of models. - readOnly: true description: ListNamespaceModelsResponse contains a list of models. v1alphaListOrganizationModelVersionsResponse: type: object @@ -3254,22 +3608,18 @@ definitions: type: object $ref: '#/definitions/v1alphaModelVersion' description: A list of model resources. - readOnly: true totalSize: type: integer format: int32 description: Total number of versions. - readOnly: true pageSize: type: integer format: int32 description: The requested page size. - readOnly: true page: type: integer format: int32 description: The requested page offset. - readOnly: true description: ListOrganizationModelVersionsResponse contains a list of models. v1alphaListOrganizationModelsResponse: type: object @@ -3280,16 +3630,13 @@ definitions: type: object $ref: '#/definitions/v1alphaModel' description: A list of model resources. - readOnly: true nextPageToken: type: string description: Next page token. - readOnly: true totalSize: type: integer format: int32 description: Total number of models. - readOnly: true description: ListOrganizationModelsResponse contains a list of models. 
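The list responses above share one pagination contract: `nextPageToken` is empty on the final page and `totalSize` reports the overall count. A minimal sketch of draining such a list follows — the host is a placeholder, and the `pageSize`/`pageToken` query-parameter names are an assumption inferred from the response fields, not shown in this diff:

    # Sketch: collect every model by following nextPageToken.
    import requests

    API = "https://api.example.com/model/v1alpha"  # assumed host
    HEADERS = {"Authorization": "Bearer <token>"}  # assumed auth scheme

    models, token = [], None
    while True:
        params = {"pageSize": 100}          # assumed parameter name
        if token:
            params["pageToken"] = token     # assumed parameter name
        page = requests.get(f"{API}/models", params=params,
                            headers=HEADERS, timeout=30)
        page.raise_for_status()
        data = page.json()
        models.extend(data.get("models", []))
        token = data.get("nextPageToken")
        if not token:  # an empty token marks the last page
            break
    print(f"fetched {len(models)} of {data.get('totalSize')} models")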
  v1alphaListUserModelVersionsResponse:
    type: object
@@ -3300,22 +3647,18 @@ definitions:
           type: object
           $ref: '#/definitions/v1alphaModelVersion'
         description: A list of model resources.
-        readOnly: true
       totalSize:
         type: integer
         format: int32
         description: Total number of versions.
-        readOnly: true
       pageSize:
         type: integer
         format: int32
         description: The requested page size.
-        readOnly: true
       page:
         type: integer
         format: int32
         description: The requested page offset.
-        readOnly: true
     description: ListUserModelVersionsResponse contains a list of models.
   v1alphaListUserModelsResponse:
     type: object
@@ -3326,16 +3669,13 @@ definitions:
           type: object
           $ref: '#/definitions/v1alphaModel'
         description: A list of model resources.
-        readOnly: true
       nextPageToken:
         type: string
         description: Next page token.
-        readOnly: true
       totalSize:
         type: integer
         format: int32
         description: Total number of models.
-        readOnly: true
     description: ListUserModelsResponse contains a list of models.
   v1alphaLookUpModelAdminResponse:
     type: object
@@ -3345,6 +3685,39 @@ definitions:
         allOf:
           - $ref: '#/definitions/v1alphaModel'
       title: LookUpModelResponse represents a response for a model
+  v1alphaMessage:
+    type: object
+    properties:
+      role:
+        type: string
+        description: The Role of a message, e.g. `system`, `user`, etc. Defines the way the model answers questions.
+      content:
+        type: array
+        items:
+          type: object
+          $ref: '#/definitions/v1alphaMessageContent'
+        description: Message content.
+    description: Message is used in chat history in text generation models.
+  v1alphaMessageContent:
+    type: object
+    properties:
+      type:
+        type: string
+        description: Content type.
+      imageUrl:
+        description: |-
+          Image content.
+
+          NOTE: the `image_url` name comes from a convention from OpenAI; it doesn't
+          determine the format of the image.
+        allOf:
+          - $ref: '#/definitions/v1alphaImageContent'
+      text:
+        type: string
+        description: Text content.
+    description: |-
+      MessageContent is a message in a chat history message, used in text
+      generation models.
   v1alphaModel:
    type: object
    properties:
@@ -3427,6 +3800,16 @@ definitions:
       license:
         type: string
         description: License under which the model is distributed.
+      sampleInput:
+        title: Sample input for this model
+        readOnly: true
+        allOf:
+          - $ref: '#/definitions/v1alphaTaskInput'
+      sampleOutput:
+        title: Sample output for this model
+        readOnly: true
+        allOf:
+          - $ref: '#/definitions/v1alphaTaskOutput'
       profileImage:
         type: string
         description: Model profile image in base64 format.
@@ -3634,6 +4017,76 @@ definitions:

        - VISIBILITY_PRIVATE: Only the owner can see the model.
        - VISIBILITY_PUBLIC: Other users can see the model.
+  v1alphaOcrInput:
+    type: object
+    properties:
+      imageUrl:
+        type: string
+        description: Image URL.
+      imageBase64:
+        type: string
+        description: Base64-encoded image.
+    description: OcrInput represents the input of an OCR task.
+  v1alphaOcrInputStream:
+    type: object
+    properties:
+      fileLengths:
+        type: array
+        items:
+          type: integer
+          format: int64
+        description: File length for each uploaded binary file.
+      content:
+        type: string
+        format: byte
+        description: Byte representation of the images.
+    description: |-
+      OcrInputStream represents the input of an OCR task when the input is
+      streamed as binary files.
+    required:
+      - fileLengths
+      - content
+  v1alphaOcrObject:
+    type: object
+    properties:
+      text:
+        type: string
+        description: Text.
+        readOnly: true
+      score:
+        type: number
+        format: float
+        description: Score.
+        readOnly: true
+      boundingBox:
+        description: Bounding box.
+        readOnly: true
+        allOf:
+          - $ref: '#/definitions/v1alphaBoundingBox'
+    description: |-
+      OcrObject is a text object within an image. OCR stands for Optical Character
+      Recognition.
+  v1alphaOcrOutput:
+    type: object
+    properties:
+      objects:
+        type: array
+        items:
+          type: object
+          $ref: '#/definitions/v1alphaOcrObject'
+        description: A list of text objects.
+        readOnly: true
+    description: OcrOutput contains the result of an OCR task.
+  v1alphaPromptImage:
+    type: object
+    properties:
+      promptImageUrl:
+        type: string
+        description: Image URL.
+      promptImageBase64:
+        type: string
+        description: Base64-encoded image.
+    description: PromptImage is an image input for model inference.
   v1alphaRegion:
     type: object
     properties:
@@ -3667,7 +4120,6 @@ definitions:
     properties:
       model:
         description: The renamed model resource.
-        readOnly: true
         allOf:
           - $ref: '#/definitions/v1alphaModel'
     description: RenameNamespaceModelResponse contains a renamed model.
@@ -3676,7 +4128,6 @@ definitions:
     properties:
       model:
         description: The renamed model resource.
-        readOnly: true
        allOf:
           - $ref: '#/definitions/v1alphaModel'
     description: RenameOrganizationModelResponse contains a renamed model.
@@ -3685,10 +4136,67 @@ definitions:
     properties:
       model:
         description: The renamed model resource.
-        readOnly: true
         allOf:
           - $ref: '#/definitions/v1alphaModel'
     description: RenameUserModelResponse contains a renamed model.
+  v1alphaSemanticSegmentationInput:
+    type: object
+    properties:
+      imageUrl:
+        type: string
+        description: Image URL.
+      imageBase64:
+        type: string
+        description: Base64-encoded image.
+    description: |-
+      SemanticSegmentationInput represents the input of a semantic segmentation
+      task.
+  v1alphaSemanticSegmentationInputStream:
+    type: object
+    properties:
+      fileLengths:
+        type: array
+        items:
+          type: integer
+          format: int64
+        description: File length for each uploaded binary file.
+      content:
+        type: string
+        format: byte
+        description: Byte representation of the images.
+    description: |-
+      SemanticSegmentationInputStream represents the input of a semantic
+      segmentation task when the input is streamed as binary files.
+    required:
+      - fileLengths
+      - content
+  v1alphaSemanticSegmentationOutput:
+    type: object
+    properties:
+      stuffs:
+        type: array
+        items:
+          type: object
+          $ref: '#/definitions/v1alphaSemanticSegmentationStuff'
+        description: A list of detected objects classified into categories.
+        readOnly: true
+    description: |-
+      SemanticSegmentationOutput represents the output of a semantic segmentation
+      task.
+  v1alphaSemanticSegmentationStuff:
+    type: object
+    properties:
+      rle:
+        type: string
+        description: RLE segmentation mask.
+        readOnly: true
+      category:
+        type: string
+        description: Category.
+        readOnly: true
+    description: |-
+      SemanticSegmentationStuff is an object detected within an image and
+      classified into a predefined category.
   v1alphaTask:
     type: string
     enum:
@@ -3699,11 +4207,12 @@ definitions:
       - TASK_INSTANCE_SEGMENTATION
      - TASK_SEMANTIC_SEGMENTATION
       - TASK_TEXT_TO_IMAGE
+      - TASK_TEXT_GENERATION
+      - TASK_TEXT_GENERATION_CHAT
+      - TASK_VISUAL_QUESTION_ANSWERING
       - TASK_IMAGE_TO_IMAGE
       - TASK_TEXT_EMBEDDINGS
       - TASK_SPEECH_RECOGNITION
-      - TASK_CHAT
-      - TASK_COMPLETION
     description: |-
       Task enumerates the AI task that a model is designed to solve.

       - TASK_CLASSIFICATION: Image Classification - classify images into predefined categories.
       - TASK_DETECTION: Object Detection - detect and localize multiple objects in images.
       - TASK_KEYPOINT: Keypoint Detection - detect and localize multiple keypoints of objects in images.
       - TASK_OCR: OCR (Optical Character Recognition) - extract text from images.
       - TASK_INSTANCE_SEGMENTATION: Instance Segmentation - detect, localize and delineate multiple objects in images.
       - TASK_SEMANTIC_SEGMENTATION: Semantic Segmentation - classify image pixels into predefined categories.
- TASK_TEXT_TO_IMAGE: Text to Image - generate images from input text prompts. + - TASK_TEXT_GENERATION: Text Generation - generate texts from input text prompts. + - TASK_TEXT_GENERATION_CHAT: Conversational Text Generation - generate text as responses to a dialog input. + - TASK_VISUAL_QUESTION_ANSWERING: Visual Question Answering - generate text as a response to a visual prompt. - TASK_IMAGE_TO_IMAGE: Image to Image - generate an image from another image. - TASK_TEXT_EMBEDDINGS: Text Embeddings - generate an embedding (a representation as coordinates) from a text input. - TASK_SPEECH_RECOGNITION: Speech Recognition - transcribe the words in an audio input. - - TASK_CHAT: Conversational Text Generation - generate text as responses to a dialog input. - - TASK_COMPLETION: Completion Text Generation - generate text following the input prompt. + v1alphaTaskInput: + type: object + properties: + classification: + description: Image classification input. + allOf: + - $ref: '#/definitions/v1alphaClassificationInput' + detection: + description: Object detection input. + allOf: + - $ref: '#/definitions/v1alphaDetectionInput' + keypoint: + description: Keypoint detection input. + allOf: + - $ref: '#/definitions/v1alphaKeypointInput' + ocr: + description: Optical Character Recognition input. + allOf: + - $ref: '#/definitions/v1alphaOcrInput' + instanceSegmentation: + description: Instance segmentation input. + allOf: + - $ref: '#/definitions/v1alphaInstanceSegmentationInput' + semanticSegmentation: + description: Semantic segmentation input. + allOf: + - $ref: '#/definitions/v1alphaSemanticSegmentationInput' + textToImage: + description: Text to image input. + allOf: + - $ref: '#/definitions/v1alphaTextToImageInput' + imageToImage: + description: Image to image input. + allOf: + - $ref: '#/definitions/v1alphaImageToImageInput' + textGeneration: + description: Text generation input. + allOf: + - $ref: '#/definitions/v1alphaTextGenerationInput' + textGenerationChat: + description: Conversational text generation input. + allOf: + - $ref: '#/definitions/v1alphaTextGenerationChatInput' + visualQuestionAnswering: + description: Visual question answering input. + allOf: + - $ref: '#/definitions/v1alphaVisualQuestionAnsweringInput' + unspecified: + description: Unspecified task input. + allOf: + - $ref: '#/definitions/v1alphaUnspecifiedInput' + description: TaskInput represents a question or task for an AI model. 
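`v1alphaTaskInput` mirrors a protobuf `oneof`: a well-formed payload sets exactly one of the task fields listed above. A small client-side guard, sketched with the field names copied from the definition (the helper itself is illustrative, not part of any SDK):

    # Sketch: enforce the oneof semantics of v1alphaTaskInput on a dict.
    TASK_FIELDS = {
        "classification", "detection", "keypoint", "ocr",
        "instanceSegmentation", "semanticSegmentation",
        "textToImage", "imageToImage", "textGeneration",
        "textGenerationChat", "visualQuestionAnswering", "unspecified",
    }

    def validate_task_input(task_input: dict) -> None:
        """Raise if the payload does not set exactly one task field."""
        set_fields = TASK_FIELDS & task_input.keys()
        if len(set_fields) != 1:
            raise ValueError(
                f"TaskInput must set exactly one task field, got {sorted(set_fields)}"
            )

    validate_task_input({"textGeneration": {"prompt": "Hello"}})  # passes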
+  v1alphaTaskInputStream:
+    type: object
+    properties:
+      classification:
+        title: The classification input
+        allOf:
+          - $ref: '#/definitions/v1alphaClassificationInputStream'
+      detection:
+        title: The detection input
+        allOf:
+          - $ref: '#/definitions/v1alphaDetectionInputStream'
+      keypoint:
+        title: The keypoint input
+        allOf:
+          - $ref: '#/definitions/v1alphaKeypointInputStream'
+      ocr:
+        title: The ocr input
+        allOf:
+          - $ref: '#/definitions/v1alphaOcrInputStream'
+      instanceSegmentation:
+        title: The instance segmentation input
+        allOf:
+          - $ref: '#/definitions/v1alphaInstanceSegmentationInputStream'
+      semanticSegmentation:
+        title: The semantic segmentation input
+        allOf:
+          - $ref: '#/definitions/v1alphaSemanticSegmentationInputStream'
+      textToImage:
+        title: The text to image input
+        allOf:
+          - $ref: '#/definitions/v1alphaTextToImageInput'
+      imageToImage:
+        title: The image to image input
+        allOf:
+          - $ref: '#/definitions/v1alphaImageToImageInput'
+      textGeneration:
+        title: The text generation input
+        allOf:
+          - $ref: '#/definitions/v1alphaTextGenerationInput'
+      textGenerationChat:
+        title: The text generation chat input
+        allOf:
+          - $ref: '#/definitions/v1alphaTextGenerationChatInput'
+      visualQuestionAnswering:
+        title: The visual question answering input
+        allOf:
+          - $ref: '#/definitions/v1alphaVisualQuestionAnsweringInput'
+      unspecified:
+        title: The unspecified task input
+        allOf:
+          - $ref: '#/definitions/v1alphaUnspecifiedInput'
+    title: TaskInputStream represents the input to trigger a model with the stream method
+  v1alphaTaskOutput:
+    type: object
+    properties:
+      classification:
+        description: Image classification output.
+        readOnly: true
+        allOf:
+          - $ref: '#/definitions/v1alphaClassificationOutput'
+      detection:
+        description: Object detection output.
+        readOnly: true
+        allOf:
+          - $ref: '#/definitions/v1alphaDetectionOutput'
+      keypoint:
+        description: Keypoint detection output.
+        readOnly: true
+        allOf:
+          - $ref: '#/definitions/v1alphaKeypointOutput'
+      ocr:
+        description: Optical Character Recognition output.
+        readOnly: true
+        allOf:
+          - $ref: '#/definitions/v1alphaOcrOutput'
+      instanceSegmentation:
+        description: Instance segmentation output.
+        readOnly: true
+        allOf:
+          - $ref: '#/definitions/v1alphaInstanceSegmentationOutput'
+      semanticSegmentation:
+        description: Semantic segmentation output.
+        readOnly: true
+        allOf:
+          - $ref: '#/definitions/v1alphaSemanticSegmentationOutput'
+      textToImage:
+        description: Text to image output.
+        readOnly: true
+        allOf:
+          - $ref: '#/definitions/v1alphaTextToImageOutput'
+      imageToImage:
+        description: Image to image output.
+        readOnly: true
+        allOf:
+          - $ref: '#/definitions/v1alphaImageToImageOutput'
+      textGeneration:
+        description: Text generation output.
+        readOnly: true
+        allOf:
+          - $ref: '#/definitions/v1alphaTextGenerationOutput'
+      textGenerationChat:
+        description: Conversational text generation output.
+        readOnly: true
+        allOf:
+          - $ref: '#/definitions/v1alphaTextGenerationChatOutput'
+      visualQuestionAnswering:
+        description: Visual question answering output.
+        readOnly: true
+        allOf:
+          - $ref: '#/definitions/v1alphaVisualQuestionAnsweringOutput'
+      unspecified:
+        description: Unspecified task output.
+        readOnly: true
+        allOf:
+          - $ref: '#/definitions/v1alphaUnspecifiedOutput'
+    description: TaskOutput represents the result of an AI task performed by a model.
+  v1alphaTextGenerationChatInput:
+    type: object
+    properties:
+      prompt:
+        type: string
+        description: Prompt text.
+ promptImages: + type: array + items: + type: object + $ref: '#/definitions/v1alphaPromptImage' + description: Prompt images. + chatHistory: + type: array + items: + type: object + $ref: '#/definitions/v1alphaMessage' + description: Chat history. + systemMessage: + type: string + description: System message, which sets the behaviour of the assistant. + maxNewTokens: + type: integer + format: int32 + description: Maximum number of generation tokens. + temperature: + type: number + format: float + description: Sampling temperature. + topK: + type: integer + format: int32 + description: |- + Sampling Top K, number of tokens at the top from which the model will + sample. + seed: + type: integer + format: int32 + description: Seed. + extraParams: + type: object + description: Extra parameters. + description: TextGenerationChatInput represents the input of a text generation chat task. + required: + - prompt + v1alphaTextGenerationChatOutput: + type: object + properties: + text: + type: string + description: Text generated by the model. + readOnly: true + description: TextGenerationChatOutput contains the result of a text generation chat task. + v1alphaTextGenerationInput: + type: object + properties: + prompt: + type: string + description: Prompt text. + promptImages: + type: array + items: + type: object + $ref: '#/definitions/v1alphaPromptImage' + description: Prompt images. + chatHistory: + type: array + items: + type: object + $ref: '#/definitions/v1alphaMessage' + description: Chat history. + systemMessage: + type: string + description: System message, which sets the behaviour of the assistant. + maxNewTokens: + type: integer + format: int32 + description: Maximum number of generation tokens. + temperature: + type: number + format: float + description: Sampling temperature. + topK: + type: integer + format: int32 + description: |- + Sampling Top K, number of tokens at the top from which the model will + sample. + seed: + type: integer + format: int32 + description: Seed. + extraParams: + type: object + description: Extra parameters. + description: TextGenerationInput represents the input of a text generation task. + required: + - prompt + v1alphaTextGenerationOutput: + type: object + properties: + text: + type: string + description: Text generated by the model. + readOnly: true + description: TextGenerationOutput contains the result of a text generation task. + v1alphaTextToImageInput: + type: object + properties: + prompt: + type: string + description: Prompt text. + promptImageUrl: + type: string + description: Image URL. + promptImageBase64: + type: string + description: Base64-encoded image. + steps: + type: integer + format: int32 + description: Steps, defaults to 5. + cfgScale: + type: number + format: float + description: Guidance scale, defaults to 7.5. + seed: + type: integer + format: int32 + description: Seed, defaults to 0. + samples: + type: integer + format: int32 + description: Number of generated samples, default is 1. + extraParams: + type: object + description: Extra parameters. + description: TextToImageInput represents the input of a text-to-image task. + required: + - prompt + v1alphaTextToImageOutput: + type: object + properties: + images: + type: array + items: + type: string + description: A list of generated images, encoded in base64. + readOnly: true + description: TextToImageOutput contains the result of a text-to-image task. 
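The generation-style inputs above (`v1alphaTextGenerationInput`, `v1alphaTextGenerationChatInput`, and the visual-question-answering input later in this file) expose the same sampling knobs. An illustrative chat payload — only `prompt` is required, and the parameter values here are arbitrary examples, not recommendations:

    # Sketch: a TextGenerationChatInput as one element of taskInputs.
    chat_input = {
        "textGenerationChat": {
            "prompt": "Summarize the release notes in one sentence.",
            "systemMessage": "You are a concise assistant.",  # optional
            "chatHistory": [
                {
                    "role": "user",
                    "content": [{"type": "text", "text": "Hi there!"}],
                }
            ],
            "maxNewTokens": 128,   # optional sampling knobs
            "temperature": 0.7,
            "topK": 40,
            "seed": 42,
        }
    }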
v1alphaTriggerAsyncNamespaceLatestModelResponse: type: object properties: @@ -3796,27 +4624,25 @@ definitions: type: array items: type: object - description: |- - Deleteted field. - Model inference outputs. - readOnly: true + $ref: '#/definitions/v1alphaTaskOutput' + description: Model inference outputs. description: TriggerNamespaceLatestModelBinaryFileUploadResponse contains the model inference results. required: - task + - taskOutputs v1alphaTriggerNamespaceLatestModelResponse: type: object properties: task: description: Task type. - readOnly: true allOf: - $ref: '#/definitions/v1alphaTask' taskOutputs: type: array items: type: object + $ref: '#/definitions/v1alphaTaskOutput' description: Model inference outputs. - readOnly: true description: TriggerNamespaceLatestModelResponse contains the model inference results. v1alphaTriggerNamespaceModelBinaryFileUploadResponse: type: object @@ -3829,40 +4655,43 @@ definitions: type: array items: type: object + $ref: '#/definitions/v1alphaTaskOutput' description: Model inference outputs. - readOnly: true + version: + type: string + title: Model version description: TriggerNamespaceModelBinaryFileUploadResponse contains the model inference results. required: - task + - taskOutputs + - version v1alphaTriggerNamespaceModelResponse: type: object properties: task: description: Task type. - readOnly: true allOf: - $ref: '#/definitions/v1alphaTask' taskOutputs: type: array items: type: object + $ref: '#/definitions/v1alphaTaskOutput' description: Model inference outputs. - readOnly: true description: TriggerNamespaceModelResponse contains the model inference results. v1alphaTriggerOrganizationLatestModelResponse: type: object properties: task: description: Task type. - readOnly: true allOf: - $ref: '#/definitions/v1alphaTask' taskOutputs: type: array items: type: object + $ref: '#/definitions/v1alphaTaskOutput' description: Model inference outputs. - readOnly: true description: TriggerOrganizationLatestModelResponse contains the model inference results. v1alphaTriggerOrganizationModelBinaryFileUploadResponse: type: object @@ -3875,39 +4704,39 @@ definitions: type: array items: type: object + $ref: '#/definitions/v1alphaTaskOutput' description: Model inference outputs. description: TriggerOrganizationModelBinaryFileUploadResponse contains the model inference results. required: - task + - taskOutputs v1alphaTriggerOrganizationModelResponse: type: object properties: task: description: Task type. - readOnly: true allOf: - $ref: '#/definitions/v1alphaTask' taskOutputs: type: array items: type: object + $ref: '#/definitions/v1alphaTaskOutput' description: Model inference outputs. - readOnly: true description: TriggerOrganizationModelResponse contains the model inference results. v1alphaTriggerUserLatestModelResponse: type: object properties: task: description: Task type. - readOnly: true allOf: - $ref: '#/definitions/v1alphaTask' taskOutputs: type: array items: type: object + $ref: '#/definitions/v1alphaTaskOutput' description: Model inference outputs. - readOnly: true description: TriggerUserLatestModelResponse contains the model inference results. v1alphaTriggerUserModelBinaryFileUploadResponse: type: object @@ -3920,25 +4749,29 @@ definitions: type: array items: type: object + $ref: '#/definitions/v1alphaTaskOutput' description: Model inference outputs. - readOnly: true + version: + type: string + title: Model version description: TriggerUserModelBinaryFileUploadResponse contains the model inference results. 
required: - task + - taskOutputs + - version v1alphaTriggerUserModelResponse: type: object properties: task: description: Task type. - readOnly: true allOf: - $ref: '#/definitions/v1alphaTask' taskOutputs: type: array items: type: object + $ref: '#/definitions/v1alphaTaskOutput' description: Model inference outputs. - readOnly: true description: TriggerUserModelResponse contains the model inference results. v1alphaUndeployNamespaceModelAdminResponse: type: object @@ -3949,12 +4782,30 @@ definitions: v1alphaUndeployUserModelAdminResponse: type: object title: UndeployUserModelAdminResponse represents a response for a undeployed model + v1alphaUnspecifiedInput: + type: object + properties: + rawInputs: + type: array + items: + type: object + description: A list of values for the task. + description: UnspecifiedInput represents the input of an unspecified task. + v1alphaUnspecifiedOutput: + type: object + properties: + rawOutputs: + type: array + items: + type: object + description: The task outputs. + readOnly: true + description: UnspecifiedOutput contains the result of an unspecified task. v1alphaUpdateNamespaceModelResponse: type: object properties: model: description: The updated model resource. - readOnly: true allOf: - $ref: '#/definitions/v1alphaModel' description: UpdateNamespaceModelResponse contains the updated model. @@ -3963,7 +4814,6 @@ definitions: properties: model: description: The updated model resource. - readOnly: true allOf: - $ref: '#/definitions/v1alphaModel' description: UpdateOrganizationModelResponse contains the updated model. @@ -3972,87 +4822,131 @@ definitions: properties: model: description: The updated model resource. - readOnly: true allOf: - $ref: '#/definitions/v1alphaModel' description: UpdateUserModelResponse contains the updated model. + v1alphaVisualQuestionAnsweringInput: + type: object + properties: + prompt: + type: string + description: Prompt text. + promptImages: + type: array + items: + type: object + $ref: '#/definitions/v1alphaPromptImage' + description: Prompt images. + chatHistory: + type: array + items: + type: object + $ref: '#/definitions/v1alphaMessage' + description: Chat history. + systemMessage: + type: string + description: System message, which sets the behaviour of the assistant. + maxNewTokens: + type: integer + format: int32 + description: Maximum number of generation tokens. + temperature: + type: number + format: float + description: Sampling temperature. + topK: + type: integer + format: int32 + description: |- + Sampling Top K, number of tokens at the top from which the model will + sample. + seed: + type: integer + format: int32 + description: Seed. + extraParams: + type: object + description: Extra parameters. + description: |- + VisualQuestionAnsweringInput represents the input of a visual + question-answering task. + required: + - prompt + v1alphaVisualQuestionAnsweringOutput: + type: object + properties: + text: + type: string + description: The text generated by the model. + readOnly: true + description: |- + VisualQuestionAnsweringOutput contains the result of a visual + question-answering task. v1alphaWatchNamespaceLatestModelResponse: type: object properties: state: description: State. - readOnly: true allOf: - $ref: '#/definitions/modelv1alphaState' message: type: string title: Detail description of the state - readOnly: true description: WatchNamespaceLatestModelResponse contains the state of the latest model version. v1alphaWatchNamespaceModelResponse: type: object properties: state: description: State. 
- readOnly: true allOf: - $ref: '#/definitions/modelv1alphaState' message: type: string title: Detail description of the state - readOnly: true description: WatchNamespaceModelResponse contains the state of a model. v1alphaWatchOrganizationLatestModelResponse: type: object properties: state: description: State. - readOnly: true allOf: - $ref: '#/definitions/modelv1alphaState' message: type: string title: Detail description of the state - readOnly: true description: WatchOrganizationLatestModelResponse contains the state of the latest model version. v1alphaWatchOrganizationModelResponse: type: object properties: state: description: State. - readOnly: true allOf: - $ref: '#/definitions/modelv1alphaState' message: type: string title: Detail description of the state - readOnly: true description: WatchOrganizationModelResponse contains the state of a model. v1alphaWatchUserLatestModelResponse: type: object properties: state: description: State. - readOnly: true allOf: - $ref: '#/definitions/modelv1alphaState' message: type: string title: Detail description of the state - readOnly: true description: WatchUserLatestModelResponse contains the state of the latest model version. v1alphaWatchUserModelResponse: type: object properties: state: description: State. - readOnly: true allOf: - $ref: '#/definitions/modelv1alphaState' message: type: string title: Detail description of the state - readOnly: true description: WatchUserModelResponse contains the state of a model. v1betaOrganization: type: object diff --git a/openapiv2/vdp/service.swagger.yaml b/openapiv2/vdp/service.swagger.yaml index d4b3043f..110a65ea 100644 --- a/openapiv2/vdp/service.swagger.yaml +++ b/openapiv2/vdp/service.swagger.yaml @@ -4890,8 +4890,7 @@ definitions: Note: this functionality is not currently available in the official protobuf release, and it is not used for type URLs beginning with - type.googleapis.com. As of May 2023, there are no widely used type server - implementations and no plans to implement one. + type.googleapis.com. Schemes other than `http`, `https` (or the empty scheme) might be used with implementation specific semantics. @@ -4926,7 +4925,7 @@ definitions: foo = any.unpack(Foo.getDefaultInstance()); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -4936,7 +4935,7 @@ definitions: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} any, err := anypb.New(foo) @@ -4956,7 +4955,7 @@ definitions: name "y.z". JSON - ==== + The JSON representation of an `Any` value uses the regular representation of the deserialized, embedded message, with an additional field `@type` which contains the type URL. Example: @@ -4988,7 +4987,7 @@ definitions: `NullValue` is a singleton enumeration to represent the null value for the `Value` type union. - The JSON representation for `NullValue` is JSON `null`. + The JSON representation for `NullValue` is JSON `null`. v1betaCheckNameRequest: type: object properties: