// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.aiplatform.v1beta1;

import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/aiplatform/v1beta1/deployed_model_ref.proto";
import "google/cloud/aiplatform/v1beta1/encryption_spec.proto";
import "google/cloud/aiplatform/v1beta1/env_var.proto";
import "google/cloud/aiplatform/v1beta1/explanation.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/timestamp.proto";

option csharp_namespace = "Google.Cloud.AIPlatform.V1Beta1";
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform";
option java_multiple_files = true;
option java_outer_classname = "ModelProto";
option java_package = "com.google.cloud.aiplatform.v1beta1";
option php_namespace = "Google\\Cloud\\AIPlatform\\V1beta1";
option ruby_package = "Google::Cloud::AIPlatform::V1beta1";

// A trained machine learning Model.
message Model {
  option (google.api.resource) = {
    type: "aiplatform.googleapis.com/Model"
    pattern: "projects/{project}/locations/{location}/models/{model}"
  };

  // Represents export format supported by the Model.
  // All formats export to Google Cloud Storage.
  message ExportFormat {
    // The Model content that can be exported.
    enum ExportableContent {
      // Should not be used.
      EXPORTABLE_CONTENT_UNSPECIFIED = 0;

      // Model artifact and any of its supported files. Will be exported to the
      // location specified by the `artifactDestination` field of the
      // [ExportModelRequest.output_config][google.cloud.aiplatform.v1beta1.ExportModelRequest.output_config] object.
      ARTIFACT = 1;

      // The container image that is to be used when deploying this Model. Will
      // be exported to the location specified by the `imageDestination` field
      // of the [ExportModelRequest.output_config][google.cloud.aiplatform.v1beta1.ExportModelRequest.output_config] object.
      IMAGE = 2;
    }

    // Output only. The ID of the export format.
    // The possible format IDs are:
    //
    // * `tflite`
    // Used for Android mobile devices.
    //
    // * `edgetpu-tflite`
    // Used for [Edge TPU](https://cloud.google.com/edge-tpu/) devices.
    //
    // * `tf-saved-model`
    // A tensorflow model in SavedModel format.
    //
    // * `tf-js`
    // A [TensorFlow.js](https://www.tensorflow.org/js) model that can be used
    // in the browser and in Node.js using JavaScript.
    //
    // * `core-ml`
    // Used for iOS mobile devices.
    //
    // * `custom-trained`
    // A Model that was uploaded or trained by custom code.
    string id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];

    // Output only. The content of this Model that may be exported.
    repeated ExportableContent exportable_contents = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
  }

  // Identifies a type of Model's prediction resources.
  enum DeploymentResourcesType {
    // Should not be used.
    DEPLOYMENT_RESOURCES_TYPE_UNSPECIFIED = 0;

    // Resources that are dedicated to the [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel], and that need a
    // higher degree of manual configuration.
    DEDICATED_RESOURCES = 1;

    // Resources that are, to a large degree, decided by Vertex AI and
    // require only modest additional configuration.
    AUTOMATIC_RESOURCES = 2;

    // Resources that can be shared by multiple [DeployedModels][google.cloud.aiplatform.v1beta1.DeployedModel].
    // A pre-configured [DeploymentResourcePool][google.cloud.aiplatform.v1beta1.DeploymentResourcePool] is required.
    SHARED_RESOURCES = 3;
  }

  // The resource name of the Model.
  string name = 1;

  // Output only. Immutable. The version ID of the model.
  // A new version is committed when a new model version is uploaded or
  // trained under an existing model id. It is an auto-incrementing decimal
  // number in string representation.
  string version_id = 28 [
    (google.api.field_behavior) = IMMUTABLE,
    (google.api.field_behavior) = OUTPUT_ONLY
  ];

  // User-provided version aliases so that a model version can be referenced
  // via alias (i.e.
  // projects/{project}/locations/{location}/models/{model_id}@{version_alias}
  // instead of the auto-generated version ID, i.e.
  // projects/{project}/locations/{location}/models/{model_id}@{version_id}).
  // The format is [a-z][a-zA-Z0-9-]{0,126}[a-z0-9] to distinguish from
  // version_id. A default version alias will be created for the first version
  // of the model, and there must be exactly one default version alias for a
  // model.
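  //
  // For example, assuming a hypothetical model with ID `123` in project
  // `my-project` and location `us-central1`, the alias `default` would be
  // referenced as:
  //
  //   projects/my-project/locations/us-central1/models/123@default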
  repeated string version_aliases = 29;

  // Output only. Timestamp when this version was created.
  google.protobuf.Timestamp version_create_time = 31 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Timestamp when this version was most recently updated.
  google.protobuf.Timestamp version_update_time = 32 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Required. The display name of the Model.
  // The name can be up to 128 characters long and can consist of any UTF-8
  // characters.
  string display_name = 2 [(google.api.field_behavior) = REQUIRED];

  // The description of the Model.
  string description = 3;

  // The description of this version.
  string version_description = 30;

  // The schemata that describe formats of the Model's predictions and
  // explanations as given and returned via
  // [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] and [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain].
  PredictSchemata predict_schemata = 4;

  // Immutable. Points to a YAML file stored on Google Cloud Storage describing additional
  // information about the Model that is specific to it. Unset if the Model
  // does not have any additional information.
  // The schema is defined as an OpenAPI 3.0.2 [Schema
  // Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
  // AutoML Models always have this field populated by Vertex AI; if no
  // additional metadata is needed, this field is set to an empty string.
  // Note: The URI given on output will be immutable and probably different,
  // including the URI scheme, from the one given on input. The output URI will
  // point to a location where the user only has read access.
  string metadata_schema_uri = 5 [(google.api.field_behavior) = IMMUTABLE];

  // Immutable. Additional information about the Model; the schema of the metadata can
  // be found in [metadata_schema][google.cloud.aiplatform.v1beta1.Model.metadata_schema_uri].
  // Unset if the Model does not have any additional information.
  google.protobuf.Value metadata = 6 [(google.api.field_behavior) = IMMUTABLE];

  // Output only. The formats in which this Model may be exported. If empty, this Model is
  // not available for export.
  repeated ExportFormat supported_export_formats = 20 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The resource name of the TrainingPipeline that uploaded this Model, if
  // any.
  string training_pipeline = 7 [
    (google.api.field_behavior) = OUTPUT_ONLY,
    (google.api.resource_reference) = {
      type: "aiplatform.googleapis.com/TrainingPipeline"
    }
  ];

  // Input only. The specification of the container that is to be used when deploying
  // this Model. The specification is ingested upon
  // [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel], and all binaries it contains are copied
  // and stored internally by Vertex AI.
  // Not present for AutoML Models.
  ModelContainerSpec container_spec = 9 [(google.api.field_behavior) = INPUT_ONLY];

  // Immutable. The path to the directory containing the Model artifact and any of its
  // supporting files.
  // Not present for AutoML Models.
  string artifact_uri = 26 [(google.api.field_behavior) = IMMUTABLE];

  // Output only. When this Model is deployed, its prediction resources are described by the
  // `prediction_resources` field of the [Endpoint.deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] object.
  // Because not all Models support all resource configuration types, the
  // configuration types this Model supports are listed here. If no
  // configuration types are listed, the Model cannot be deployed to an
  // [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] and does not support
  // online predictions ([PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] or
  // [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]). Such a Model can serve predictions by
  // using a [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob], if it has at least one entry each in
  // [supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats] and
  // [supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats].
  repeated DeploymentResourcesType supported_deployment_resources_types = 10 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The formats this Model supports in
  // [BatchPredictionJob.input_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. If
  // [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] exists, the instances
  // should be given as per that schema.
  //
  // The possible formats are:
  //
  // * `jsonl`
  // The JSON Lines format, where each instance is a single line. Uses
  // [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source].
  //
  // * `csv`
  // The CSV format, where each instance is a single comma-separated line.
  // The first line in the file is the header, containing comma-separated field
  // names. Uses [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source].
  //
  // * `tf-record`
  // The TFRecord format, where each instance is a single record in tfrecord
  // syntax. Uses [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source].
  //
  // * `tf-record-gzip`
  // Similar to `tf-record`, but the file is gzipped. Uses
  // [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source].
  //
  // * `bigquery`
  // Each instance is a single row in BigQuery. Uses
  // [BigQuerySource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.bigquery_source].
  //
  // * `file-list`
  // Each line of the file is the location of an instance to process. Uses the
  // `gcs_source` field of the
  // [InputConfig][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig] object.
  //
  //
  // If this Model doesn't support any of these formats, it cannot be
  // used with a [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. However, if it has
  // [supported_deployment_resources_types][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types], it could serve online
  // predictions by using [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] or
  // [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain].
  repeated string supported_input_storage_formats = 11 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The formats this Model supports in
  // [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config]. If both
  // [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] and
  // [PredictSchemata.prediction_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.prediction_schema_uri] exist, the predictions
  // are returned together with their instances. In other words, the
  // prediction has the original instance data first, followed
  // by the actual prediction content (as per the schema).
  //
  // The possible formats are:
  //
  // * `jsonl`
  // The JSON Lines format, where each prediction is a single line. Uses
  // [GcsDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.gcs_destination].
  //
  // * `csv`
  // The CSV format, where each prediction is a single comma-separated line.
  // The first line in the file is the header, containing comma-separated field
  // names. Uses
  // [GcsDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.gcs_destination].
  //
  // * `bigquery`
  // Each prediction is a single row in a BigQuery table. Uses
  // [BigQueryDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.bigquery_destination].
  //
  //
  // If this Model doesn't support any of these formats, it cannot be
  // used with a [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. However, if it has
  // [supported_deployment_resources_types][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types], it could serve online
  // predictions by using [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] or
  // [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain].
  repeated string supported_output_storage_formats = 12 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Timestamp when this Model was uploaded into Vertex AI.
  google.protobuf.Timestamp create_time = 13 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Timestamp when this Model was most recently updated.
  google.protobuf.Timestamp update_time = 14 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The pointers to DeployedModels created from this Model. Note that a
  // Model could have been deployed to Endpoints in different Locations.
  repeated DeployedModelRef deployed_models = 15 [(google.api.field_behavior) = OUTPUT_ONLY];

  // The default explanation specification for this Model.
  //
  // If it is populated, the Model can be used for [requesting
  // explanation][PredictionService.Explain] after being
  // [deployed][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel], and for [batch
  // explanation][BatchPredictionJob.generate_explanation].
  //
  // All fields of the explanation_spec can be overridden by
  // [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] of
  // [DeployModelRequest.deployed_model][google.cloud.aiplatform.v1beta1.DeployModelRequest.deployed_model], or
  // [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] of
  // [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob].
  //
  // If the default explanation specification is not set for this Model, this
  // Model can still be used for [requesting
  // explanation][PredictionService.Explain] by setting
  // [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] of
  // [DeployModelRequest.deployed_model][google.cloud.aiplatform.v1beta1.DeployModelRequest.deployed_model] and for [batch
  // explanation][BatchPredictionJob.generate_explanation] by setting
  // [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] of
  // [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob].
  ExplanationSpec explanation_spec = 23;

  // Used to perform consistent read-modify-write updates. If not set, a blind
  // "overwrite" update happens.
  string etag = 16;

  // The labels with user-defined metadata to organize your Models.
  //
  // Label keys and values can be no longer than 64 characters
  // (Unicode codepoints) and can only contain lowercase letters, numeric
  // characters, underscores, and dashes. International characters are allowed.
  //
  // See https://goo.gl/xmQnxf for more information and examples of labels.
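  //
  // For example, a hypothetical set of valid labels (illustrative only):
  //
  // ```json
  // {
  //   "env": "staging",
  //   "team": "fraud-detection"
  // }
  // ```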
  map<string, string> labels = 17;

  // Customer-managed encryption key spec for a Model. If set, this
  // Model and all sub-resources of this Model will be secured by this key.
  EncryptionSpec encryption_spec = 24;

  // Output only. Source of a model. It can be an AutoML training pipeline, a custom
  // training pipeline, BigQuery ML, or an existing Vertex AI Model.
  ModelSourceInfo model_source_info = 38 [(google.api.field_behavior) = OUTPUT_ONLY];
}

// Contains the schemata used in Model's predictions and explanations via
// [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict], [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain] and
// [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob].
message PredictSchemata {
  // Immutable. Points to a YAML file stored on Google Cloud Storage describing the format
  // of a single instance, which is used in [PredictRequest.instances][google.cloud.aiplatform.v1beta1.PredictRequest.instances],
  // [ExplainRequest.instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances] and
  // [BatchPredictionJob.input_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config].
  // The schema is defined as an OpenAPI 3.0.2 [Schema
  // Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
  // AutoML Models always have this field populated by Vertex AI.
  // Note: The URI given on output will be immutable and probably different,
  // including the URI scheme, from the one given on input. The output URI will
  // point to a location where the user only has read access.
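  //
  // For illustration, a minimal hypothetical instance schema (the actual
  // schema depends on the Model) could look like:
  //
  // ```yaml
  // type: object
  // properties:
  //   values:
  //     type: array
  //     items:
  //       type: number
  // required:
  //   - values
  // ```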
  string instance_schema_uri = 1 [(google.api.field_behavior) = IMMUTABLE];

  // Immutable. Points to a YAML file stored on Google Cloud Storage describing the
  // parameters of prediction and explanation via
  // [PredictRequest.parameters][google.cloud.aiplatform.v1beta1.PredictRequest.parameters], [ExplainRequest.parameters][google.cloud.aiplatform.v1beta1.ExplainRequest.parameters] and
  // [BatchPredictionJob.model_parameters][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model_parameters].
  // The schema is defined as an OpenAPI 3.0.2 [Schema
  // Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
  // AutoML Models always have this field populated by Vertex AI; if no
  // parameters are supported, then it is set to an empty string.
  // Note: The URI given on output will be immutable and probably different,
  // including the URI scheme, from the one given on input. The output URI will
  // point to a location where the user only has read access.
  string parameters_schema_uri = 2 [(google.api.field_behavior) = IMMUTABLE];

  // Immutable. Points to a YAML file stored on Google Cloud Storage describing the format
  // of a single prediction produced by this Model, which is returned via
  // [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions], [ExplainResponse.explanations][google.cloud.aiplatform.v1beta1.ExplainResponse.explanations], and
  // [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config].
  // The schema is defined as an OpenAPI 3.0.2 [Schema
  // Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
  // AutoML Models always have this field populated by Vertex AI.
  // Note: The URI given on output will be immutable and probably different,
  // including the URI scheme, from the one given on input. The output URI will
  // point to a location where the user only has read access.
  string prediction_schema_uri = 3 [(google.api.field_behavior) = IMMUTABLE];
}

// Specification of a container for serving predictions. Some fields in this
// message correspond to fields in the [Kubernetes Container v1 core
// specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
message ModelContainerSpec {
  // Required. Immutable. URI of the Docker image to be used as the custom container for serving
  // predictions. This URI must identify an image in Artifact Registry or
  // Container Registry. Learn more about the [container publishing
  // requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#publishing),
  // including permissions requirements for the Vertex AI Service Agent.
  //
  // The container image is ingested upon [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel], stored
  // internally, and this original path is afterwards not used.
  //
  // To learn about the requirements for the Docker image itself, see
  // [Custom container
  // requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#).
  //
  // You can use the URI of one of Vertex AI's [pre-built container images for
  // prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers)
  // in this field.
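  //
  // For example, a hypothetical custom image stored in Artifact Registry
  // (illustrative only, not an actual published image):
  //
  //   us-central1-docker.pkg.dev/my-project/my-repo/my-serving-image:latest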
  string image_uri = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.field_behavior) = IMMUTABLE
  ];

  // Immutable. Specifies the command that runs when the container starts. This overrides
  // the container's
  // [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint).
  // Specify this field as an array of executable and arguments, similar to a
  // Docker `ENTRYPOINT`'s "exec" form, not its "shell" form.
  //
  // If you do not specify this field, then the container's `ENTRYPOINT` runs,
  // in conjunction with the [args][google.cloud.aiplatform.v1beta1.ModelContainerSpec.args] field or the
  // container's [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd),
  // if either exists. If this field is not specified and the container does not
  // have an `ENTRYPOINT`, then refer to the Docker documentation about [how
  // `CMD` and `ENTRYPOINT`
  // interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact).
  //
  // If you specify this field, then you can also specify the `args` field to
  // provide additional arguments for this command. However, if you specify this
  // field, then the container's `CMD` is ignored. See the
  // [Kubernetes documentation about how the
  // `command` and `args` fields interact with a container's `ENTRYPOINT` and
  // `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes).
  //
  // In this field, you can reference [environment variables set by Vertex
  // AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables)
  // and environment variables set in the [env][google.cloud.aiplatform.v1beta1.ModelContainerSpec.env] field.
  // You cannot reference environment variables set in the Docker image. In
  // order for environment variables to be expanded, reference them by using the
  // following syntax:
  // <code>$(<var>VARIABLE_NAME</var>)</code>
  // Note that this differs from Bash variable expansion, which does not use
  // parentheses. If a variable cannot be resolved, the reference in the input
  // string is used unchanged. To avoid variable expansion, you can escape this
  // syntax with `$$`; for example:
  // <code>$$(<var>VARIABLE_NAME</var>)</code>
  // This field corresponds to the `command` field of the Kubernetes Containers
  // [v1 core
  // API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
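  //
  // For example, a hypothetical entrypoint that references variables set by
  // Vertex AI (illustrative only):
  //
  // ```json
  // ["/usr/bin/serve", "--model_dir=$(AIP_STORAGE_URI)", "--port=$(AIP_HTTP_PORT)"]
  // ```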
  repeated string command = 2 [(google.api.field_behavior) = IMMUTABLE];

  // Immutable. Specifies arguments for the command that runs when the container starts.
  // This overrides the container's
  // [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd). Specify
  // this field as an array of executable and arguments, similar to a Docker
  // `CMD`'s "default parameters" form.
  //
  // If you don't specify this field but do specify the
  // [command][google.cloud.aiplatform.v1beta1.ModelContainerSpec.command] field, then the command from the
  // `command` field runs without any additional arguments. See the
  // [Kubernetes documentation about how the
  // `command` and `args` fields interact with a container's `ENTRYPOINT` and
  // `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes).
  //
  // If you don't specify this field and don't specify the `command` field,
  // then the container's
  // [`ENTRYPOINT`](https://docs.docker.com/engine/reference/builder/#cmd) and
  // `CMD` determine what runs based on their default behavior. See the Docker
  // documentation about [how `CMD` and `ENTRYPOINT`
  // interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact).
  //
  // In this field, you can reference [environment variables
  // set by Vertex
  // AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables)
  // and environment variables set in the [env][google.cloud.aiplatform.v1beta1.ModelContainerSpec.env] field.
  // You cannot reference environment variables set in the Docker image. In
  // order for environment variables to be expanded, reference them by using the
  // following syntax:
  // <code>$(<var>VARIABLE_NAME</var>)</code>
  // Note that this differs from Bash variable expansion, which does not use
  // parentheses. If a variable cannot be resolved, the reference in the input
  // string is used unchanged. To avoid variable expansion, you can escape this
  // syntax with `$$`; for example:
  // <code>$$(<var>VARIABLE_NAME</var>)</code>
  // This field corresponds to the `args` field of the Kubernetes Containers
  // [v1 core
  // API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
  repeated string args = 3 [(google.api.field_behavior) = IMMUTABLE];

  // Immutable. List of environment variables to set in the container. After the container
  // starts running, code running in the container can read these environment
  // variables.
  //
  // Additionally, the [command][google.cloud.aiplatform.v1beta1.ModelContainerSpec.command] and
  // [args][google.cloud.aiplatform.v1beta1.ModelContainerSpec.args] fields can reference these variables. Later
  // entries in this list can also reference earlier entries. For example, the
  // following example sets the variable `VAR_2` to have the value `foo bar`:
  //
  // ```json
  // [
  //   {
  //     "name": "VAR_1",
  //     "value": "foo"
  //   },
  //   {
  //     "name": "VAR_2",
  //     "value": "$(VAR_1) bar"
  //   }
  // ]
  // ```
  //
  // If you switch the order of the variables in the example, then the expansion
  // does not occur.
  //
  // This field corresponds to the `env` field of the Kubernetes Containers
  // [v1 core
  // API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
  repeated EnvVar env = 4 [(google.api.field_behavior) = IMMUTABLE];

  // Immutable. List of ports to expose from the container. Vertex AI sends any
  // prediction requests that it receives to the first port on this list. Vertex
  // AI also sends
  // [liveness and health
  // checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#liveness)
  // to this port.
  //
  // If you do not specify this field, it defaults to the following value:
  //
  // ```json
  // [
  //   {
  //     "containerPort": 8080
  //   }
  // ]
  // ```
  //
  // Vertex AI does not use ports other than the first one listed. This field
  // corresponds to the `ports` field of the Kubernetes Containers
  // [v1 core
  // API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
  repeated Port ports = 5 [(google.api.field_behavior) = IMMUTABLE];

  // Immutable. HTTP path on the container to send prediction requests to. Vertex AI
  // forwards requests sent using
  // [projects.locations.endpoints.predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] to this
  // path on the container's IP address and port. Vertex AI then returns the
  // container's response in the API response.
  //
  // For example, if you set this field to `/foo`, then when Vertex AI
  // receives a prediction request, it forwards the request body in a POST
  // request to the `/foo` path on the port of your container specified by the
  // first value of this `ModelContainerSpec`'s
  // [ports][google.cloud.aiplatform.v1beta1.ModelContainerSpec.ports] field.
  //
  // If you don't specify this field, it defaults to the following value when
  // you [deploy this Model to an Endpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]:
  // <code>/v1/endpoints/<var>ENDPOINT</var>/deployedModels/<var>DEPLOYED_MODEL</var>:predict</code>
  // The placeholders in this value are replaced as follows:
  //
  // * <var>ENDPOINT</var>: The last segment (following `endpoints/`) of the
  //   [Endpoint.name][] field of the Endpoint where this Model has been
  //   deployed. (Vertex AI makes this value available to your container code
  //   as the [`AIP_ENDPOINT_ID` environment
  //   variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).)
  //
  // * <var>DEPLOYED_MODEL</var>: [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id] of the `DeployedModel`.
  //   (Vertex AI makes this value available to your container code
  //   as the [`AIP_DEPLOYED_MODEL_ID` environment
  //   variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).)
  string predict_route = 6 [(google.api.field_behavior) = IMMUTABLE];

  // Immutable. HTTP path on the container to send health checks to. Vertex AI
  // intermittently sends GET requests to this path on the container's IP
  // address and port to check that the container is healthy. Read more about
  // [health
  // checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#health).
  //
  // For example, if you set this field to `/bar`, then Vertex AI
  // intermittently sends a GET request to the `/bar` path on the port of your
  // container specified by the first value of this `ModelContainerSpec`'s
  // [ports][google.cloud.aiplatform.v1beta1.ModelContainerSpec.ports] field.
  //
  // If you don't specify this field, it defaults to the following value when
  // you [deploy this Model to an Endpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]:
  // <code>/v1/endpoints/<var>ENDPOINT</var>/deployedModels/<var>DEPLOYED_MODEL</var>:predict</code>
  // The placeholders in this value are replaced as follows:
  //
  // * <var>ENDPOINT</var>: The last segment (following `endpoints/`) of the
  //   [Endpoint.name][] field of the Endpoint where this Model has been
  //   deployed. (Vertex AI makes this value available to your container code
  //   as the [`AIP_ENDPOINT_ID` environment
  //   variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).)
  //
  // * <var>DEPLOYED_MODEL</var>: [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id] of the `DeployedModel`.
  //   (Vertex AI makes this value available to your container code as the
  //   [`AIP_DEPLOYED_MODEL_ID` environment
  //   variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).)
  string health_route = 7 [(google.api.field_behavior) = IMMUTABLE];
}

// Represents a network port in a container.
message Port {
  // The number of the port to expose on the pod's IP address.
  // Must be a valid port number, between 1 and 65535 inclusive.
  int32 container_port = 3;
}

// Detailed description of the source information for the model.
message ModelSourceInfo {
  // Source of the model.
  enum ModelSourceType {
    // Should not be used.
    MODEL_SOURCE_TYPE_UNSPECIFIED = 0;

    // The Model is uploaded by an AutoML training pipeline.
    AUTOML = 1;

    // The Model is uploaded by a user or a custom training pipeline.
    CUSTOM = 2;

    // The Model is registered and synced from BigQuery ML.
    BQML = 3;
  }

  // Type of the model source.
  ModelSourceType source_type = 1;
}